"""Return the indices of the maximum values along an axis."""
from __future__ import annotations
from typing import Any, Optional
import numpy
import numpoly
from ..baseclass import PolyLike
from ..dispatch import implements
@implements(numpy.argmax)
def argmax(
a: PolyLike,
axis: Optional[int] = None,
out: Optional[numpy.ndarray] = None,
) -> Any:
"""
Return the indices of the maximum values along an axis.
As polynomials are not inherently sortable, values are sorted using the
highest `lexicographical` ordering. Between the values that have the same
highest ordering, the elements are sorted using the coefficients. This also
ensures that the method behaves as expected with ``numpy.ndarray``.
Args:
a:
Input array.
axis:
By default, the index is into the flattened array, otherwise along
the specified axis.
out:
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns:
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
Notes:
In case of multiple occurrences of the maximum values, the
indices corresponding to the first occurrence are returned.
Examples:
>>> q0, q1 = numpoly.variable(2)
>>> numpoly.argmax([13, 7])
0
>>> numpoly.argmax([1, q0, q0**2, q1])
2
>>> numpoly.argmax([1, q0, q1])
2
>>> numpoly.argmax([[3*q0**2, q0**2],
... [2*q0**2, 4*q0**2]], axis=0)
array([0, 1])
"""
a = numpoly.aspolynomial(a)
options = numpoly.get_options()
proxy = numpoly.sortable_proxy(
a, graded=options["sort_graded"], reverse=options["sort_reverse"])
return numpy.argmax(proxy, axis=axis, out=out)
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wagmi.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
"""Market module to interact with Serum DEX."""
from __future__ import annotations
from typing import List
from solana.account import Account
from solana.publickey import PublicKey
from solana.rpc.async_api import AsyncClient
from solana.rpc.types import RPCResponse, TxOpts
from solana.transaction import Transaction
from pyserum import instructions
import pyserum.market.types as t
from .._layouts.open_orders import OPEN_ORDERS_LAYOUT
from ..enums import OrderType, Side
from ..async_open_orders_account import AsyncOpenOrdersAccount
from ..async_utils import load_bytes_data
from ._internal.queue import decode_event_queue, decode_request_queue
from .orderbook import OrderBook
from .state import MarketState
from .core import MarketCore
LAMPORTS_PER_SOL = 1000000000
# pylint: disable=too-many-public-methods,abstract-method
class AsyncMarket(MarketCore):
"""Represents a Serum Market."""
def __init__(self, conn: AsyncClient, market_state: MarketState, force_use_request_queue: bool = False) -> None:
super().__init__(market_state=market_state, force_use_request_queue=force_use_request_queue)
self._conn = conn
@classmethod
# pylint: disable=unused-argument
async def load(
cls,
conn: AsyncClient,
market_address: PublicKey,
program_id: PublicKey = instructions.DEFAULT_DEX_PROGRAM_ID,
force_use_request_queue: bool = False,
) -> AsyncMarket:
"""Factory method to create a Market.
:param conn: The async connection used to load the data, created from `solana.rpc.async_api`.
:param market_address: The address of the market to connect to.
:param program_id: The program id of the given market; the default DEX program id is used if not provided.
:param force_use_request_queue: Whether to force the use of the request queue when building order transactions.
"""
market_state = await MarketState.async_load(conn, market_address, program_id)
return cls(conn, market_state, force_use_request_queue)
async def find_open_orders_accounts_for_owner(self, owner_address: PublicKey) -> List[AsyncOpenOrdersAccount]:
return await AsyncOpenOrdersAccount.find_for_market_and_owner(
self._conn, self.state.public_key(), owner_address, self.state.program_id()
)
async def load_bids(self) -> OrderBook:
"""Load the bid order book"""
bytes_data = await load_bytes_data(self.state.bids(), self._conn)
return self._parse_bids_or_asks(bytes_data)
async def load_asks(self) -> OrderBook:
"""Load the ask order book."""
bytes_data = await load_bytes_data(self.state.asks(), self._conn)
return self._parse_bids_or_asks(bytes_data)
async def load_orders_for_owner(self, owner_address: PublicKey) -> List[t.Order]:
"""Load orders for owner."""
bids = await self.load_bids()
asks = await self.load_asks()
open_orders_accounts = await self.find_open_orders_accounts_for_owner(owner_address)
return self._parse_orders_for_owner(bids, asks, open_orders_accounts)
async def load_event_queue(self) -> List[t.Event]:
"""Load the event queue which includes the fill item and out item. For any trades two fill items are added to
the event queue. And in case of a trade, cancel or IOC order that missed, out items are added to the event
queue.
"""
bytes_data = await load_bytes_data(self.state.event_queue(), self._conn)
return decode_event_queue(bytes_data)
async def load_request_queue(self) -> List[t.Request]:
bytes_data = await load_bytes_data(self.state.request_queue(), self._conn)
return decode_request_queue(bytes_data)
async def load_fills(self, limit=100) -> List[t.FilledOrder]:
bytes_data = await load_bytes_data(self.state.event_queue(), self._conn)
return self._parse_fills(bytes_data, limit)
async def place_order( # pylint: disable=too-many-arguments,too-many-locals
self,
payer: PublicKey,
owner: Account,
order_type: OrderType,
side: Side,
limit_price: float,
max_quantity: float,
client_id: int = 0,
opts: TxOpts = TxOpts(),
) -> RPCResponse: # TODO: Add open_orders_address_key param and fee_discount_pubkey
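# Reuse the owner's first existing open orders account if there is one; otherwise a new
# account is created and funded with the rent-exempt minimum within the same transaction.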
transaction = Transaction()
signers: List[Account] = [owner]
open_order_accounts = await self.find_open_orders_accounts_for_owner(owner.public_key())
if open_order_accounts:
place_order_open_order_account = open_order_accounts[0].address
else:
mbfre_resp = await self._conn.get_minimum_balance_for_rent_exemption(OPEN_ORDERS_LAYOUT.sizeof())
place_order_open_order_account = self._after_oo_mbfre_resp(
mbfre_resp=mbfre_resp, owner=owner, signers=signers, transaction=transaction
)
# TODO: Cache new_open_orders_account
# TODO: Handle fee_discount_pubkey
self._prepare_order_transaction(
transaction=transaction,
payer=payer,
owner=owner,
order_type=order_type,
side=side,
signers=signers,
limit_price=limit_price,
max_quantity=max_quantity,
client_id=client_id,
open_order_accounts=open_order_accounts,
place_order_open_order_account=place_order_open_order_account,
)
return await self._conn.send_transaction(transaction, *signers, opts=opts)
async def cancel_order_by_client_id(
self, owner: Account, open_orders_account: PublicKey, client_id: int, opts: TxOpts = TxOpts()
) -> RPCResponse:
txs = self._build_cancel_order_by_client_id_tx(
owner=owner, open_orders_account=open_orders_account, client_id=client_id
)
return await self._conn.send_transaction(txs, owner, opts=opts)
async def cancel_order(self, owner: Account, order: t.Order, opts: TxOpts = TxOpts()) -> RPCResponse:
txn = self._build_cancel_order_tx(owner=owner, order=order)
return await self._conn.send_transaction(txn, owner, opts=opts)
async def match_orders(self, fee_payer: Account, limit: int, opts: TxOpts = TxOpts()) -> RPCResponse:
txn = self._build_match_orders_tx(limit)
return await self._conn.send_transaction(txn, fee_payer, opts=opts)
async def settle_funds( # pylint: disable=too-many-arguments
self,
owner: Account,
open_orders: AsyncOpenOrdersAccount,
base_wallet: PublicKey,
quote_wallet: PublicKey, # TODO: add referrer_quote_wallet.
opts: TxOpts = TxOpts(),
) -> RPCResponse:
# TODO: Handle wrapped sol accounts
should_wrap_sol = self._settle_funds_should_wrap_sol()
if should_wrap_sol:
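# 165 bytes is the serialized size of an SPL token account; its rent-exempt minimum is
# needed here to create the temporary wrapped-SOL account used during settlement.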
mbfre_resp = await self._conn.get_minimum_balance_for_rent_exemption(165)
min_bal_for_rent_exemption = mbfre_resp["result"]
else:
min_bal_for_rent_exemption = 0 # value only matters if should_wrap_sol
transaction = self._build_settle_funds_tx(
owner=owner,
open_orders=open_orders,
base_wallet=base_wallet,
quote_wallet=quote_wallet,
min_bal_for_rent_exemption=min_bal_for_rent_exemption,
should_wrap_sol=should_wrap_sol,
)
return await self._conn.send_transaction(transaction, owner, opts=opts)
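# --- Minimal usage sketch (illustrative only, not part of the library) ---
# Assumes a reachable RPC endpoint and a real market address; the address below is a
# placeholder, so this block is a sketch rather than a ready-to-run script.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        conn = AsyncClient("https://api.mainnet-beta.solana.com")
        market = await AsyncMarket.load(conn, PublicKey("<MARKET_ADDRESS>"))
        asks = await market.load_asks()
        for order in asks:
            print(order.order_id, order.info.price, order.info.size)
        await conn.close()

    asyncio.run(_demo())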
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.redis.v1 CloudRedis API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.redis_v1.gapic import cloud_redis_client_config
from google.cloud.redis_v1.gapic import enums
from google.cloud.redis_v1.gapic.transports import cloud_redis_grpc_transport
from google.cloud.redis_v1.proto import cloud_redis_pb2
from google.cloud.redis_v1.proto import cloud_redis_pb2_grpc
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-redis").version
class CloudRedisClient(object):
"""
Configures and manages Cloud Memorystore for Redis instances
Google Cloud Memorystore for Redis v1
The ``redis.googleapis.com`` service implements the Google Cloud
Memorystore for Redis API and defines the following resource model for
managing Redis instances:
- The service works with a collection of cloud projects, named:
``/projects/*``
- Each project has a collection of available locations, named:
``/locations/*``
- Each location has a collection of Redis instances, named:
``/instances/*``
- As such, Redis instances are resources of the form:
``/projects/{project_id}/locations/{location_id}/instances/{instance_id}``
Note that location\_id must be referring to a GCP ``region``; for
example:
- ``projects/redpepper-1290/locations/us-central1/instances/my-redis``
"""
SERVICE_ADDRESS = "redis.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.redis.v1.CloudRedis"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CloudRedisClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def instance_path(cls, project, location, instance):
"""Return a fully-qualified instance string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/instances/{instance}",
project=project,
location=location,
instance=instance,
)
@classmethod
def location_path(cls, project, location):
"""Return a fully-qualified location string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}",
project=project,
location=location,
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.CloudRedisGrpcTransport,
Callable[[~.Credentials, type], ~.CloudRedisGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = cloud_redis_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=cloud_redis_grpc_transport.CloudRedisGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = cloud_redis_grpc_transport.CloudRedisGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def list_instances(
self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists all Redis instances owned by a project in either the specified
location (region) or all locations.
The location should have the following format:
- ``projects/{project_id}/locations/{location_id}``
If ``location_id`` is specified as ``-`` (wildcard), then all regions
available to the project are queried, and the results are aggregated.
Example:
>>> from google.cloud import redis_v1
>>>
>>> client = redis_v1.CloudRedisClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # Iterate over all results
>>> for element in client.list_instances(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_instances(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The resource name of the instance location using the form:
``projects/{project_id}/locations/{location_id}`` where ``location_id``
refers to a GCP region.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.redis_v1.types.Instance` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_instances" not in self._inner_api_calls:
self._inner_api_calls[
"list_instances"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_instances,
default_retry=self._method_configs["ListInstances"].retry,
default_timeout=self._method_configs["ListInstances"].timeout,
client_info=self._client_info,
)
request = cloud_redis_pb2.ListInstancesRequest(
parent=parent, page_size=page_size
)
if metadata is None:
metadata = []
metadata = list(metadata)
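# Attach a routing header so the request is routed by resource name
# (sent as the x-goog-request-params gRPC metadata key).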
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_instances"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="instances",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def get_instance(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the details of a specific Redis instance.
Example:
>>> from google.cloud import redis_v1
>>>
>>> client = redis_v1.CloudRedisClient()
>>>
>>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
>>>
>>> response = client.get_instance(name)
Args:
name (str): Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.redis_v1.types.Instance` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_instance" not in self._inner_api_calls:
self._inner_api_calls[
"get_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_instance,
default_retry=self._method_configs["GetInstance"].retry,
default_timeout=self._method_configs["GetInstance"].timeout,
client_info=self._client_info,
)
request = cloud_redis_pb2.GetInstanceRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def create_instance(
self,
parent,
instance_id,
instance,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a Redis instance based on the specified tier and memory size.
By default, the instance is accessible from the project's `default
network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
The creation is executed asynchronously and callers may check the
returned operation to track its progress. Once the operation is
completed the Redis instance will be fully functional. Completed
longrunning.Operation will contain the new instance object in the
response field.
The returned operation is automatically deleted after a few hours, so
there is no need to call DeleteOperation.
Example:
>>> from google.cloud import redis_v1
>>> from google.cloud.redis_v1 import enums
>>>
>>> client = redis_v1.CloudRedisClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>> instance_id = 'test_instance'
>>> tier = enums.Instance.Tier.BASIC
>>> memory_size_gb = 1
>>> instance = {'tier': tier, 'memory_size_gb': memory_size_gb}
>>>
>>> response = client.create_instance(parent, instance_id, instance)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The resource name of the instance location using the form:
``projects/{project_id}/locations/{location_id}`` where ``location_id``
refers to a GCP region.
instance_id (str): Required. The logical name of the Redis instance in the customer project
with the following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
instance (Union[dict, ~google.cloud.redis_v1.types.Instance]): Required. A Redis [Instance] resource
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.redis_v1.types.Instance`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.redis_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_instance" not in self._inner_api_calls:
self._inner_api_calls[
"create_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_instance,
default_retry=self._method_configs["CreateInstance"].retry,
default_timeout=self._method_configs["CreateInstance"].timeout,
client_info=self._client_info,
)
request = cloud_redis_pb2.CreateInstanceRequest(
parent=parent, instance_id=instance_id, instance=instance
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["create_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
cloud_redis_pb2.Instance,
metadata_type=cloud_redis_pb2.OperationMetadata,
)
def update_instance(
self,
update_mask,
instance,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates the metadata and configuration of a specific Redis instance.
Completed longrunning.Operation will contain the new instance object
in the response field. The returned operation is automatically deleted
after a few hours, so there is no need to call DeleteOperation.
Example:
>>> from google.cloud import redis_v1
>>>
>>> client = redis_v1.CloudRedisClient()
>>>
>>> paths_element = 'display_name'
>>> paths_element_2 = 'memory_size_gb'
>>> paths = [paths_element, paths_element_2]
>>> update_mask = {'paths': paths}
>>> display_name = ' instance.memory_size_gb=4'
>>> instance = {'display_name': display_name}
>>>
>>> response = client.update_instance(update_mask, instance)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
update_mask (Union[dict, ~google.cloud.redis_v1.types.FieldMask]): Required. Mask of fields to update. At least one path must be supplied
in this field. The elements of the repeated paths field may only include
these fields from ``Instance``:
- ``displayName``
- ``labels``
- ``memorySizeGb``
- ``redisConfig``
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.redis_v1.types.FieldMask`
instance (Union[dict, ~google.cloud.redis_v1.types.Instance]): Required. Update description. Only fields specified in update\_mask are
updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.redis_v1.types.Instance`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.redis_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_instance" not in self._inner_api_calls:
self._inner_api_calls[
"update_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_instance,
default_retry=self._method_configs["UpdateInstance"].retry,
default_timeout=self._method_configs["UpdateInstance"].timeout,
client_info=self._client_info,
)
request = cloud_redis_pb2.UpdateInstanceRequest(
update_mask=update_mask, instance=instance
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("instance.name", instance.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["update_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
cloud_redis_pb2.Instance,
metadata_type=cloud_redis_pb2.OperationMetadata,
)
def import_instance(
self,
name,
input_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.
Redis may stop serving during this operation. Instance state will be
IMPORTING for entire operation. When complete, the instance will contain
only data from the imported file.
The returned operation is automatically deleted after a few hours, so
there is no need to call DeleteOperation.
Example:
>>> from google.cloud import redis_v1
>>>
>>> client = redis_v1.CloudRedisClient()
>>>
>>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
>>>
>>> # TODO: Initialize `input_config`:
>>> input_config = {}
>>>
>>> response = client.import_instance(name, input_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
input_config (Union[dict, ~google.cloud.redis_v1.types.InputConfig]): Required. Specify data to be imported.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.redis_v1.types.InputConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.redis_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "import_instance" not in self._inner_api_calls:
self._inner_api_calls[
"import_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.import_instance,
default_retry=self._method_configs["ImportInstance"].retry,
default_timeout=self._method_configs["ImportInstance"].timeout,
client_info=self._client_info,
)
request = cloud_redis_pb2.ImportInstanceRequest(
name=name, input_config=input_config
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["import_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
cloud_redis_pb2.Instance,
metadata_type=cloud_redis_pb2.OperationMetadata,
)
def export_instance(
self,
name,
output_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Export Redis instance data into a Redis RDB format file in Cloud Storage.
Redis will continue serving during this operation.
The returned operation is automatically deleted after a few hours, so
there is no need to call DeleteOperation.
Example:
>>> from google.cloud import redis_v1
>>>
>>> client = redis_v1.CloudRedisClient()
>>>
>>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
>>>
>>> # TODO: Initialize `output_config`:
>>> output_config = {}
>>>
>>> response = client.export_instance(name, output_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
output_config (Union[dict, ~google.cloud.redis_v1.types.OutputConfig]): Required. Specify data to be exported.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.redis_v1.types.OutputConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.redis_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "export_instance" not in self._inner_api_calls:
self._inner_api_calls[
"export_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.export_instance,
default_retry=self._method_configs["ExportInstance"].retry,
default_timeout=self._method_configs["ExportInstance"].timeout,
client_info=self._client_info,
)
request = cloud_redis_pb2.ExportInstanceRequest(
name=name, output_config=output_config
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["export_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
cloud_redis_pb2.Instance,
metadata_type=cloud_redis_pb2.OperationMetadata,
)
def failover_instance(
self,
name,
data_protection_mode,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Initiates a failover of the master node to current replica node for a
specific STANDARD tier Cloud Memorystore for Redis instance.
Example:
>>> from google.cloud import redis_v1
>>> from google.cloud.redis_v1 import enums
>>>
>>> client = redis_v1.CloudRedisClient()
>>>
>>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
>>>
>>> # TODO: Initialize `data_protection_mode`:
>>> data_protection_mode = enums.FailoverInstanceRequest.DataProtectionMode.DATA_PROTECTION_MODE_UNSPECIFIED
>>>
>>> response = client.failover_instance(name, data_protection_mode)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
data_protection_mode (~google.cloud.redis_v1.types.DataProtectionMode): Optional. Available data protection modes that the user can choose. If
it's unspecified, data protection mode will be LIMITED\_DATA\_LOSS by
default.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.redis_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "failover_instance" not in self._inner_api_calls:
self._inner_api_calls[
"failover_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.failover_instance,
default_retry=self._method_configs["FailoverInstance"].retry,
default_timeout=self._method_configs["FailoverInstance"].timeout,
client_info=self._client_info,
)
request = cloud_redis_pb2.FailoverInstanceRequest(
name=name, data_protection_mode=data_protection_mode
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["failover_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
cloud_redis_pb2.Instance,
metadata_type=cloud_redis_pb2.OperationMetadata,
)
def delete_instance(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a specific Redis instance. Instance stops serving and data is
deleted.
Example:
>>> from google.cloud import redis_v1
>>>
>>> client = redis_v1.CloudRedisClient()
>>>
>>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
>>>
>>> response = client.delete_instance(name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.redis_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_instance" not in self._inner_api_calls:
self._inner_api_calls[
"delete_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_instance,
default_retry=self._method_configs["DeleteInstance"].retry,
default_timeout=self._method_configs["DeleteInstance"].timeout,
client_info=self._client_info,
)
request = cloud_redis_pb2.DeleteInstanceRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["delete_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=cloud_redis_pb2.OperationMetadata,
)
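# --- Minimal usage sketch (illustrative only, not part of the generated client) ---
# Assumes application-default credentials; project, location, and instance ids below
# are placeholders.
if __name__ == "__main__":
    client = CloudRedisClient()
    parent = client.location_path("my-project", "us-central1")
    instance = {"tier": enums.Instance.Tier.BASIC, "memory_size_gb": 1}
    operation = client.create_instance(parent, "my-redis", instance)
    # Block until the long-running operation finishes, then inspect the new instance.
    result = operation.result()
    print(result.name, result.host, result.port)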
|
class EasingVectorKeyFrame(VectorKeyFrame, ISealable, IKeyFrame):
"""
A class that enables you to associate easing functions with a System.Windows.Media.Animation.VectorAnimationUsingKeyFrames key frame animation.
EasingVectorKeyFrame()
EasingVectorKeyFrame(value: Vector)
EasingVectorKeyFrame(value: Vector,keyTime: KeyTime)
EasingVectorKeyFrame(value: Vector,keyTime: KeyTime,easingFunction: IEasingFunction)
"""
def CloneCore(self, *args):
"""
CloneCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a clone (deep copy) of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValueCore(self, *args):
"""
CloneCurrentValueCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a modifiable clone (deep copy) of the specified System.Windows.Freezable
using current property values.
sourceFreezable: The System.Windows.Freezable to be cloned.
"""
pass
def CreateInstance(self, *args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self, *args):
"""
CreateInstanceCore(self: EasingVectorKeyFrame) -> Freezable
Creates a new instance of the System.Windows.Freezable derived class. When creating a derived
class,you must override this method.
Returns: The new instance.
"""
pass
def FreezeCore(self, *args):
"""
FreezeCore(self: Freezable,isChecking: bool) -> bool
Makes the System.Windows.Freezable object unmodifiable or tests whether it can be made
unmodifiable.
isChecking: true to return an indication of whether the object can be frozen (without actually freezing it);
false to actually freeze the object.
Returns: If isChecking is true,this method returns true if the System.Windows.Freezable can be made
unmodifiable,or false if it cannot be made unmodifiable. If isChecking is false,this method
returns true if the specified System.Windows.Freezable is now unmodifiable,or false if
it cannot be made unmodifiable.
"""
pass
def GetAsFrozenCore(self, *args):
"""
GetAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a frozen clone of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The instance to copy.
"""
pass
def GetCurrentValueAsFrozenCore(self, *args):
"""
GetCurrentValueAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the current instance a frozen clone of the specified System.Windows.Freezable. If the
object has animated dependency properties,their current animated values are copied.
sourceFreezable: The System.Windows.Freezable to copy and freeze.
"""
pass
def InterpolateValueCore(self, *args):
"""
InterpolateValueCore(self: EasingVectorKeyFrame,baseValue: Vector,keyFrameProgress: float) -> Vector
Interpolates,according to the easing function used,between the previous key frame value and
the value of the current key frame,using the supplied progress increment.
baseValue: The value to animate from.
keyFrameProgress: A value between 0.0 and 1.0,inclusive,that specifies the percentage of time that has elapsed
for this key frame.
Returns: The output value of this key frame given the specified base value and progress.
"""
pass
def OnChanged(self, *args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self, *args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self, *args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
dependency property of type System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old and new values.
"""
pass
def ReadPreamble(self, *args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
System.Windows.Freezable must call this method at the beginning of any API that reads data
members that are not dependency properties.
"""
pass
def ShouldSerializeProperty(self, *args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""
pass
def WritePostscript(self, *args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
should call this method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self, *args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
valid threading context. System.Windows.Freezable inheritors should call this method at the
beginning of any API that writes to data members that are not dependency properties.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, value=None, keyTime=None, easingFunction=None):
"""
__new__(cls: type)
__new__(cls: type,value: Vector)
__new__(cls: type,value: Vector,keyTime: KeyTime)
__new__(cls: type,value: Vector,keyTime: KeyTime,easingFunction: IEasingFunction)
"""
pass
EasingFunction = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the easing function applied to the key frame.
Get: EasingFunction(self: EasingVectorKeyFrame) -> IEasingFunction
Set: EasingFunction(self: EasingVectorKeyFrame)=value
"""
EasingFunctionProperty = None
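# --- Minimal usage sketch (comments only; requires IronPython/pythonnet with the WPF
# assemblies loaded, so Vector, KeyTime, and QuadraticEase are assumed to come from
# System.Windows / System.Windows.Media.Animation) ---
# kf = EasingVectorKeyFrame(Vector(10.0, 0.0), KeyTime.FromPercent(0.5), QuadraticEase())
# kf.EasingFunction = QuadraticEase()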
|
from __future__ import print_function
import os
import numpy as np
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
# From Molecules
import sys
sys.path.append('../')
from extract_native_contact.extract_native_contact import ExtractNativeContact
from vae_conv_train_load.cvae_api import CVAE
# For clustering
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from collections import Counter
import matplotlib as mpl
# For reward function
import MDAnalysis as mdanal
from MDAnalysis.analysis.rms import RMSD
from scipy import stats
# For calc_native_contact()
from MDAnalysis.analysis import contacts
import gzip
def scatter_plot_rmsd(data, title, save_path, rmsd_values, vmin=None, vmax=None):
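# Plot a 3D scatter of the embedded points in `data`, coloring each point by its RMSD
# value on a jet colormap, and save the figure to `save_path`.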
[n,s] = np.histogram(rmsd_values, 11)
d = np.digitize(rmsd_values, s)
cmi = plt.get_cmap('jet')
if vmin is None and vmax is None:
cNorm = mpl.colors.Normalize(vmin=min(rmsd_values), vmax=max(rmsd_values))
else:
cNorm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmi)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
p = ax.scatter3D(np.ravel(data[:, 0]),
np.ravel(data[:, 1]),
np.ravel(data[:, 2]),
marker='o', c=scalarMap.to_rgba(rmsd_values))
ax.set_xlim3d(np.amin(np.ravel(data[:, 0])), np.amax(np.ravel(data[:, 0])))
ax.set_ylim3d(np.amin(np.ravel(data[:, 1])), np.amax(np.ravel(data[:, 1])))
ax.set_zlim3d(np.amin(np.ravel(data[:, 2])), np.amax(np.ravel(data[:, 2])))
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
scalarMap.set_array(rmsd_values)
fig.colorbar(scalarMap)
plt.title(title)
plt.savefig(save_path, dpi=600)
plt.cla()
plt.close(fig)
def scatter_plot(data, title, save_path, color='b'):
"""
data : numpy array
must be of dimension (n,3).
title : str
title of desired plot.
save_path : str
file name of save location desired. Containing directory must
already exist.
color : str of list
color scheme desired.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.title(title)
plt.xlim(np.amin(data[:, 0]), np.amax(data[:, 0]))
plt.ylim(np.amin(data[:, 1]), np.amax(data[:, 1]))
ax.set_zlim(np.amin(data[:, 2]), np.amax(data[:, 2]))
ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=color, marker='o')
plt.savefig(save_path)
plt.cla()
plt.close(fig)
def get_cluster_indices(labels, cluster=-1):
"""
labels : DBSCAN.labels_ object (numpy array)
cluster : int
cluster label whose indices are desired.
cluster=-1 (the default) selects the DBSCAN outliers.
"""
indices = []
for i,j in enumerate(labels):
if j == cluster:
indices.append(i)
return indices
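# Example: get_cluster_indices(db.labels_) returns the frames DBSCAN labelled as noise (-1),
# which are later used to spawn new MD simulations.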
def get_all_encoded_data(dir_path, episode, j_num, name):
data = []
if j_num == 0 and episode == 1:
data.append(np.load(dir_path + "/%s_%i_%i.npy" % (name, episode, j_num)))
#print("num = 0 data[0].shape = ",data[0].shape)
#print("returning data[0] = ", data[0])
return data[0]
for i in range(1, episode + 1):
for j in range(j_num + 1):
data.append(np.load(dir_path + "/%s_%i_%i.npy" % (name, i,j)))
#print(data[0], "hello")
#print("data len", len(data))
#print("data[0].shape = ", data[0].shape)
data = np.array(data)
#print(data.shape)
if name[0] == 'e':
return np.reshape(data, (data.shape[0] * data.shape[1], data.shape[-1]))
elif name[0] == 'r':
return np.reshape(data, (data.shape[0] * data.shape[1]))
# TODO: Investigate most recent MDAnalysis update
def calc_native_contact(native_pdb, out_path, dat_file='cont-mat.dat', array_file='cont-mat.array'):
u_native = mdanal.Universe(native_pdb)
# TODO: Automate selection of CA
CA = "(name CA and resid 1:24)"
CA0 = u_native.select_atoms(CA)
ca = contacts.ContactAnalysis1(u_native, selection=(CA, CA), refgroup=(CA0, CA0), radius=8.0,
outfile= out_path + '/' + dat_file)
ca.run(store=True, start=0, stop=1, step=1)
inF_array = gzip.GzipFile(out_path + '/' + array_file +'.gz', 'rb')
s_array = inF_array.read()
inF_array.close()
arr = s_array
arr = np.fromstring(s_array, dtype='float32', sep=' ')
arr = np.reshape(arr, (int(np.sqrt(arr.shape[0])), int(np.sqrt(arr.shape[0]))))
for i in range(0, arr.shape[0]):
arr[i][i] = 0.
if i == arr.shape[0] - 1:
break
else:
# condensed numpy arg for efficiency, JPK 8/9/18
arr[i, i+1] = 0.
arr[i+1, i] = 0.
temp = ''
for ind in range(0, arr.shape[0]):
for inj in range(0, arr.shape[0]):
temp += str( arr[ind][inj])
temp += ' '
temp += '\n'
s_array = temp
# copy to another file
outF_array = open(out_path + '/' + array_file, 'w')
outF_array.write(s_array)
outF_array.close()
# remove zipped array file
os.remove(out_path + '/' + array_file +'.gz')
# TODO: Make class containing parameters
class environment(object):
def __init__(self, cvae_weights_path, sim_steps=20000, traj_out_freq=100, native_pdb=None, output_dir=None):
#TODO: Update environment class with simulation number variable j_sim
# TODO: Generalize to run multiple simulations in 1 RL step.
# State variables
self.rmsd_state = []
self.num_native_contacts = []
self.obs_in_cluster = []
self.num_dbscan_clusters = 1
# TODO: Put in plotting function
self.rmsd_max = -100000
self.rmsd_min = 100000
# TODO: Think of better way to store pdb, dcd files
# If dcd file isn't named like this then it will break the extract native contact
# code. This needs to be fixed.
# IO variables
self.dcd_file = 'output-1.dcd'
self.pdb_file = 'output.pdb'
# TODO: Make input parameter
# For testing purposes
self.initial_pdb = ['/home/a05/data/fs-peptide/raw_MD_data/native-state/fs-peptide-0.pdb',
'/home/a05/data/fs-peptide/raw_MD_data/native-state/fs-peptide-1.pdb',
'/home/a05/data/fs-peptide/raw_MD_data/native-state/fs-peptide-2.pdb',
'/home/a05/data/fs-peptide/raw_MD_data/native-state/fs-peptide-3.pdb',
'/home/a05/data/fs-peptide/raw_MD_data/native-state/fs-peptide-4.pdb']
# TODO: Make input parameter
if native_pdb is None:
# For testing purposes
self.native_pdb = '/home/a05/data/fs-peptide/raw_MD_data/fs-peptide.pdb'
else:
self.native_pdb = native_pdb
self.native_protein = mdanal.Universe(self.native_pdb)
self.cvae_weights_path = cvae_weights_path
self.sim_steps = sim_steps
self.traj_out_freq = traj_out_freq
self.pdb_stack = []
self.rmsd_threshold = 10.0 # Set to random seed?
# TODO: Update parameters based on the size of the data set
# DBSCAN should have 4-5% outliers.
# DBSCAN params
self.d_eps = 0.03
self.d_min_samples = 4 #10
if not os.path.exists(output_dir):
raise Exception("Path " + str(output_dir) + " does not exist!")
self.output_dir = output_dir
# TODO: Put in extractnativecontact class
calc_native_contact(native_pdb=self.native_pdb,
out_path=self.output_dir + '/results/final_output',
dat_file='native-cont-mat.dat',
array_file='native-cont-mat.array')
def initial_state(self, path):
# Run MD simulation
self.MDsimulation(path)
# TODO: 0 index the i_episode
self.internal_step(path=path, i_episode=1, j_cycle=0)
return np.array(self.rmsd_state)
# TODO: Generalize for user defined state and use this function as a return value
# for other functions (initial_state)
def get_state(self):
return np.array(self.rmsd_state)
def reward(self):
# Before calc assert that each vector is the same length
if len(self.rmsd_state) != len(self.num_native_contacts):
raise Exception("Shape mismatch")
if len(self.rmsd_state) != len(self.obs_in_cluster):
raise Exception("Shape mismatch")
reward = 0.0
n = self.sim_steps // self.traj_out_freq # 200
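# Per-frame reward term: (native contacts + RMSD threshold) / (cluster population + RMSD),
# averaged over the n saved frames and scaled below by the number of DBSCAN clusters.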
for i in range(n):
#print('num:', num = float(self.num_native_contacts[i]) + self.rmsd_threshold)
#print('den:', den = float(self.obs_in_cluster[i]) + self.rmsd_state[i])
num = float(self.num_native_contacts[i]) + self.rmsd_threshold
den = float(self.obs_in_cluster[i]) + self.rmsd_state[i]
print('num', num)
print('den', den)
print('float(self.num_native_contacts[i]):',float(self.num_native_contacts[i]))
print('float(self.obs_in_cluster[i]):', float(self.obs_in_cluster[i]))
print('self.rmsd_state[i]:', self.rmsd_state[i])
reward += num/den
if self.num_dbscan_clusters < 0:
print('obs less than 0:', self.num_dbscan_clusters)
return (self.num_dbscan_clusters*reward/n)
def step(self, action, path, i_episode, j_cycle):
# Take action
#return state, reward, done
print("Before update:",self.rmsd_threshold)
self.rmsd_threshold += action
print("After update:",self.rmsd_threshold)
print("len of pdb_stack before sim:",len(self.pdb_stack))
self.MDsimulation(path)
self.internal_step(path, i_episode, j_cycle=j_cycle)
print("len of pdb_stack After sim:",len(self.pdb_stack))
# plot_entire_episode("/results/final_output/intermediate_data/encoded_data_rl_%i_%i.npy" % (i_episode, j_cycle), self.output_dir + "/results/final_output/")
# TODO: Think about what done should be (RL process)
return (np.array(self.rmsd_state), self.reward(), len(self.pdb_stack) == 0)
# TODO: Take in openMM simulation object to allow user to specify all parameters
# TODO: Reuse one simulation throughout the enitre process. Just update coordinates and minimize energy (maybe)
def MDsimulation(self, path, out_dcd_file=None, pdb_in=None,
ff='amber14-all.xml',
water_model='amber14/tip3pfb.xml'):
if not os.path.exists(path):
raise Exception("Path: " + str(path) + " does not exist!")
        if out_dcd_file is None:
out_dcd_file=self.dcd_file
        if pdb_in is None:
if len(self.pdb_stack) == 0:
pdb_in = self.initial_pdb[0]
print("Using initial PDB")
else:
pdb_in = self.pdb_stack[-1]
self.pdb_stack.pop()
pdb = PDBFile(pdb_in)
forcefield = ForceField(ff, water_model)
system = forcefield.createSystem(pdb.topology, nonbondedMethod=NoCutoff,
nonbondedCutoff=1.0*nanometer, constraints=HBonds)
integrator = LangevinIntegrator(300*kelvin, 1/picosecond, 0.002*picoseconds)
simulation = Simulation(pdb.topology, system, integrator)
# Start back from output.pdb last frame
simulation.context.setPositions(pdb.positions)
simulation.minimizeEnergy()
        # Save a frame to the DCD file every "traj_out_freq" steps.
simulation.reporters.append(DCDReporter(path + out_dcd_file, self.traj_out_freq))
        # For every frame saved to the DCD we also write a corresponding PDB file.
        for i in range(self.sim_steps // self.traj_out_freq):
simulation.reporters.append(PDBReporter(path + "pdb_data/output-%i.pdb" % i, self.traj_out_freq))
simulation.step(self.traj_out_freq)
simulation.reporters.pop()
# Writes the final PDB file to the same directory where the DCD file is saved.
fin = open(path + "pdb_data/output-%i.pdb" % (self.sim_steps/self.traj_out_freq - 1))
final_pdb_data = fin.read()
fin.close()
fout = open(path + "/output.pdb", 'w')
fout.write(final_pdb_data)
fout.close()
def internal_step(self, path, i_episode, j_cycle):
# Calculate contact matrix
self.extract_contact_matrix(path)
# Pass contact matrix through CVAE and retrieve encoded_data
encoded_data = self.CVAE_latent_space(path)
# Save encoded_data for analysis
np.save(self.output_dir + "/results/final_output/intermediate_data/encoded_data_rl_%i_%i.npy" % (i_episode, j_cycle),
encoded_data)
# Calculate rmsd values for each PDB file sampled.
self.calc_rmsd_values(path)
# Save rmsd_state for analysis
np.save(self.output_dir + "/results/final_output/intermediate_data/rmsd_data_rl_%i_%i.npy" % (i_episode, j_cycle),
self.rmsd_state)
# Calculate number of native contacts for state
self.calc_num_native_contacts(path)
# Perform DBSCAN clustering on all the data produced in the ith RL iteration.
db = DBSCAN(eps=self.d_eps, min_samples=self.d_min_samples).fit(encoded_data)
        # Build dictionary of the form {cluster id : number of occurrences}
labels_dict = Counter(db.labels_)
# Compute number of DBSCAN clusters for reward function
self.calc_num_dbscan_clusters(labels_dict)
# Compute number of observations in the DBSCAN cluster of the ith PDB
self.calc_obs_in_cluster(labels_dict, db.labels_)
# Finds outliers in the latent space and adds them to self.pdb_stack to spawn new MD simulations
self.get_outliers(path, labels_dict, db.labels_)
def extract_contact_matrix(self, path):
"""
EFFECTS: Generates contact matrices using ExtractNativeContact and outputs
the cont-mat.array and cont-mat.dat files in the native-contact/data
directory.
Parameters:
path : string
path of the directory containing the pdb_file and dcd_file
Returns: Nothing
Saves: cont-mat.array and cont-mat.dat in path/native-contact/data
"""
cm = ExtractNativeContact(path, self.pdb_file, self.dcd_file)
cm.generate_contact_matrix()
def CVAE_latent_space(self, path):
"""
EFFECTS: Computes CVAE generated latent space from contact matrix
stored in path/native-contact/data/cont-mat.array.
Parameters:
path : string
path of the directory containing the native-contact directory
Returns:
encoded_data : numpy array
numpy array with shape (sim_steps/traj_out_freq, 3)
"""
# CVAE should be declared in __init__
        cvae = CVAE(path=path, sep_train=0, sep_test=0, sep_pred=1, f_traj=self.sim_steps // self.traj_out_freq)
cvae.load_contact_matrix(path + "native-contact/data/cont-mat.dat",
path + "native-contact/data/cont-mat.array")
cvae.compile()
cvae.load_weights(self.cvae_weights_path)
return cvae.encode_pred() # encoded_data
def calc_rmsd_values(self, path):
"""
        EFFECTS: First empties self.rmsd_state then refills it with updated
RMSD to native state values from the latest latent sampling.
Also updates self.rmsd_max and self.rmsd_min which are used
for plotting.
Parameters:
path : string
path of the directory containing the pdb_data directory
Returns: Nothing
"""
self.rmsd_state = []
        for i in range(self.sim_steps // self.traj_out_freq):
path_1 = path + "/pdb_data/output-%i.pdb" % i
u = mdanal.Universe(path_1)
R = RMSD(u, self.native_protein)
R.run()
self.rmsd_state.append(R.rmsd[0,2])
if(max(self.rmsd_state) > self.rmsd_max):
self.rmsd_max = max(self.rmsd_state)
if(min(self.rmsd_state) < self.rmsd_min):
self.rmsd_min = min(self.rmsd_state)
def calc_num_native_contacts(self, path):
"""
        EFFECTS: First empties self.num_native_contacts then refills it with updated
native contact values from the latest latent sampling.
Parameters:
path : string
path of the directory containing the pdb_data directory
Returns: Nothing
"""
self.num_native_contacts = []
# TODO: Move this code block to __init__ and make n, and native_cont_mat attributes
##########
fin = open(self.output_dir + '/results/final_output/native-cont-mat.array', "r")
native_cont_mat = fin.read()
fin.close()
native_cont_mat = np.fromstring(native_cont_mat, dtype='float32', sep=' ')
n = int(sqrt(native_cont_mat.shape[0]))
##########
# TODO: Consider putting in ExtractNativeContact
        for i in range(self.sim_steps // self.traj_out_freq):
fin = open(path + "native-contact/raw/cont-mat_%i.array" % i)
ith_cont_mat = fin.read()
fin.close()
ith_cont_mat = np.fromstring(ith_cont_mat, dtype='float32', sep=' ')
counter = 0
row = 0
while row < n + 2:
col = row + 2
shift = row * n
while col < n:
if ith_cont_mat[shift + col] == native_cont_mat[shift + col]:
counter += 1
col += 1
row += 1
self.num_native_contacts.append(counter)
def calc_num_dbscan_clusters(self, labels_dict):
"""
EFFECTS: Resets self.num_dbscan_clusters with the updated
clustering from the latest latent sampling.
Parameters:
labels_dict : dict
Dictionary with key = cluster id and value = number of members
of a DBSCAN cluster db.labels_
Returns: Nothing
"""
self.num_dbscan_clusters = len(labels_dict)
def calc_obs_in_cluster(self, labels_dict, labels):
"""
EFFECTS: Resets self.obs_in_cluster with the updated
clustering from the latest latent sampling.
Parameters:
labels_dict : dict
Dictionary with key = cluster id and value = number of members
of a DBSCAN cluster db.labels_
labels : list
List containing the DBSCAN clustered label data of each point
in the latent space. db.labels_
Returns: Nothing
"""
self.obs_in_cluster = []
for label in labels:
self.obs_in_cluster.append(labels_dict[label])
def get_outliers(self, path, labels_dict, labels):
"""
EFFECTS: Searches all the clusters for outliers. If the cluster id is -1
it is considered a DBSCAN outlier and any latent point DBSCAN
outliers with RMSD to native state less than self.rmsd_threshold
are added to self.pdb_stack to spawn new MD simulations. If the
cluster id is not -1 then we collect all the rmsd values in
rmsd_values and then compute a numpy array with the z-scores
                 of the rmsd_values distribution. Since the CVAE generates
                 normally distributed clusters, the RMSD values should be normally
                 distributed. Thus, any RMSD values with z-score <= -3 are marked as
outliers within the DBSCAN cluster and are added to self.pdb_stack.
Parameters:
path : string
path of the directory containing the pdb_data directory
labels_dict : dict
Dictionary with key = cluster id and value = number of members
of a DBSCAN cluster db.labels_
labels : list
List containing the DBSCAN clustered label data of each point
in the latent space. db.labels_
Returns: Nothing
"""
for cluster in labels_dict:
print('dbscan cluster:', cluster)
print('dbscan clusters:', labels_dict)
indices = get_cluster_indices(labels=labels, cluster=cluster)
path_to_pdb = []
rmsd_values = []
for ind in indices:
path_1 = path + "/pdb_data/output-%i.pdb" % ind
# For DBSCAN outliers
if cluster == -1:
if self.rmsd_state[ind] < self.rmsd_threshold:
# Start next rl iteration with this pdb path_1
print("RMSD threshold:", self.rmsd_threshold)
print("RMSD to native contact for DBSCAN outlier at index %i :" % ind, self.rmsd_state[ind])
self.pdb_stack.append(path_1)
# For RMSD outliers within DBSCAN clusters
else:
rmsd_values.append(self.rmsd_state[ind])
path_to_pdb.append((path_1, ind))
# For RMSD outliers within DBSCAN clusters
if cluster != -1:
rmsd_array = np.array(rmsd_values)
rmsd_zscores = stats.zscore(rmsd_array)
ind = 0
for zscore in rmsd_zscores:
# z-score of -3 marks outlier for a normal distribution.
# Assuming Normal Distribution of RMSD values because
# CVAE yields normally distributed clusters.
if zscore <= -3:
print("RMSD to native contact for DBSCAN clustered outlier at index %i :" % path_to_pdb[ind][1], rmsd_values[ind])
self.pdb_stack.append(path_to_pdb[ind][0])
ind += 1
#TODO calculate RMSD max + min from numpy arrays, then remove this function from environment
def plot_intermediate_episode(self, in_path, out_path, episode, j_cycle, title):
"""
        EFFECTS: Plots DBSCAN clusters colored by cluster id, and then by RMSD.
                 Plots include all previous simulation data, if any, but no data
                 from episodes/cycles occurring later.
Parameters:
in_path: string
                path where rmsd_data.npy and encoded_data.npy are stored
out_path: string
path to folder the plots should be saved to
episode: int
which episode should be plotted (indexed from 1)
j_cycle: int
which cycle should be plotted (indexed from 0)
title: string
                prepended to the beginning of each plot title; used to distinguish final vs. intermediate plots.
Returns: Nothing
Saves: Two plots in 'out_path' folder
"""
int_encoded_data = get_all_encoded_data(in_path, episode, j_cycle, 'encoded_data_rl')
int_rmsd_data = get_all_encoded_data(in_path, episode, j_cycle, 'rmsd_data_rl')
print("int_encoded_data:", len(int_encoded_data))
print("int_rmsd_data:", len(int_rmsd_data))
db = DBSCAN(eps=self.d_eps, min_samples=self.d_min_samples).fit(int_encoded_data)
n_clusters_ = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print(Counter(db.labels_))
scatter_plot(int_encoded_data,
'%s Latent Space (Clusters: %d, RL Episode: %i_%i)' % (title, n_clusters_, episode, j_cycle),
out_path + "dbscan_clusters_rl_%i_%i.png" % (episode, j_cycle),
color=db.labels_)
scatter_plot_rmsd(int_encoded_data,
"%s Latent Space (RL Episode: %i_%i)" % (title, episode, j_cycle),
out_path + "cluster_rmsd_rl_%i_%i.png" % (episode, j_cycle),
rmsd_values=int_rmsd_data,
vmin=self.rmsd_min,
vmax=self.rmsd_max)
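# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch (not part of the original pipeline):
# get_outliers() above flags DBSCAN noise points (cluster id -1) whose RMSD to
# the native state is below the current threshold, and, inside each real
# cluster, frames whose RMSD z-score is <= -3. The snippet below reproduces
# only the z-score rule on made-up RMSD values.
if __name__ == '__main__':
    import numpy as np
    from scipy import stats

    rmsd_values = np.array([3.2] * 19 + [0.4])  # hypothetical RMSDs; last frame is unusually native-like
    zscores = stats.zscore(rmsd_values)
    outlier_frames = [i for i, z in enumerate(zscores) if z <= -3]
    print("outlier frame indices:", outlier_frames)  # expected: [19]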
|
import copy
class CleanseData():
'''
This class contains methods for cleansing the data extracted from DrugBank containing info. about cardiovascular drugs.
There are 3 main parts of the data cleansing process:
    1. Remove, from the list of CV drugs, drugs that do not interact with any entity as well as drugs that only interact with entities lacking a UniProt ID.
This is implemented via the removeDrugs method.
2. Remove proteins with a null uniprot ID. This is implemented via the removeNullUIDEntity method.
3. Merge entities with the same UniProt ID. More specifically, this is done by replacing entity that is classified as 'protein group' on
the corresponding DrugBank web page (the ParseWeb class is implemented to do web scraping to get this info) with members of that protein
    group. The associations between the protein group entities and their drug(s) are established via non-specific publication findings. In other
words, the publication in question finds that the drug is related to the protein group, but not any of its members specifically. The
UniProt ID for the protein group is the same as its first member, ordered alpha-numerically.
For example, in our data, the entities 'Beta adrenergic receptor' and 'Beta-1 adrenergic receptor' both have the same UniProt ID (P08588).
'Beta adrenergic receptor' is the protein group, while 'Beta-1 adrenergic receptor' is one of its members. The former is assigned the same
UniProt ID as the latter rather than its other members (Beta-2 adrenergic receptor and Beta-3 adrenergic receptor) b/c Beta-1 adrenergic
receptor is the first member when all of them are ordered alpha-numerically. The associations between 'Beta adrenergic receptor' and its
drugs are established based on non-specific publication findings. These findings show that the drugs are related to this protein group, but
not any of its members specifically.
'''
def __init__(self,DATA):
'''
@param DATA is the data extracted from DrugBank containing info. about cardiovascular drugs. This data will be used to initialize
the object.
'''
self.data = DATA
def getDrugIndexList(self):
'''
        @return list(set(index_list)) is a list of indices of drugs in the data that do not interact with any entity (i.e. the value of
'targets', 'carriers', 'enzymes', and 'transporters' is an empty list). These will be removed.
'''
index_list = []
for i,drug in enumerate(self.data):
if drug['targets'] == [] and \
drug['enzymes'] == [] and \
drug['carriers'] == [] and \
drug['transporters'] == []:
index_list.append(i)
return list(set(index_list))
def getNullUIDIndexList(self):
'''
        @return indexl is a list of indices of drugs in the data that contain at least one entity with a null UniProt ID.
'''
indexl = []
for i,drug in enumerate(self.data):
for entity in ['targets','enzymes','carriers','transporters']:
for j in drug[entity]:
if j['uniprot_id']=="Null":
indexl.append(i)
indexl = list(set(indexl))
return indexl
def getNullUIDIndexInfo(self):
'''
This method calls the getNullUIDIndexList method.
@return nullid_index_info is a list of dictionaries, each containing an index in the list returned by the getNullUIDIndexList method
as the key and a list of the names of ALL the entities (not just the entity with the null UniProt ID) for that particular index as
the values.
'''
indexl = self.getNullUIDIndexList()
nullid_index_info = []
for i in indexl:
names = []
index_dict = {}
for entity in ['targets','enzymes','carriers','transporters']:
for j in self.data[i][entity]:
names.append(j['name'])
index_dict.update({i:names})
nullid_index_info.append(index_dict)
return nullid_index_info
def getDupUIDs(self):
'''
@return dup_uids is a dictionary, with the keys being Uniprot IDs that correspond to multiple entities (i.e. duplicate UniProt IDs) from
the data. The values of the dictionary are DrugBank IDs of entities with the duplicated UniProt ID.
'''
uid_list = []
ent_dict = {}
for drug in self.data:
for entity in ['targets','enzymes','transporters','carriers']:
for ent in drug[entity]:
uid = ent['uniprot_id']
if not uid in uid_list:
uid_list.append(uid)
ent_dict[uid] = [ent['drugbank_id']]
else:
if not ent['drugbank_id'] in ent_dict[uid]:
ent_dict[uid].append(ent['drugbank_id'])
dup_uids = {}
for item in list(ent_dict.items()):
if len(item[1])>1:
dup_uids[item[0]]=item[1]
return dup_uids
def removeNullUIDEntity(self):
'''
This method calls the getNullUIDIndexList method.
@return self.data is the data after removing entities with a null UniProt ID.
'''
indexl = self.getNullUIDIndexList()
for index in indexl:
for entity in ['targets','enzymes','carriers','transporters']:
for i,j in enumerate(self.data[index][entity]):
if j['uniprot_id']=='Null':
del self.data[index][entity][i]
return self.data
def removeDrugs(self):
'''
This method calls the getNullUIDIndexInfo and getDrugIndexList methods.
        @return self.data is the data after removing drugs (elements) that do not interact with any entity at all, as well as drugs whose
        only interacting entity has a null UniProt ID.
'''
nullid_index_info = self.getNullUIDIndexInfo()
index_list = self.getDrugIndexList()
for i in nullid_index_info:
if len(list(i.values())[0])==1:
index_list.append((list(i.keys())[0]))
index_list.sort()
for index in reversed(index_list):
del self.data[index]
return self.data
def mergeDuplicateUIDs(self,ent_dbid,ent_list):
'''
@ent_dbid is the list containing the DrugBank IDs of the protein groups that have a duplicate UniProt ID
        @ent_list is the list containing dictionaries, each corresponding to a protein group that has a duplicate UniProt ID
        @return self.data is the data after replacing each entity that has a duplicate UniProt ID and is classified as a 'protein group' with
its members. Each member inherits the actions of the protein group. Because it is not known what the action of the drug is on each
specific member, instead of 'actions', this is represented by a new key called 'actions_of_group'. The name of the protein group
from which each member inherits from is indicated by a new key called 'group_name'.
'''
for drug in self.data:
for entity in ['targets','enzymes','carriers','transporters']:
drug_ent = [i.get('drugbank_id') for i in drug[entity]]
ent2del = [i for i in drug[entity] if i.get('drugbank_id') in ent_dbid]
for ent in ent2del:
dbid = ent['drugbank_id']
actions = ent['actions']
group_name = ent['name']
ent_list_ele = [i for i in ent_list if i.get('drugbank_id')==dbid][0]
members = [i for i in ent_list_ele['members'] if i.get('drugbank_id') not in drug_ent]
for member in members:
tmp = copy.deepcopy(member)
tmp.update({'group_name':group_name,\
'actions_of_group':actions})
drug[entity].append(tmp)
drug[entity] = [i for i in drug[entity] if i.get('drugbank_id') not in ent_dbid]
return self.data
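# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The fake records
# below only mirror the structure the methods above expect (each drug carries
# 'targets'/'enzymes'/'carriers'/'transporters' lists whose items have 'name',
# 'uniprot_id' and 'drugbank_id'); every value is made up.
if __name__ == '__main__':
    fake_data = [
        {  # interacts with nothing -> removed by removeDrugs
            'targets': [], 'enzymes': [], 'carriers': [], 'transporters': [],
        },
        {  # its only entity has a null UniProt ID -> also removed by removeDrugs
            'targets': [{'name': 'Unknown protein', 'uniprot_id': 'Null', 'drugbank_id': 'BE0000001'}],
            'enzymes': [], 'carriers': [], 'transporters': [],
        },
        {  # one well-formed target -> kept
            'targets': [{'name': 'Beta-1 adrenergic receptor', 'uniprot_id': 'P08588', 'drugbank_id': 'BE0000172'}],
            'enzymes': [], 'carriers': [], 'transporters': [],
        },
    ]
    cleanser = CleanseData(fake_data)
    cleanser.removeDrugs()
    cleanser.removeNullUIDEntity()
    print(len(cleanser.data), 'drug(s) remain')  # expected: 1
    print(cleanser.getDupUIDs())                 # expected: {} (no duplicated UniProt IDs)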
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1.types import pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_service
from google.cloud.aiplatform_v1.types import training_pipeline
from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO
class PipelineServiceGrpcTransport(PipelineServiceTransport):
"""gRPC backend transport for PipelineService.
A service for creating and managing Vertex AI's pipelines. This
includes both ``TrainingPipeline`` resources (used for AutoML and
custom training) and ``PipelineJob`` resources (used for Vertex
Pipelines).
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_training_pipeline(
self,
) -> Callable[
[pipeline_service.CreateTrainingPipelineRequest],
gca_training_pipeline.TrainingPipeline,
]:
r"""Return a callable for the create training pipeline method over gRPC.
        Creates a TrainingPipeline. A newly created
        TrainingPipeline will be attempted to be run right away.
Returns:
Callable[[~.CreateTrainingPipelineRequest],
~.TrainingPipeline]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_training_pipeline" not in self._stubs:
self._stubs["create_training_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline",
request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize,
response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize,
)
return self._stubs["create_training_pipeline"]
@property
def get_training_pipeline(
self,
) -> Callable[
[pipeline_service.GetTrainingPipelineRequest],
training_pipeline.TrainingPipeline,
]:
r"""Return a callable for the get training pipeline method over gRPC.
Gets a TrainingPipeline.
Returns:
Callable[[~.GetTrainingPipelineRequest],
~.TrainingPipeline]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_training_pipeline" not in self._stubs:
self._stubs["get_training_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline",
request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize,
response_deserializer=training_pipeline.TrainingPipeline.deserialize,
)
return self._stubs["get_training_pipeline"]
@property
def list_training_pipelines(
self,
) -> Callable[
[pipeline_service.ListTrainingPipelinesRequest],
pipeline_service.ListTrainingPipelinesResponse,
]:
r"""Return a callable for the list training pipelines method over gRPC.
Lists TrainingPipelines in a Location.
Returns:
Callable[[~.ListTrainingPipelinesRequest],
~.ListTrainingPipelinesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_training_pipelines" not in self._stubs:
self._stubs["list_training_pipelines"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines",
request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize,
response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize,
)
return self._stubs["list_training_pipelines"]
@property
def delete_training_pipeline(
self,
) -> Callable[
[pipeline_service.DeleteTrainingPipelineRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete training pipeline method over gRPC.
Deletes a TrainingPipeline.
Returns:
Callable[[~.DeleteTrainingPipelineRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_training_pipeline" not in self._stubs:
self._stubs["delete_training_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline",
request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_training_pipeline"]
@property
def cancel_training_pipeline(
self,
) -> Callable[[pipeline_service.CancelTrainingPipelineRequest], empty_pb2.Empty]:
r"""Return a callable for the cancel training pipeline method over gRPC.
Cancels a TrainingPipeline. Starts asynchronous cancellation on
the TrainingPipeline. The server makes a best effort to cancel
the pipeline, but success is not guaranteed. Clients can use
[PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]
or other methods to check whether the cancellation succeeded or
whether the pipeline completed despite cancellation. On
successful cancellation, the TrainingPipeline is not deleted;
instead it becomes a pipeline with a
[TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error]
value with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``, and
[TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state]
is set to ``CANCELLED``.
Returns:
Callable[[~.CancelTrainingPipelineRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_training_pipeline" not in self._stubs:
self._stubs["cancel_training_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline",
request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["cancel_training_pipeline"]
@property
def create_pipeline_job(
self,
) -> Callable[
[pipeline_service.CreatePipelineJobRequest], gca_pipeline_job.PipelineJob
]:
r"""Return a callable for the create pipeline job method over gRPC.
Creates a PipelineJob. A PipelineJob will run
immediately when created.
Returns:
Callable[[~.CreatePipelineJobRequest],
~.PipelineJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_pipeline_job" not in self._stubs:
self._stubs["create_pipeline_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob",
request_serializer=pipeline_service.CreatePipelineJobRequest.serialize,
response_deserializer=gca_pipeline_job.PipelineJob.deserialize,
)
return self._stubs["create_pipeline_job"]
@property
def get_pipeline_job(
self,
) -> Callable[[pipeline_service.GetPipelineJobRequest], pipeline_job.PipelineJob]:
r"""Return a callable for the get pipeline job method over gRPC.
Gets a PipelineJob.
Returns:
Callable[[~.GetPipelineJobRequest],
~.PipelineJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_pipeline_job" not in self._stubs:
self._stubs["get_pipeline_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob",
request_serializer=pipeline_service.GetPipelineJobRequest.serialize,
response_deserializer=pipeline_job.PipelineJob.deserialize,
)
return self._stubs["get_pipeline_job"]
@property
def list_pipeline_jobs(
self,
) -> Callable[
[pipeline_service.ListPipelineJobsRequest],
pipeline_service.ListPipelineJobsResponse,
]:
r"""Return a callable for the list pipeline jobs method over gRPC.
Lists PipelineJobs in a Location.
Returns:
Callable[[~.ListPipelineJobsRequest],
~.ListPipelineJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_pipeline_jobs" not in self._stubs:
self._stubs["list_pipeline_jobs"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs",
request_serializer=pipeline_service.ListPipelineJobsRequest.serialize,
response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize,
)
return self._stubs["list_pipeline_jobs"]
@property
def delete_pipeline_job(
self,
) -> Callable[
[pipeline_service.DeletePipelineJobRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete pipeline job method over gRPC.
Deletes a PipelineJob.
Returns:
Callable[[~.DeletePipelineJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_pipeline_job" not in self._stubs:
self._stubs["delete_pipeline_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob",
request_serializer=pipeline_service.DeletePipelineJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_pipeline_job"]
@property
def cancel_pipeline_job(
self,
) -> Callable[[pipeline_service.CancelPipelineJobRequest], empty_pb2.Empty]:
r"""Return a callable for the cancel pipeline job method over gRPC.
Cancels a PipelineJob. Starts asynchronous cancellation on the
PipelineJob. The server makes a best effort to cancel the
pipeline, but success is not guaranteed. Clients can use
[PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]
or other methods to check whether the cancellation succeeded or
whether the pipeline completed despite cancellation. On
successful cancellation, the PipelineJob is not deleted; instead
it becomes a pipeline with a
[PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error]
value with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``, and
[PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state]
is set to ``CANCELLED``.
Returns:
Callable[[~.CancelPipelineJobRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_pipeline_job" not in self._stubs:
self._stubs["cancel_pipeline_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob",
request_serializer=pipeline_service.CancelPipelineJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["cancel_pipeline_job"]
def close(self):
self.grpc_channel.close()
__all__ = ("PipelineServiceGrpcTransport",)
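# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated module): one way to exercise
# this transport without application-default credentials is to hand it a
# pre-built gRPC channel; as the constructor above shows, credentials are
# ignored when ``channel`` is supplied. The endpoint below is a hypothetical
# local test double, not a real Vertex AI host.
if __name__ == "__main__":
    channel = grpc.insecure_channel("localhost:8080")  # hypothetical endpoint
    transport = PipelineServiceGrpcTransport(channel=channel)
    # Each RPC is exposed as a lazily created stub (a unary-unary callable), e.g.:
    rpc = transport.get_training_pipeline
    print(type(rpc))
    transport.close()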
|
from copy import copy
import re
try:
    from collections.abc import Iterable  # Python 3.3+; removed from collections in 3.10
except ImportError:  # Python 2 fallback
    from collections import Iterable
class FilterListException(Exception):
pass
class NotFound(FilterListException):
pass
class InvalidList(FilterListException):
pass
class FilterList(list):
def __init__(self, *args, **kwargs):
super(FilterList, self).__init__(*args, **kwargs)
if not all(isinstance(item, dict) for item in self):
raise InvalidList('Every element in the list must be a dictionary')
self.valid_operations = ['in', 'regex', 'iregex', 'contains', 'icontains', 'iexact']
def __setitem__(self, key, item, *args, **kwargs):
if not isinstance(item, dict):
raise TypeError('All elements must be a dictionary. You are trying to add the element {}, which is not a dictionary'.format(item))
super(FilterList, self).__setitem__(key, item, *args, **kwargs)
def append(self, value, *args, **kwargs):
if not isinstance(value, dict):
raise TypeError('All elements must be a dictionary. You are trying to add the element {}, which is not a dictionary.'.format(value))
super(FilterList, self).append(value, *args, **kwargs)
def get(self, *args, **kwargs):
if args:
msg = 'Get method only accepts keyword arguments. For example, item.get(id=1)'
raise TypeError(msg)
if not kwargs:
msg = 'Get method requires at least one keyword argument. For example, item.get(id=1)'
raise TypeError(msg)
result_list = self.filter(**kwargs)
if not result_list:
raise NotFound('No items found with the following filter: {}'.format(kwargs))
if len(result_list) != 1:
            raise FilterListException('Expected to find 1 item but found {}'.format(len(result_list)))
return result_list[0]
def filter(self, *args, **kwargs):
"""
Accepts a list of keyword arguments to search for.
The key can contain special operations like __regex, __contains
        i.e., you can pass in name__contains="bob", and this method will return all dictionaries in the list
        whose name field contains the word "bob"
return: new FilterList object that matches the given kwargs
"""
if args:
            msg = 'Filter method only accepts keyword arguments. For example, item.filter(id=1)'
raise TypeError(msg)
filtered_result = copy(self)
for key_string, value in kwargs.items():
keys = self._get_keys(key_string)
operation = self._get_operation(key_string)
filtered_result = self._get_filtered_list(filtered_result, keys, value, operation)
return self.__class__(filtered_result)
def _get_keys(self, key_string):
"""
parses the key string into a list of keys
for example, if key_string is animal__dog__contains, it will return ['animal', 'dog']
return: a list of keys
"""
assert key_string # should never be an empty string
keys = key_string.split('__')
if keys[-1] in self.valid_operations:
keys = keys[:-1]
return keys
def _get_operation(self, key_string):
"""
returns the operation
"""
assert key_string # should never be an empty string
operation = 'exact'
keys = key_string.split('__')
if keys[-1] in self.valid_operations:
operation = keys[-1]
return operation
def _get_filtered_list(self, original_list, keys, value, operation):
"""
returns a list where the key matches the value based on the operation
"""
result = []
for item in original_list:
# adds the dictionary to the results if any of the keys match the value
if keys[-1] == '_any':
# get the leaf of the nested dictionary
if len(keys) > 1:
try:
nested_dict = self._get_value(keys[:-1], item)
except NotFound:
continue
else:
nested_dict = item
for _, found_value in nested_dict.items():
if self._matched_found_value(value, found_value, operation):
result.append(item)
break
else:
try:
found_value = self._get_value(keys, item)
except NotFound:
continue
if self._matched_found_value(value, found_value, operation):
result.append(item)
return result
def _matched_found_value(self, value, found_value, operation):
"""
Returns : boolean: True if value matches found_value, otherwise, False
:param value: the value to match for
:param found_value: the value found in the dictionary
:param operation: determines how to match the value to found_value
"""
if operation == 'exact':
if found_value == value:
return True
else:
return False
elif operation == 'iexact':
if found_value.lower() == value.lower():
return True
else:
return False
elif operation == 'in':
if isinstance(value, Iterable) and found_value in value:
return True
else:
return False
elif operation == 'regex':
if not isinstance(found_value, str):
return False
if re.search(value, found_value):
return True
else:
return False
elif operation == 'iregex':
if not isinstance(found_value, str):
return False
if re.search(value, found_value, re.IGNORECASE):
return True
else:
return False
elif operation == 'contains':
if not isinstance(found_value, Iterable):
return False
if value in found_value:
return True
else:
return False
elif operation == 'icontains':
if isinstance(found_value, dict):
return False
elif isinstance(found_value, list):
found_value_lower = [item.lower() for item in found_value if isinstance(item, str)]
elif not isinstance(found_value, Iterable):
return False
else:
found_value_lower = found_value.lower()
if value.lower() in found_value_lower:
return True
else:
return False
else:
raise FilterListException('{} is not a valid operation'.format(operation))
def _get_value(self, keys, mydict):
"""
Gets the value from a dictionary based on the keys.
:param keys - a list of keys, each additional key will look for nested keys
:param mydict - a nested dictionary
returns the value of mydict[keys[0]][keys[1]]...
raise NotFound exception if any of the keys is missing
"""
tmp_dict = copy(mydict)
for i, key in enumerate(keys):
if key not in tmp_dict:
raise NotFound
value = tmp_dict.get(key)
if i == len(keys) - 1:
return value
else:
if not isinstance(value, dict):
raise NotFound
tmp_dict = value
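# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the records are
# made up and only demonstrate the exact / nested-key / __in / __icontains
# lookups implemented above.
if __name__ == '__main__':
    people = FilterList([
        {'id': 1, 'name': 'Bob Smith', 'address': {'city': 'Boston'}},
        {'id': 2, 'name': 'Alice Jones', 'address': {'city': 'Austin'}},
        {'id': 3, 'name': 'Bobby Brown', 'address': {'city': 'Boston'}},
    ])
    print(people.filter(name__icontains='bob'))   # ids 1 and 3
    print(people.filter(address__city='Boston'))  # nested key lookup, ids 1 and 3
    print(people.filter(id__in=[1, 2]))           # ids 1 and 2
    print(people.get(id=2)['name'])               # 'Alice Jones'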
|
# util/langhelpers.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
import itertools
import inspect
import operator
import re
import sys
import types
import warnings
from functools import update_wrapper
from .. import exc
import hashlib
from . import compat
from . import _collections
def md5_hex(x):
if compat.py3k:
x = x.encode('utf-8')
m = hashlib.md5()
m.update(x)
return m.hexdigest()
class safe_reraise(object):
"""Reraise an exception after invoking some
handler code.
Stores the existing exception info before
invoking so that it is maintained across a potential
coroutine context switch.
e.g.::
try:
sess.commit()
except:
with safe_reraise():
sess.rollback()
"""
def __enter__(self):
self._exc_info = sys.exc_info()
def __exit__(self, type_, value, traceback):
# see #2703 for notes
if type_ is None:
exc_type, exc_value, exc_tb = self._exc_info
self._exc_info = None # remove potential circular references
compat.reraise(exc_type, exc_value, exc_tb)
else:
if not compat.py3k and self._exc_info and self._exc_info[1]:
# emulate Py3K's behavior of telling us when an exception
# occurs in an exception handler.
warn(
"An exception has occurred during handling of a "
"previous exception. The previous exception "
"is:\n %s %s\n" % (self._exc_info[0], self._exc_info[1]))
self._exc_info = None # remove potential circular references
compat.reraise(type_, value, traceback)
def decode_slice(slc):
"""decode a slice object as sent to __getitem__.
takes into account the 2.5 __index__() method, basically.
"""
ret = []
for x in slc.start, slc.stop, slc.step:
if hasattr(x, '__index__'):
x = x.__index__()
ret.append(x)
return tuple(ret)
def _unique_symbols(used, *bases):
used = set(used)
for base in bases:
pool = itertools.chain((base,),
compat.itertools_imap(lambda i: base + str(i),
range(1000)))
for sym in pool:
if sym not in used:
used.add(sym)
yield sym
break
else:
raise NameError("exhausted namespace for symbol base %s" % base)
def map_bits(fn, n):
"""Call the given function given each nonzero bit from n."""
while n:
b = n & (~n + 1)
yield fn(b)
n ^= b
def decorator(target):
"""A signature-matching decorator factory."""
def decorate(fn):
if not inspect.isfunction(fn):
raise Exception("not a decoratable function")
spec = compat.inspect_getfullargspec(fn)
names = tuple(spec[0]) + spec[1:3] + (fn.__name__,)
targ_name, fn_name = _unique_symbols(names, 'target', 'fn')
metadata = dict(target=targ_name, fn=fn_name)
metadata.update(format_argspec_plus(spec, grouped=False))
metadata['name'] = fn.__name__
code = """\
def %(name)s(%(args)s):
return %(target)s(%(fn)s, %(apply_kw)s)
""" % metadata
decorated = _exec_code_in_env(code,
{targ_name: target, fn_name: fn},
fn.__name__)
decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
decorated.__wrapped__ = fn
return update_wrapper(decorated, fn)
return update_wrapper(decorate, target)
def _exec_code_in_env(code, env, fn_name):
exec(code, env)
return env[fn_name]
def public_factory(target, location):
"""Produce a wrapping function for the given cls or classmethod.
Rationale here is so that the __init__ method of the
class can serve as documentation for the function.
"""
if isinstance(target, type):
fn = target.__init__
callable_ = target
doc = "Construct a new :class:`.%s` object. \n\n"\
"This constructor is mirrored as a public API function; "\
"see :func:`~%s` "\
"for a full usage and argument description." % (
target.__name__, location, )
else:
fn = callable_ = target
doc = "This function is mirrored; see :func:`~%s` "\
"for a description of arguments." % location
location_name = location.split(".")[-1]
spec = compat.inspect_getfullargspec(fn)
del spec[0][0]
metadata = format_argspec_plus(spec, grouped=False)
metadata['name'] = location_name
code = """\
def %(name)s(%(args)s):
return cls(%(apply_kw)s)
""" % metadata
env = {'cls': callable_, 'symbol': symbol}
exec(code, env)
decorated = env[location_name]
decorated.__doc__ = fn.__doc__
decorated.__module__ = "sqlalchemy" + location.rsplit(".", 1)[0]
if compat.py2k or hasattr(fn, '__func__'):
fn.__func__.__doc__ = doc
else:
fn.__doc__ = doc
return decorated
class PluginLoader(object):
def __init__(self, group, auto_fn=None):
self.group = group
self.impls = {}
self.auto_fn = auto_fn
def load(self, name):
if name in self.impls:
return self.impls[name]()
if self.auto_fn:
loader = self.auto_fn(name)
if loader:
self.impls[name] = loader
return loader()
try:
import pkg_resources
except ImportError:
pass
else:
for impl in pkg_resources.iter_entry_points(
self.group, name):
self.impls[name] = impl.load
return impl.load()
raise exc.NoSuchModuleError(
"Can't load plugin: %s:%s" %
(self.group, name))
def register(self, name, modulepath, objname):
def load():
mod = compat.import_(modulepath)
for token in modulepath.split(".")[1:]:
mod = getattr(mod, token)
return getattr(mod, objname)
self.impls[name] = load
def get_cls_kwargs(cls, _set=None):
"""Return the full set of inherited kwargs for the given `cls`.
Probes a class's __init__ method, collecting all named arguments. If the
__init__ defines a \**kwargs catch-all, then the constructor is presumed
to pass along unrecognized keywords to its base classes, and the
collection process is repeated recursively on each of the bases.
Uses a subset of inspect.getargspec() to cut down on method overhead.
No anonymous tuple arguments please !
"""
toplevel = _set is None
if toplevel:
_set = set()
ctr = cls.__dict__.get('__init__', False)
has_init = ctr and isinstance(ctr, types.FunctionType) and \
isinstance(ctr.__code__, types.CodeType)
if has_init:
names, has_kw = inspect_func_args(ctr)
_set.update(names)
if not has_kw and not toplevel:
return None
if not has_init or has_kw:
for c in cls.__bases__:
if get_cls_kwargs(c, _set) is None:
break
_set.discard('self')
return _set
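# Illustrative note (not from the original source): given
#     class A(object):
#         def __init__(self, a, **kw): ...
#     class B(A):
#         def __init__(self, b, c=1, **kw): ...
# get_cls_kwargs(B) returns {'a', 'b', 'c'}, because B's **kw catch-all causes
# the search to continue into its base class A.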
try:
# TODO: who doesn't have this constant?
from inspect import CO_VARKEYWORDS
def inspect_func_args(fn):
co = fn.__code__
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
has_kw = bool(co.co_flags & CO_VARKEYWORDS)
return args, has_kw
except ImportError:
def inspect_func_args(fn):
names, _, has_kw, _ = inspect.getargspec(fn)
return names, bool(has_kw)
def get_func_kwargs(func):
"""Return the set of legal kwargs for the given `func`.
Uses getargspec so is safe to call for methods, functions,
etc.
"""
return compat.inspect_getargspec(func)[0]
def get_callable_argspec(fn, no_self=False, _is_init=False):
"""Return the argument signature for any callable.
All pure-Python callables are accepted, including
functions, methods, classes, objects with __call__;
builtins and other edge cases like functools.partial() objects
raise a TypeError.
"""
if inspect.isbuiltin(fn):
raise TypeError("Can't inspect builtin: %s" % fn)
elif inspect.isfunction(fn):
if _is_init and no_self:
spec = compat.inspect_getargspec(fn)
return compat.ArgSpec(spec.args[1:], spec.varargs,
spec.keywords, spec.defaults)
else:
return compat.inspect_getargspec(fn)
elif inspect.ismethod(fn):
if no_self and (_is_init or fn.__self__):
spec = compat.inspect_getargspec(fn.__func__)
return compat.ArgSpec(spec.args[1:], spec.varargs,
spec.keywords, spec.defaults)
else:
return compat.inspect_getargspec(fn.__func__)
elif inspect.isclass(fn):
return get_callable_argspec(
fn.__init__, no_self=no_self, _is_init=True)
elif hasattr(fn, '__func__'):
return compat.inspect_getargspec(fn.__func__)
elif hasattr(fn, '__call__'):
if inspect.ismethod(fn.__call__):
return get_callable_argspec(fn.__call__, no_self=no_self)
else:
raise TypeError("Can't inspect callable: %s" % fn)
else:
raise TypeError("Can't inspect callable: %s" % fn)
def format_argspec_plus(fn, grouped=True):
"""Returns a dictionary of formatted, introspected function arguments.
    An enhanced variant of inspect.formatargspec to support code generation.
fn
An inspectable callable or tuple of inspect getargspec() results.
grouped
Defaults to True; include (parens, around, argument) lists
Returns:
args
Full inspect.formatargspec for fn
self_arg
The name of the first positional argument, varargs[0], or None
if the function defines no positional arguments.
apply_pos
args, re-written in calling rather than receiving syntax. Arguments are
passed positionally.
apply_kw
Like apply_pos, except keyword-ish args are passed as keywords.
Example::
>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
{'args': '(self, a, b, c=3, **d)',
'self_arg': 'self',
'apply_kw': '(self, a, b, c=c, **d)',
'apply_pos': '(self, a, b, c, **d)'}
"""
if compat.callable(fn):
spec = compat.inspect_getfullargspec(fn)
else:
# we accept an existing argspec...
spec = fn
args = inspect.formatargspec(*spec)
if spec[0]:
self_arg = spec[0][0]
elif spec[1]:
self_arg = '%s[0]' % spec[1]
else:
self_arg = None
if compat.py3k:
apply_pos = inspect.formatargspec(spec[0], spec[1],
spec[2], None, spec[4])
num_defaults = 0
if spec[3]:
num_defaults += len(spec[3])
if spec[4]:
num_defaults += len(spec[4])
name_args = spec[0] + spec[4]
else:
apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
num_defaults = 0
if spec[3]:
num_defaults += len(spec[3])
name_args = spec[0]
if num_defaults:
defaulted_vals = name_args[0 - num_defaults:]
else:
defaulted_vals = ()
apply_kw = inspect.formatargspec(name_args, spec[1], spec[2],
defaulted_vals,
formatvalue=lambda x: '=' + x)
if grouped:
return dict(args=args, self_arg=self_arg,
apply_pos=apply_pos, apply_kw=apply_kw)
else:
return dict(args=args[1:-1], self_arg=self_arg,
apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
def format_argspec_init(method, grouped=True):
"""format_argspec_plus with considerations for typical __init__ methods
Wraps format_argspec_plus with error handling strategies for typical
__init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
if method is object.__init__:
args = grouped and '(self)' or 'self'
else:
try:
return format_argspec_plus(method, grouped=grouped)
except TypeError:
args = (grouped and '(self, *args, **kwargs)'
or 'self, *args, **kwargs')
return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
"""inspect.getargspec with considerations for typical __init__ methods
Wraps inspect.getargspec with error handling for typical __init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return compat.inspect_getargspec(method)
except TypeError:
if method is object.__init__:
return (['self'], None, None, None)
else:
return (['self'], 'args', 'kwargs', None)
def unbound_method_to_callable(func_or_cls):
"""Adjust the incoming callable such that a 'self' argument is not
required.
"""
if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__:
return func_or_cls.__func__
else:
return func_or_cls
def generic_repr(obj, additional_kw=(), to_inspect=None, omit_kwarg=()):
"""Produce a __repr__() based on direct association of the __init__()
specification vs. same-named attributes present.
"""
if to_inspect is None:
to_inspect = [obj]
else:
to_inspect = _collections.to_list(to_inspect)
missing = object()
pos_args = []
kw_args = _collections.OrderedDict()
vargs = None
for i, insp in enumerate(to_inspect):
try:
(_args, _vargs, vkw, defaults) = \
compat.inspect_getargspec(insp.__init__)
except TypeError:
continue
else:
default_len = defaults and len(defaults) or 0
if i == 0:
if _vargs:
vargs = _vargs
if default_len:
pos_args.extend(_args[1:-default_len])
else:
pos_args.extend(_args[1:])
else:
kw_args.update([
(arg, missing) for arg in _args[1:-default_len]
])
if default_len:
kw_args.update([
(arg, default)
for arg, default
in zip(_args[-default_len:], defaults)
])
output = []
output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)
if vargs is not None and hasattr(obj, vargs):
output.extend([repr(val) for val in getattr(obj, vargs)])
for arg, defval in kw_args.items():
if arg in omit_kwarg:
continue
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append('%s=%r' % (arg, val))
except Exception:
pass
if additional_kw:
for arg, defval in additional_kw:
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append('%s=%r' % (arg, val))
except Exception:
pass
return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
class portable_instancemethod(object):
"""Turn an instancemethod into a (parent, name) pair
to produce a serializable callable.
"""
__slots__ = 'target', 'name', '__weakref__'
def __getstate__(self):
return {'target': self.target, 'name': self.name}
def __setstate__(self, state):
self.target = state['target']
self.name = state['name']
def __init__(self, meth):
self.target = meth.__self__
self.name = meth.__name__
def __call__(self, *arg, **kw):
return getattr(self.target, self.name)(*arg, **kw)
def class_hierarchy(cls):
"""Return an unordered sequence of all classes related to cls.
Traverses diamond hierarchies.
Fibs slightly: subclasses of builtin types are not returned. Thus
class_hierarchy(class A(object)) returns (A, object), not A plus every
class systemwide that derives from object.
Old-style classes are discarded and hierarchies rooted on them
will not be descended.
"""
if compat.py2k:
if isinstance(cls, types.ClassType):
return list()
hier = set([cls])
process = list(cls.__mro__)
while process:
c = process.pop()
if compat.py2k:
if isinstance(c, types.ClassType):
continue
bases = (_ for _ in c.__bases__
if _ not in hier and not isinstance(_, types.ClassType))
else:
bases = (_ for _ in c.__bases__ if _ not in hier)
for b in bases:
process.append(b)
hier.add(b)
if compat.py3k:
if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'):
continue
else:
if c.__module__ == '__builtin__' or not hasattr(
c, '__subclasses__'):
continue
for s in [_ for _ in c.__subclasses__() if _ not in hier]:
process.append(s)
hier.add(s)
return list(hier)
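# Illustrative sketch (not part of the original module): class_hierarchy()
# collects both superclasses and subclasses, including across a diamond.
def _example_class_hierarchy():
    class Base(object):
        pass
    class Left(Base):
        pass
    class Right(Base):
        pass
    class Diamond(Left, Right):
        pass
    # Starting from Left, the traversal reaches its bases and their
    # subclasses as well.
    return sorted(c.__name__ for c in class_hierarchy(Left))
    # -> ['Base', 'Diamond', 'Left', 'Right', 'object']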
def iterate_attributes(cls):
"""iterate all the keys and attributes associated
with a class, without using getattr().
Does not use getattr() so that class-sensitive
descriptors (i.e. property.__get__()) are not called.
"""
keys = dir(cls)
for key in keys:
for c in cls.__mro__:
if key in c.__dict__:
yield (key, c.__dict__[key])
break
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
name='self.proxy', from_instance=None):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = ('__slots__', '__del__', '__getattribute__',
'__metaclass__', '__getstate__', '__setstate__')
dunders = [m for m in dir(from_cls)
if (m.startswith('__') and m.endswith('__') and
not hasattr(into_cls, m) and m not in skip)]
for method in dunders:
try:
fn = getattr(from_cls, method)
if not hasattr(fn, '__call__'):
continue
fn = getattr(fn, 'im_func', fn)
except AttributeError:
continue
try:
spec = compat.inspect_getargspec(fn)
fn_args = inspect.formatargspec(spec[0])
d_args = inspect.formatargspec(spec[0][1:])
except TypeError:
fn_args = '(self, *args, **kw)'
d_args = '(*args, **kw)'
py = ("def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals())
env = from_instance is not None and {name: from_instance} or {}
compat.exec_(py, env)
try:
env[method].__defaults__ = fn.__defaults__
except AttributeError:
pass
setattr(into_cls, method, env[method])
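# Illustrative sketch (not part of the original module): generating forwarding
# dunders on a hypothetical Proxy class so that special methods it lacks are
# delegated to the object stored in ``self.proxy``.
def _example_monkeypatch_proxied_specials():
    class Target(object):
        def __len__(self):
            return 3
    class Proxy(object):
        def __init__(self, target):
            self.proxy = target
    monkeypatch_proxied_specials(Proxy, Target)
    # A ``__len__`` forwarding to ``self.proxy.__len__()`` has been generated.
    return len(Proxy(Target()))  # -> 3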
def methods_equivalent(meth1, meth2):
"""Return True if the two methods are the same implementation."""
return getattr(meth1, '__func__', meth1) is getattr(
meth2, '__func__', meth2)
def as_interface(obj, cls=None, methods=None, required=None):
"""Ensure basic interface compliance for an instance or dict of callables.
Checks that ``obj`` implements public methods of ``cls`` or has members
listed in ``methods``. If ``required`` is not supplied, implementing at
least one interface method is sufficient. Methods present on ``obj`` that
are not in the interface are ignored.
If ``obj`` is a dict and ``dict`` does not meet the interface
requirements, the keys of the dictionary are inspected. Keys present in
``obj`` that are not in the interface will raise TypeErrors.
Raises TypeError if ``obj`` does not meet the interface criteria.
In all passing cases, an object with callable members is returned. In the
simple case, ``obj`` is returned as-is; if dict processing kicks in then
an anonymous class is returned.
obj
A type, instance, or dictionary of callables.
cls
Optional, a type. All public methods of cls are considered the
interface. An ``obj`` instance of cls will always pass, ignoring
``required``.
methods
Optional, a sequence of method names to consider as the interface.
required
Optional, a sequence of mandatory implementations. If omitted, an
``obj`` that provides at least one interface method is considered
sufficient. As a convenience, required may be a type, in which case
all public methods of the type are required.
"""
if not cls and not methods:
raise TypeError('a class or collection of method names are required')
if isinstance(cls, type) and isinstance(obj, cls):
return obj
interface = set(methods or [m for m in dir(cls) if not m.startswith('_')])
implemented = set(dir(obj))
complies = operator.ge
if isinstance(required, type):
required = interface
elif not required:
required = set()
complies = operator.gt
else:
required = set(required)
if complies(implemented.intersection(interface), required):
return obj
# No dict duck typing here.
if not isinstance(obj, dict):
qualifier = complies is operator.gt and 'any of' or 'all of'
raise TypeError("%r does not implement %s: %s" % (
obj, qualifier, ', '.join(interface)))
class AnonymousInterface(object):
"""A callable-holding shell."""
if cls:
AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
found = set()
for method, impl in dictlike_iteritems(obj):
if method not in interface:
raise TypeError("%r: unknown in this interface" % method)
if not compat.callable(impl):
raise TypeError("%r=%r is not callable" % (method, impl))
setattr(AnonymousInterface, method, staticmethod(impl))
found.add(method)
if complies(found, required):
return AnonymousInterface
raise TypeError("dictionary does not contain required keys %s" %
', '.join(required - found))
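# Illustrative sketch (not part of the original module): checking a dict of
# callables against a hypothetical listener interface; the dict case yields an
# anonymous class holding the callables as staticmethods.
def _example_as_interface():
    class EventListener(object):
        def on_insert(self, row):
            raise NotImplementedError
        def on_delete(self, row):
            raise NotImplementedError
    # With no ``required`` given, supplying a single interface method passes.
    impl = as_interface({'on_insert': lambda row: row}, cls=EventListener)
    return impl.on_insert('row')  # -> 'row'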
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
def _reset(self, obj):
memoized_property.reset(obj, self.__name__)
@classmethod
def reset(cls, obj, name):
obj.__dict__.pop(name, None)
def memoized_instancemethod(fn):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
argument values, and will always return the same value even when
called with different arguments.
"""
def oneshot(self, *args, **kw):
result = fn(self, *args, **kw)
memo = lambda *a, **kw: result
memo.__name__ = fn.__name__
memo.__doc__ = fn.__doc__
self.__dict__[fn.__name__] = memo
return result
return update_wrapper(oneshot, fn)
class group_expirable_memoized_property(object):
"""A family of @memoized_properties that can be expired in tandem."""
def __init__(self, attributes=()):
self.attributes = []
if attributes:
self.attributes.extend(attributes)
def expire_instance(self, instance):
"""Expire all memoized properties for *instance*."""
stash = instance.__dict__
for attribute in self.attributes:
stash.pop(attribute, None)
def __call__(self, fn):
self.attributes.append(fn.__name__)
return memoized_property(fn)
def method(self, fn):
self.attributes.append(fn.__name__)
return memoized_instancemethod(fn)
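# Illustrative sketch (not part of the original module): a family of cached
# properties on a hypothetical class that can all be dropped at once when the
# underlying state changes.
def _example_group_expirable():
    group = group_expirable_memoized_property()
    class Totals(object):
        def __init__(self, numbers):
            self.numbers = numbers
        @group
        def total(self):
            return sum(self.numbers)
    t = Totals([1, 2, 3])
    assert t.total == 6            # computed once, then cached in __dict__
    t.numbers.append(4)
    group.expire_instance(t)       # expire the whole family in tandem
    return t.total                 # -> 10, recomputed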
class MemoizedSlots(object):
"""Apply memoized items to an object using a __getattr__ scheme.
This allows the functionality of memoized_property and
memoized_instancemethod to be available to a class using __slots__.
"""
__slots__ = ()
def _fallback_getattr(self, key):
raise AttributeError(key)
def __getattr__(self, key):
if key.startswith('_memoized'):
raise AttributeError(key)
elif hasattr(self, '_memoized_attr_%s' % key):
value = getattr(self, '_memoized_attr_%s' % key)()
setattr(self, key, value)
return value
elif hasattr(self, '_memoized_method_%s' % key):
fn = getattr(self, '_memoized_method_%s' % key)
def oneshot(*args, **kw):
result = fn(*args, **kw)
memo = lambda *a, **kw: result
memo.__name__ = fn.__name__
memo.__doc__ = fn.__doc__
setattr(self, key, memo)
return result
oneshot.__doc__ = fn.__doc__
return oneshot
else:
return self._fallback_getattr(key)
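# Illustrative sketch (not part of the original module): memoization on a
# hypothetical __slots__ class, where the value computed by
# ``_memoized_attr_<name>`` replaces the attribute lookup on first access.
def _example_memoized_slots():
    class Point(MemoizedSlots):
        __slots__ = ('x', 'y', 'hypotenuse')
        def __init__(self, x, y):
            self.x = x
            self.y = y
        def _memoized_attr_hypotenuse(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
    p = Point(3, 4)
    return p.hypotenuse  # -> 5.0, computed once then stored in the slot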
def dependency_for(modulename):
    """Install the decorated object as an attribute of the named module."""
def decorate(obj):
# TODO: would be nice to improve on this import silliness,
# unfortunately importlib doesn't work that great either
tokens = modulename.split(".")
mod = compat.import_(
".".join(tokens[0:-1]), globals(), locals(), tokens[-1])
mod = getattr(mod, tokens[-1])
setattr(mod, obj.__name__, obj)
return obj
return decorate
class dependencies(object):
"""Apply imported dependencies as arguments to a function.
E.g.::
@util.dependencies(
"sqlalchemy.sql.widget",
"sqlalchemy.engine.default"
)
def some_func(self, widget, default, arg1, arg2, **kw):
# ...
Rationale is so that the impact of a dependency cycle can be
associated directly with the few functions that cause the cycle,
and not pollute the module-level namespace.
"""
def __init__(self, *deps):
self.import_deps = []
for dep in deps:
tokens = dep.split(".")
self.import_deps.append(
dependencies._importlater(
".".join(tokens[0:-1]),
tokens[-1]
)
)
def __call__(self, fn):
import_deps = self.import_deps
spec = compat.inspect_getfullargspec(fn)
spec_zero = list(spec[0])
hasself = spec_zero[0] in ('self', 'cls')
for i in range(len(import_deps)):
spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i
inner_spec = format_argspec_plus(spec, grouped=False)
for impname in import_deps:
del spec_zero[1 if hasself else 0]
spec[0][:] = spec_zero
outer_spec = format_argspec_plus(spec, grouped=False)
code = 'lambda %(args)s: fn(%(apply_kw)s)' % {
"args": outer_spec['args'],
"apply_kw": inner_spec['apply_kw']
}
decorated = eval(code, locals())
decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
return update_wrapper(decorated, fn)
@classmethod
def resolve_all(cls, path):
for m in list(dependencies._unresolved):
if m._full_path.startswith(path):
m._resolve()
_unresolved = set()
_by_key = {}
class _importlater(object):
_unresolved = set()
_by_key = {}
def __new__(cls, path, addtl):
key = path + "." + addtl
if key in dependencies._by_key:
return dependencies._by_key[key]
else:
dependencies._by_key[key] = imp = object.__new__(cls)
return imp
def __init__(self, path, addtl):
self._il_path = path
self._il_addtl = addtl
dependencies._unresolved.add(self)
@property
def _full_path(self):
return self._il_path + "." + self._il_addtl
@memoized_property
def module(self):
if self in dependencies._unresolved:
raise ImportError(
"importlater.resolve_all() hasn't "
"been called (this is %s %s)"
% (self._il_path, self._il_addtl))
return getattr(self._initial_import, self._il_addtl)
def _resolve(self):
dependencies._unresolved.discard(self)
self._initial_import = compat.import_(
self._il_path, globals(), locals(),
[self._il_addtl])
def __getattr__(self, key):
if key == 'module':
raise ImportError("Could not resolve module %s"
% self._full_path)
try:
attr = getattr(self.module, key)
except AttributeError:
raise AttributeError(
"Module %s has no attribute '%s'" %
(self._full_path, key)
)
self.__dict__[key] = attr
return attr
# from paste.deploy.converters
def asbool(obj):
    """Coerce a value to a boolean, accepting common true/false strings."""
if isinstance(obj, compat.string_types):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError("String is not true/false: %r" % obj)
return bool(obj)
def bool_or_str(*text):
"""Return a callable that will evaluate a string as
boolean, or one of a set of "alternate" string values.
"""
def bool_or_value(obj):
if obj in text:
return obj
else:
return asbool(obj)
return bool_or_value
def asint(value):
"""Coerce to integer."""
if value is None:
return value
return int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True):
"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
necessary. If 'flexi_bool' is True, the string '0' is considered false
when coercing to boolean.
"""
if key in kw and not isinstance(kw[key], type_) and kw[key] is not None:
if type_ is bool and flexi_bool:
kw[key] = asbool(kw[key])
else:
kw[key] = type_(kw[key])
def constructor_copy(obj, cls, *args, **kw):
"""Instantiate cls using the __dict__ of obj as constructor arguments.
Uses inspect to match the named arguments of ``cls``.
"""
names = get_cls_kwargs(cls)
kw.update((k, obj.__dict__[k]) for k in names if k in obj.__dict__)
return cls(*args, **kw)
def counter():
"""Return a threadsafe counter function."""
lock = compat.threading.Lock()
counter = itertools.count(1)
# avoid the 2to3 "next" transformation...
def _next():
lock.acquire()
try:
return next(counter)
finally:
lock.release()
return _next
def duck_type_collection(specimen, default=None):
"""Given an instance or class, guess if it is or is acting as one of
the basic collection types: list, set and dict. If the __emulates__
property is present, return that preferentially.
"""
if hasattr(specimen, '__emulates__'):
# canonicalize set vs sets.Set to a standard: the builtin set
if (specimen.__emulates__ is not None and
issubclass(specimen.__emulates__, set)):
return set
else:
return specimen.__emulates__
isa = isinstance(specimen, type) and issubclass or isinstance
if isa(specimen, list):
return list
elif isa(specimen, set):
return set
elif isa(specimen, dict):
return dict
if hasattr(specimen, 'append'):
return list
elif hasattr(specimen, 'add'):
return set
elif hasattr(specimen, 'set'):
return dict
else:
return default
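# Illustrative sketch (not part of the original module): guessing collection
# semantics for a hypothetical append-only container that is neither a list,
# set, nor dict subclass.
def _example_duck_type_collection():
    class AppendOnlyLog(object):
        def __init__(self):
            self._items = []
        def append(self, item):
            self._items.append(item)
    assert duck_type_collection(set()) is set     # real types short-circuit
    return duck_type_collection(AppendOnlyLog())  # -> list, via 'append'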
def assert_arg_type(arg, argtype, name):
if isinstance(arg, argtype):
return arg
else:
if isinstance(argtype, tuple):
raise exc.ArgumentError(
"Argument '%s' is expected to be one of type %s, got '%s'" %
(name, ' or '.join("'%s'" % a for a in argtype), type(arg)))
else:
raise exc.ArgumentError(
"Argument '%s' is expected to be of type '%s', got '%s'" %
(name, argtype, type(arg)))
def dictlike_iteritems(dictlike):
"""Return a (key, value) iterator for almost any dict-like object."""
if compat.py3k:
if hasattr(dictlike, 'items'):
return list(dictlike.items())
else:
if hasattr(dictlike, 'iteritems'):
return dictlike.iteritems()
elif hasattr(dictlike, 'items'):
return iter(dictlike.items())
getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None))
if getter is None:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
if hasattr(dictlike, 'iterkeys'):
def iterator():
for key in dictlike.iterkeys():
yield key, getter(key)
return iterator()
elif hasattr(dictlike, 'keys'):
return iter((key, getter(key)) for key in dictlike.keys())
else:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
class classproperty(property):
"""A decorator that behaves like @property except that operates
on classes rather than instances.
The decorator is currently special when using the declarative
module, but note that the
:class:`~.sqlalchemy.ext.declarative.declared_attr`
decorator should be used for this purpose with declarative.
"""
def __init__(self, fget, *arg, **kw):
super(classproperty, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
class hybridproperty(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
clsval = self.func(owner)
clsval.__doc__ = self.func.__doc__
return clsval
else:
return self.func(instance)
class hybridmethod(object):
"""Decorate a function as cls- or instance- level."""
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self.func.__get__(owner, owner.__class__)
else:
return self.func.__get__(instance, owner)
class _symbol(int):
def __new__(self, name, doc=None, canonical=None):
"""Construct a new named symbol."""
assert isinstance(name, compat.string_types)
if canonical is None:
canonical = hash(name)
v = int.__new__(_symbol, canonical)
v.name = name
if doc:
v.__doc__ = doc
return v
def __reduce__(self):
return symbol, (self.name, "x", int(self))
def __str__(self):
return repr(self)
def __repr__(self):
return "symbol(%r)" % self.name
_symbol.__name__ = 'symbol'
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
    symbol('foo')
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
The optional ``doc`` argument assigns to ``__doc__``. This
is strictly so that Sphinx autoattr picks up the docstring we want
(it doesn't appear to pick up the in-module docstring if the datamember
is in a different module - autoattribute also blows up completely).
If Sphinx fixes/improves this then we would no longer need
``doc`` here.
"""
symbols = {}
_lock = compat.threading.Lock()
def __new__(cls, name, doc=None, canonical=None):
cls._lock.acquire()
try:
sym = cls.symbols.get(name)
if sym is None:
cls.symbols[name] = sym = _symbol(name, doc, canonical)
return sym
finally:
symbol._lock.release()
_creation_order = 1
def set_creation_order(instance):
"""Assign a '_creation_order' sequence to the given instance.
This allows multiple instances to be sorted in order of creation
(typically within a single thread; the counter is not particularly
threadsafe).
"""
global _creation_order
instance._creation_order = _creation_order
_creation_order += 1
def warn_exception(func, *args, **kwargs):
"""executes the given function, catches all exceptions and converts to
a warning.
"""
try:
return func(*args, **kwargs)
except Exception:
warn("%s('%s') ignored" % sys.exc_info()[0:2])
def ellipses_string(value, len_=25):
try:
if len(value) > len_:
return "%s..." % value[0:len_]
else:
return value
except TypeError:
return value
class _hash_limit_string(compat.text_type):
"""A string subclass that can only be hashed on a maximum amount
of unique values.
This is used for warnings so that we can send out parameterized warnings
without the __warningregistry__ of the module, or the non-overridable
"once" registry within warnings.py, overloading memory,
"""
def __new__(cls, value, num, args):
interpolated = (value % args) + \
(" (this warning may be suppressed after %d occurrences)" % num)
self = super(_hash_limit_string, cls).__new__(cls, interpolated)
self._hash = hash("%s_%d" % (value, hash(interpolated) % num))
return self
def __hash__(self):
return self._hash
def __eq__(self, other):
return hash(self) == hash(other)
def warn(msg):
"""Issue a warning.
If msg is a string, :class:`.exc.SAWarning` is used as
the category.
"""
warnings.warn(msg, exc.SAWarning, stacklevel=2)
def warn_limited(msg, args):
"""Issue a warning with a paramterized string, limiting the number
of registrations.
"""
if args:
msg = _hash_limit_string(msg, 10, args)
warnings.warn(msg, exc.SAWarning, stacklevel=2)
def only_once(fn):
"""Decorate the given function to be a no-op after it is called exactly
once."""
once = [fn]
def go(*arg, **kw):
if once:
once_fn = once.pop()
return once_fn(*arg, **kw)
return go
_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py')
_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)')
def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
"""Chop extraneous lines off beginning and end of a traceback.
:param tb:
a list of traceback lines as returned by ``traceback.format_stack()``
:param exclude_prefix:
a regular expression object matching lines to skip at beginning of
``tb``
:param exclude_suffix:
a regular expression object matching lines to skip at end of ``tb``
"""
start = 0
end = len(tb) - 1
while start <= end and exclude_prefix.search(tb[start]):
start += 1
while start <= end and exclude_suffix.search(tb[end]):
end -= 1
return tb[start:end + 1]
NoneType = type(None)
def attrsetter(attrname):
    """Return a function that sets the named attribute on its first argument."""
code = \
"def set(obj, value):"\
" obj.%s = value" % attrname
env = locals().copy()
exec(code, env)
return env['set']
class EnsureKWArgType(type):
"""Apply translation of functions to accept **kw arguments if they
don't already.
"""
def __init__(cls, clsname, bases, clsdict):
fn_reg = cls.ensure_kwarg
if fn_reg:
for key in clsdict:
m = re.match(fn_reg, key)
if m:
fn = clsdict[key]
spec = compat.inspect_getargspec(fn)
if not spec.keywords:
clsdict[key] = wrapped = cls._wrap_w_kw(fn)
setattr(cls, key, wrapped)
super(EnsureKWArgType, cls).__init__(clsname, bases, clsdict)
def _wrap_w_kw(self, fn):
def wrap(*arg, **kw):
return fn(*arg)
return update_wrapper(wrap, fn)
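# Illustrative sketch (not part of the original module): building a
# hypothetical visitor class through the metaclass directly (version-agnostic
# three-argument form) so that methods matching ``ensure_kwarg`` tolerate
# unexpected keyword arguments.
def _example_ensure_kwarg():
    ExampleVisitor = EnsureKWArgType(
        'ExampleVisitor', (object,), {
            'ensure_kwarg': r'visit_.*',
            'visit_column': lambda self, column: column,
        })
    # Extra keyword arguments are silently discarded by the generated wrapper.
    return ExampleVisitor().visit_column('c', render_hints=True)  # -> 'c'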
|
# -*- coding: utf-8 -*-
"""Converter for PomBase."""
import logging
from collections import defaultdict
from typing import Iterable
import bioversions
import click
import pandas as pd
from more_click import verbose_option
from tqdm import tqdm
import pyobo
from pyobo import Reference
from pyobo.struct import Obo, Synonym, Term, from_species, has_gene_product, orthologous
from pyobo.utils.path import ensure_df
logger = logging.getLogger(__name__)
PREFIX = "pombase"
NAME = "PomBase"
URL = "https://www.pombase.org/data/names_and_identifiers/gene_IDs_names_products.tsv"
ORTHOLOGS_URL = "https://www.pombase.org/data/orthologs/human-orthologs.txt.gz"
def get_obo(force: bool = False) -> Obo:
"""Get OBO."""
version = bioversions.get_version("pombase")
return Obo(
iter_terms=get_terms,
iter_terms_kwargs=dict(force=force),
name=NAME,
ontology=PREFIX,
        typedefs=[from_species, has_gene_product, orthologous],
auto_generated_by=f"bio2obo:{PREFIX}",
data_version=version,
)
#: A mapping from PomBase gene type to sequence ontology terms
POMBASE_TO_SO = {
"protein coding gene": "0001217",
"pseudogene": "0000336",
"tRNA gene": "0001272",
"ncRNA gene": "0001263",
"snRNA gene": "0001268",
"snoRNA gene": "0001267",
"rRNA gene": "0001637",
}
def get_terms(force: bool = False) -> Iterable[Term]:
"""Get terms."""
orthologs_df = ensure_df(PREFIX, url=ORTHOLOGS_URL, force=force, header=None)
identifier_to_hgnc_ids = defaultdict(set)
hgnc_symbol_to_id = pyobo.get_name_id_mapping("hgnc")
for identifier, hgnc_symbols in orthologs_df.values:
if hgnc_symbols == "NONE":
continue
for hgnc_symbol in hgnc_symbols.split("|"):
hgnc_id = hgnc_symbol_to_id.get(hgnc_symbol)
if hgnc_id is not None:
identifier_to_hgnc_ids[identifier].add(hgnc_id)
df = ensure_df(PREFIX, url=URL, force=force, header=None)
so = {
gtype: Reference.auto("SO", POMBASE_TO_SO[gtype])
for gtype in sorted(df[df.columns[6]].unique())
}
for _, reference in sorted(so.items()):
yield Term(reference=reference)
for identifier, _, symbol, chromosome, name, uniprot_id, gtype, synonyms in tqdm(df.values):
term = Term.from_triple(
prefix=PREFIX,
identifier=identifier,
name=symbol if pd.notna(symbol) else None,
definition=name if pd.notna(name) else None,
)
term.append_property("chromosome", chromosome[len("chromosome_") :])
term.append_parent(so[gtype])
term.set_species(identifier="4896", name="Schizosaccharomyces pombe")
for hgnc_id in identifier_to_hgnc_ids.get(identifier, []):
term.append_relationship(orthologous, Reference.auto("hgnc", hgnc_id))
if uniprot_id and pd.notna(uniprot_id):
term.append_relationship(has_gene_product, Reference.auto("uniprot", uniprot_id))
if synonyms and pd.notna(synonyms):
for synonym in synonyms.split(","):
term.append_synonym(Synonym(synonym))
yield term
@click.command()
@verbose_option
def _main():
obo = get_obo(force=True)
obo.write_default(force=True, write_obo=True, write_obograph=True)
if __name__ == "__main__":
_main()
|
import pigpio
from pisat.comm.transceiver import Im920, SocketTransceiver
from pisat.core.cansat import CanSat
from pisat.core.nav import Context
from pisat.core.manager import ComponentManager
from pisat.core.logger import (
DataLogger, LogQueue, SystemLogger
)
from pisat.handler import (
PigpioI2CHandler, PyserialSerialHandler, PigpioDigitalOutputHandler
)
from pisat.sensor import Bme280, Opt3002, SamM8Q
from can09.child.model import ChildLoggingModel
from can09.child.nodes import *
from can09.child.setting import *
def run_child():
pi = pigpio.pi()
# handlers
handler_bme280 = PigpioI2CHandler(pi, I2C_ADDRESS_BME280)
handler_opt3002 = PigpioI2CHandler(pi, I2C_ADDRESS_OPT3002)
handler_gps = PyserialSerialHandler(SERIAL_PORT_GPS, BAUDRATE_GPS)
handler_im920 = PyserialSerialHandler(SERIAL_PORT_IM920, BAUDRATE_IM920)
handler_led = PigpioDigitalOutputHandler(pi, GPIO_LED, name=NAME_LED)
# sensors
bme280 = Bme280(handler_bme280, name=NAME_BME280)
opt3002 = Opt3002(handler_opt3002, name=NAME_OPT3002)
gps = SamM8Q(handler_gps, name=NAME_GPS)
# transceiver
im920 = Im920(handler_im920, name=NAME_IM920)
socket_transceiver = SocketTransceiver(im920, certain=True, name=NAME_SOCKET_TRANSCEIVER)
# logger
que = LogQueue(ChildLoggingModel, maxlen=5000, name=NAME_LOGQUEUE)
dlogger = DataLogger(que, bme280, opt3002, gps, name=NAME_DATA_LOGGER)
slogger = SystemLogger(name=NAME_SYSTEM_LOGGER)
slogger.setFileHandler()
manager = ComponentManager(handler_led, im920, socket_transceiver, dlogger,
recursive=True, name=NAME_MANAGER)
context = Context({
MissionStandbyNode: {True: ChildServerNode, False: MissionStandbyNode},
ChildServerNode: {True: None, False: ChildServerNode}
},
start=MissionStandbyNode)
cansat = CanSat(context, manager, dlogger=dlogger, slogger=slogger)
cansat.run()
if __name__ == "__main__":
run_child()
|
# -*- coding: utf-8 -*-
from unittest import TestCase, main
from recc.container.struct.container_status import ContainerStatus
class ContainerStatusTestCase(TestCase):
def test_default(self):
# fmt: off
self.assertEqual(ContainerStatus.Created, ContainerStatus.from_str("created"))
self.assertEqual(ContainerStatus.Restarting, ContainerStatus.from_str("restarting")) # noqa
self.assertEqual(ContainerStatus.Running, ContainerStatus.from_str("running"))
self.assertEqual(ContainerStatus.Removing, ContainerStatus.from_str("removing"))
self.assertEqual(ContainerStatus.Paused, ContainerStatus.from_str("paused"))
self.assertEqual(ContainerStatus.Exited, ContainerStatus.from_str("exited"))
self.assertEqual(ContainerStatus.Dead, ContainerStatus.from_str("dead"))
# fmt: on
self.assertEqual(ContainerStatus.Created, ContainerStatus.from_str("Created"))
self.assertEqual(ContainerStatus.Created, ContainerStatus.from_str("createD"))
self.assertRaises(KeyError, ContainerStatus.from_str, "c")
if __name__ == "__main__":
main()
|
from office365.runtime.client_object import ClientObject
from office365.runtime.client_query import ClientQuery
from office365.runtime.resource_path_entity import ResourcePathEntity
class OutlookEntity(ClientObject):
"""Base Outlook entity."""
def update(self):
qry = ClientQuery.update_entry_query(self)
self.context.add_query(qry)
def delete_object(self):
"""Deletes the outlook entity."""
qry = ClientQuery.delete_entry_query(self)
self.context.add_query(qry)
@property
def resource_path(self):
resource_path = super(OutlookEntity, self).resource_path
if resource_path:
return resource_path
# fallback: create a new resource path
if self.is_property_available("Id"):
return ResourcePathEntity(
self.context,
self._parent_collection.resource_path,
self.properties["Id"])
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Deprecated. Measurement alignment."""
import warnings
from qiskit.transpiler.passes.scheduling.alignments.reschedule import ConstrainedReschedule
class AlignMeasures:
"""Deprecated. Measurement alignment."""
def __new__(cls, alignment=1) -> ConstrainedReschedule:
"""Create new pass.
Args:
alignment: Integer number representing the minimum time resolution to
trigger measure instruction in units of ``dt``. This value depends on
the control electronics of your quantum processor.
Returns:
ConstrainedReschedule instance that is a drop-in-replacement of this class.
"""
warnings.warn(
f"{cls.__name__} has been deprecated as of Qiskit 20.0. "
f"Use ConstrainedReschedule pass instead.",
FutureWarning,
)
return ConstrainedReschedule(acquire_alignment=alignment)
|
from roam_linker_bot import __version__
def test_version():
assert __version__ == "0.1.0"
|
import regym
from regym.rl_algorithms.agents import build_PPO_Agent
from regym.rl_loops.singleagent_loops import rl_loop
from regym.environments import parse_environment
from test_fixtures import ppo_rnd_config_dict_ma
from tqdm import tqdm
from tensorboardX import SummaryWriter
import os
import math
import copy
import random
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import time
offset_worker_id = 50
gif_interval = 100
def make_gif(trajectory, episode=0, actor_idx=0, path='./'):
fig = plt.figure()
imgs = []
for state in trajectory:
if state.shape[-1] == 12:
            # handle stacked frames: split the 12 channels into 3-channel images
per_image_first_channel_indices = range(0,state.shape[-1]+1,3)
ims = [ state[...,idx_begin:idx_end] for idx_begin, idx_end in zip(per_image_first_channel_indices,per_image_first_channel_indices[1:])]
for img in ims:
imgs.append( img)
else:
imgs.append(state)
for idx,img in enumerate(imgs):
imgs[idx] = [plt.imshow(img, animated=True)]
gif = anim.ArtistAnimation(fig, imgs, interval=200, blit=True, repeat_delay=None)
path = os.path.join(path, f'./traj-ep{episode}-actor{actor_idx}.gif')
gif.save(path, dpi=None, writer='imagemagick')
#plt.show()
plt.close(fig)
def make_gif_with_graph(trajectory, data, episode=0, actor_idx=0, path='./'):
fig = plt.figure()
imgs = []
gd = []
for idx, (state, d) in enumerate(zip(trajectory,data)):
if state.shape[-1] == 12:
            # handle stacked frames: split the 12 channels into 3-channel images
per_image_first_channel_indices = range(0,state.shape[-1]+1,3)
ims = [ state[...,idx_begin:idx_end] for idx_begin, idx_end in zip(per_image_first_channel_indices,per_image_first_channel_indices[1:])]
for img in ims:
imgs.append( img)
gd.append(d)
else:
imgs.append(state)
gd.append(d)
for idx,img in enumerate(imgs):
plt.subplot(211)
img = plt.imshow(img, animated=True)
ax = plt.subplot(212)
x = np.arange(0,idx,1)
y = np.asarray(gd[:idx])
ax.set_xlim(left=0,right=idx+10)
line = ax.plot(x, y, color='blue', marker='o', linestyle='dashed',linewidth=2, markersize=10)
imgs[idx] = [img]+line
gif = anim.ArtistAnimation(fig, imgs, interval=200, blit=True, repeat_delay=None)
path = os.path.join(path, f'./traj-ep{episode}-actor{actor_idx}.gif')
gif.save(path, dpi=None, writer='imagemagick')
#plt.show()
plt.close(fig)
def check_path_for_agent(filepath):
#filepath = os.path.join(path,filename)
agent = None
offset_episode_count = 0
if os.path.isfile(filepath):
print('==> loading checkpoint {}'.format(filepath))
agent = torch.load(filepath)
offset_episode_count = agent.episode_count
#setattr(agent, 'episode_count', offset_episode_count)
print('==> loaded checkpoint {}'.format(filepath))
return agent, offset_episode_count
def update_configs(env_param2range, nbr_actors):
env_configs = list()
tower_seed = random.choice(env_param2range['tower-seed'])
#allowed_floors = random.choice(env_param2range['allowed-floors'])
for a_i in range(nbr_actors):
env_config = copy.deepcopy(env_param2range)
env_config['worker_id'] = a_i+offset_worker_id
for k in env_config:
if k == 'tower-seed':
env_config[k] = tower_seed
continue
'''
elif k == 'allowed-floors':
env_config[k] = allowed_floors
continue
'''
if isinstance(env_config[k], list):
v = random.choice(env_config[k])
env_config[k] = v
env_configs.append(env_config)
return env_configs
def test_train_ppo_rnd(ppo_rnd_config_dict_ma):
global gif_interval
task = parse_environment('MontezumaRevenge-v0',
nbr_parallel_env=ppo_rnd_config_dict_ma['nbr_actor'],
nbr_frame_stacking=ppo_rnd_config_dict_ma['nbr_frame_stacking'])
#logdir = './test_10floors0_Theme1_LABC-light_gru_ppo_rnd512-InitSqrt2_ObsUP1e5_IntrUP1e5_NonEpisodicGAE_cnn80phi256gru128_a4_b256_h128_1e-4_OTC_frameskip4/'
#logdir = './test_10floors0_Theme1_LABC-light_gru_ppo_rnd512-InitSqrt2_ObsUP1e5_IntrUP1e5_NonEpisodicGAE_cnn80phi256gru128_a8_b128_h128_3e-4_OTC_frameskip4/'
#logdir = './test_10floors0_Theme1_LABC-light_gru_ppo_rnd512-InitSqrt2_ObsUP1e5_IntrUP1e5_NonEpisodicGAE_NormRetMeanStd_cnn80phi256gru128_a8_b128_h128_3e-4_MZ_frameskip4/'
logdir = './test_gru_ppo_rnd512-InitSqrt2_ObsUP1e5_IntrUP1e5_NonEpisodicTrueGAE_NormRetMeanStd_cnn80phi256gru256_ac128_a32_b1024_h128_3e-4_MZ_frameskip4/'
#logdir = './test_gif'
if not os.path.exists(logdir):
os.mkdir(logdir)
sum_writer = SummaryWriter(logdir)
save_path = os.path.join(logdir,'./ppo_rnd.agent')
agent, offset_episode_count = check_path_for_agent(save_path)
if agent is None: agent = build_PPO_Agent(config=ppo_rnd_config_dict_ma, task=task, agent_name='PPO_RND_MZ')
regym.rl_algorithms.PPO.ppo.summary_writer = sum_writer
agent.save_path = save_path
nbr_episodes = 1e7
max_episode_length = 1e5
nbr_actors = ppo_rnd_config_dict_ma['nbr_actor']
env_param2range = { 'tower-seed': list(range(-1,101)), #Sets the seed used to generate the tower. -1 corresponds to a random tower on every reset() call.
'starting-floor': 0, #list(range(100)), #Sets the starting floor for the agent on reset().
'total-floors': 10, #list(range(1, 100)) #Sets the maximum number of possible floors in the tower.
'dense-reward': 0, #(0, 1) #Whether to use the sparse (0) or dense (1) reward function.
'lighting-type': [0, 1, 2], #Whether to use no realtime light (0), a single realtime light with minimal color variations (1), or a realtime light with large color variations (2).
'visual-theme': 0, #[0, 1, 2], #Whether to use only the default-theme (0), the normal ordering or themes (1), or a random theme every floor (2).
'agent-perspective':1, #(0, 1), #Whether to use first-person (0) or third-person (1) perspective for the agent.
'allowed-rooms': 2, #(0, 1, 2), #Whether to use only normal rooms (0), normal and key rooms (1), or normal, key, and puzzle rooms (2).
'allowed-modules': 2, #(0, 1, 2), #Whether to fill rooms with no modules (0), only easy modules (1), or the full range of modules (2).
'allowed-floors': 0, #[0, 1, 2], #Whether to include only straightforward floor layouts (0), layouts that include branching (1), or layouts that include branching and circling (2).
'default-theme': 1 #[0, 1, 2, 3, 4] #Whether to set the default theme to Ancient (0), Moorish (1), Industrial (2), Modern (3), or Future (4).
}
    # These parameters act as a curriculum, since they only include straightforward floor layouts...
env_configs = update_configs(env_param2range, nbr_actors)
for i in tqdm(range(offset_episode_count, int(nbr_episodes))):
trajectory = rl_loop.run_episode_parallel(task.env, agent,
training=True,
max_episode_length=max_episode_length,
)#env_configs=env_configs)
total_return = [ sum([ exp[2] for exp in t]) for t in trajectory]
mean_total_return = sum( total_return) / len(trajectory)
std_ext_return = math.sqrt( sum( [math.pow( r-mean_total_return ,2) for r in total_return]) / len(total_return) )
total_int_return = [ sum([ exp[3] for exp in t]) for t in trajectory]
mean_total_int_return = sum( total_int_return) / len(trajectory)
std_int_return = math.sqrt( sum( [math.pow( r-mean_total_int_return ,2) for r in total_int_return]) / len(total_int_return) )
for idx, (ext_ret, int_ret) in enumerate(zip(total_return, total_int_return)):
sum_writer.add_scalar('Training/TotalReturn', ext_ret, i*len(trajectory)+idx)
sum_writer.add_scalar('Training/TotalIntReturn', int_ret, i*len(trajectory)+idx)
sum_writer.add_scalar('Training/StdIntReturn', std_int_return, i)
sum_writer.add_scalar('Training/StdExtReturn', std_ext_return, i)
episode_lengths = [ len(t) for t in trajectory]
mean_episode_length = sum( episode_lengths) / len(trajectory)
std_episode_length = math.sqrt( sum( [math.pow( l-mean_episode_length ,2) for l in episode_lengths]) / len(trajectory) )
sum_writer.add_scalar('Training/MeanTotalReturn', mean_total_return, i)
sum_writer.add_scalar('Training/MeanTotalIntReturn', mean_total_int_return, i)
sum_writer.add_scalar('Training/MeanEpisodeLength', mean_episode_length, i)
sum_writer.add_scalar('Training/StdEpisodeLength', std_episode_length, i)
# Update configs:
env_configs = update_configs(env_param2range, nbr_actors)
agent.episode_count += 1
if (i+nbr_actors)%gif_interval == 0:
for actor_idx in range(nbr_actors):
gif_traj = [ exp[0] for exp in trajectory[actor_idx]]
gif_data = [ exp[3] for exp in trajectory[actor_idx]]
begin = time.time()
#make_gif(gif_traj, episode=i, actor_idx=actor_idx, path=logdir)
make_gif_with_graph(gif_traj, gif_data, episode=i, actor_idx=actor_idx, path=logdir)
end = time.time()
eta = end-begin
print(f'Time: {eta} sec.')
task.env.close()
assert trajectory is not None
assert isinstance(trajectory, list)
if __name__ == '__main__':
# https://pytorch.org/docs/master/multiprocessing.html#multiprocessing-cuda-sharing-details
torch.multiprocessing.set_start_method('forkserver')
test_train_ppo_rnd(ppo_rnd_config_dict_ma())
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
import unittest
from skbio import TreeNode
from skbio.io import NewickFormatError
from skbio.io.format.newick import (
_newick_to_tree_node, _tree_node_to_newick, _newick_sniffer)
class TestNewick(unittest.TestCase):
def _assert_node_equal(self, n1, n2):
self.assertEqual(n1.name, n2.name)
self.assertEqual(n1.length, n2.length)
self.assertEqual(len(n1.children), len(n2.children))
def _assert_equal(self, n1, n2):
def name(x):
return (str(x.name),
float(x.length) if x.length is not None else 0,
len(x.children))
self._assert_node_equal(n1, n2)
for c1, c2 in zip(sorted(n1.children, key=name),
sorted(n2.children, key=name)):
self.assertTrue(c1.parent is n1)
self.assertTrue(c2.parent is n2)
self._assert_equal(c1, c2)
def _setup_tree(self, kwargs_list):
trees = []
for kwargs in kwargs_list:
trees.append(TreeNode(**kwargs))
trees[4].extend([trees[2], trees[3]])
trees[5].extend([trees[0], trees[1], trees[4]])
return trees[5]
def _setup_linked_list(self, kwargs_list):
last_node = None
for idx, kwargs in enumerate(kwargs_list):
new_node = TreeNode(**kwargs)
if last_node is not None:
new_node.append(last_node)
last_node = new_node
return last_node
def _setup_balanced_binary(self, kwargs_list):
trees = []
for kwargs in kwargs_list:
trees.append(TreeNode(**kwargs))
trees[0].extend([trees[2], trees[3]])
trees[1].extend([trees[4], trees[5]])
trees[6].extend([trees[0], trees[1]])
return trees[6]
def setUp(self):
# Using the factory functions above, we will construct different tree
# instances. Each tree is expected to serialize to the first newick
# string in the list. Each string in the list is expected to
        # deserialize into an equivalent rotation of the constructed instance.
tree_blank = (self._setup_tree([
{}, {}, {}, {}, {}, {}
]), [
"(,,(,));\n",
"(,(,),);",
"((,),,);",
" ((,[ this is a comment ]) , , ) ; ",
"((,[ i_can_do_this[0] or escape unmatched '[ ]),[more words],);",
])
tree_leaves_named = (self._setup_tree([
{'name': 'a_'},
{'name': 'b'},
{'name': 'c'},
{'name': 'd'},
{},
{}
]), [
"('a_',b,(c,d));\n",
"(b,(c,d),'a_');",
"(b\n,'a_'\n ,(d \t,c) ) ;",
])
tree_all_named = (self._setup_tree([
{'name': 'a'},
{'name': 'b'},
{'name': 'c'},
{'name': '[whaaat!\']'},
{'name': 'e'},
{'name': 'f'}
]), [
"(a,b,(c,'[whaaat!'']')e)f;\n",
"(b,(c,'[whaaat!'']')e,a)f;",
"(b,[comment] \na,('[whaaat!'']',c)e)f;",
])
tree_all_but_root_distances = (self._setup_tree([
{'length': 0.1},
{'length': 0.2},
{'length': 0.3},
{'length': 0.4},
{'length': 0.5},
{}
]), [
"(:0.1,:0.2,(:0.3,:0.4):0.5);\n",
"(:0.2,(:0.3,:0.4):0.5,:0.1);",
"(:0.2,:0.1,(:0.4,:0.3):0.5);",
])
tree_all_distances = (self._setup_tree([
{'length': 0.1},
{'length': 0.2},
{'length': 0.3},
{'length': 0.4},
{'length': 0.5},
{'length': 0.0}
]), [
"(:0.1,:0.2,(:0.3,:0.4):0.5):0.0;\n",
"(:0.2,(:0.3,:0.4):0.5,:0.1):0.0;",
"(:0.2,\n:0.1,(:0.4,\n:0.3):0.5)\n:0.0;",
])
tree_all_leaves_named_with_distances = (self._setup_tree([
{'name': 'a', 'length': 0.1},
{'name': 'b_a\'', 'length': 0.2},
{'name': 'c', 'length': 0.3},
{'name': 'de d', 'length': 0.4},
{'length': 0.5},
{'length': 0.0}
]), [
"(a:0.1,'b_a''':0.2,(c:0.3,de_d:0.4):0.5):0.0;\n",
"('b_a''':0.2,(c:0.3,'de d':0.4):0.5,a:0.1):0.0;",
"('b_a''':0.2,a:0.1,('de d'[why not]:0.4,c:0.3):0.5):0.0;",
])
tree_all_leaves_named_with_distances_no_root = (self._setup_tree([
{'name': 'a', 'length': 0.1},
{'name': 'b_a\'', 'length': 0.2},
{'name': 'c', 'length': 0.3},
{'name': 'de d', 'length': 0.4},
{'length': 0.5},
{}
]), [
"(a:0.1,'b_a''':0.2,(c:0.3,de__d:0.4):0.5);\n",
"('b_a''':0.2\n[comment ahoy]\n,(c:0.3,'de d':0.4):0.5,a:0.1);",
"('b_a''':0.2,a:0.1,(de__d:0.4,c:0.3):0.5);"
])
tree_all = (self._setup_tree([
{'name': 'a', 'length': 0.1},
{'name': 'b_a\'', 'length': 0.2},
{'name': 'c', 'length': 0.3},
{'name': 'de\' d', 'length': 0.4},
{'name': 'e', 'length': 0.5},
{'name': 'f', 'length': 0.0}
]), [
"(a:0.1,'b_a''':0.2,(c:0.3,de''_d:0.4)e:0.5)f:0.0;\n",
"('b_a''':0.2,(c:0.3,de''_d:0.4)e:0.5,a:0.1)f:0.0;",
"((de''_d:0.4, c:0.3)e:0.5, 'b_a''':0.2, a:0.1)f:0.0;"
])
balanced_blank = (self._setup_balanced_binary([
{}, {}, {}, {}, {}, {}, {}
]), [
"((,),(,));\n",
])
balanced_named = (self._setup_balanced_binary([
{'name': 'a'},
{'name': 'b'},
{'name': 'c'},
{'name': 'd'},
{'name': 'e'},
{'name': 'f'},
{'name': 'g'}
]), [
"((c,d)a,(e,f)b)g;\n",
])
balanced_distances = (self._setup_balanced_binary([
{'length': 1.0},
{'length': 2.0},
{'length': 3.0},
{'length': 4.0},
{'length': 5.0},
{'length': 6.0},
{'length': 0.0}
]), [
"((:3.0,:4.0):1.0,(:5.0,:6.0):2.0):0.0;\n",
])
        balanced_all = (self._setup_balanced_binary([
{'name': 'a', 'length': 1.0},
{'name': 'b', 'length': 2.0},
{'name': 'c', 'length': 3.0},
{'name': 'd', 'length': 4.0},
{'name': 'e', 'length': 5.0},
{'name': 'f:f\'f', 'length': 6.0},
{'name': 'g', 'length': 0.0}
]), [
"((c:3.0,d:4.0)a:1.0,(e:5.0,'f:f''f':6.0)b:2.0)g:0.0;\n",
])
linked_list_blank = (self._setup_linked_list([
{}, {}, {}, {}, {}
]), [
"(((())));\n",
"[(((())));](((())));",
"[[(((())));](((())));](((())));\t\t\n"
])
linked_list_named = (self._setup_linked_list([
{'name': 'aaa'},
{'name': 'b_a\''},
{'name': 'c'},
{'name': 'de d'},
{'name': 'e'},
]), [
"((((aaa)'b_a''')c)de_d)e;\n"
])
        linked_list_distances = (self._setup_linked_list([
{'length': 0.4},
{'length': 0.3},
{'length': 0.2},
{'length': 0.1},
{'length': 0.0},
]), [
"((((:0.4):0.3):0.2):0.1):0.0;\n",
"((((:0.4)[not a label]:0.3):0.2):0.1):0.0;\t\t\n"
])
linked_list_all = (self._setup_linked_list([
{'name': 'a', 'length': 0.4},
{'name': 'b_a\'', 'length': 0.3},
{'name': 'c', 'length': 0.2},
{'name': 'de d', 'length': 0.1},
{'name': 'eee', 'length': 0.0},
]), [
"((((a:0.4)'b_a''':0.3)c:0.2)de_d:0.1)eee:0.0;\n"
])
single_empty = (TreeNode(), [";\n", "[comment about the root"
" and its properties];"])
single_named = (TreeNode(name='athing'), ["athing;\n"])
single_distance = (TreeNode(length=200.0), [":200.0;\n"])
single_all = (TreeNode(name='[a]', length=200.0), ["'[a]':200.0;\n"])
self.trees_newick_lists = [
tree_blank,
tree_leaves_named,
tree_all_named,
tree_all_but_root_distances,
tree_all_distances,
tree_all_leaves_named_with_distances,
tree_all_leaves_named_with_distances_no_root,
tree_all,
balanced_blank,
balanced_named,
balanced_distances,
            balanced_all,
linked_list_blank,
linked_list_named,
            linked_list_distances,
linked_list_all,
single_empty,
single_named,
single_distance,
single_all
]
# Invalid newick strings and list of error fragments that should be
# a part of the error message when read.
self.invalid_newicks = [
("", ['root']),
("This is not a newick file.", ['whitespace', 'label']),
("((();", ['Parenthesis', 'unbalanced']),
("(,,,)(,);\n", ['unnested', 'children']),
("(()());", ['unnested', 'children']),
("(():,,)", ['length']),
("[][[]('comment is the gotcha':0.2,,);", ['unbalanced', 'root']),
("#SampleID\tHeaderA\tHeaderB\n0\t'yellow'\t0.45;", ['whitespace',
'label']),
("))();", ['Parenthesis', 'unbalanced']),
("((,,),((,,));", ['Parenthesis', 'unbalanced']),
("\n".join([",".join(str(i) for i in range(100))
for _ in range(100)]), ['whitespace', 'label'])
]
def test_newick_to_tree_node_valid_files(self):
for tree, newicks in self.trees_newick_lists:
for newick in newicks:
fh = io.StringIO(newick)
read_tree = _newick_to_tree_node(fh)
self._assert_equal(tree, read_tree)
fh.close()
def test_newick_to_tree_node_invalid_files(self):
for invalid, error_fragments in self.invalid_newicks:
fh = io.StringIO(invalid)
with self.assertRaises(NewickFormatError) as cm:
_newick_to_tree_node(fh)
for frag in error_fragments:
self.assertIn(frag, str(cm.exception))
fh.close()
def test_tree_node_to_newick(self):
for tree, newicks in self.trees_newick_lists:
newick = newicks[0]
fh = io.StringIO()
_tree_node_to_newick(tree, fh)
self.assertEqual(newick, fh.getvalue())
fh.close()
def test_roundtrip(self):
for tree, newicks in self.trees_newick_lists:
newick = newicks[0]
fh = io.StringIO(newick)
tree = _newick_to_tree_node(fh)
fh2 = io.StringIO()
_tree_node_to_newick(tree, fh2)
fh2.seek(0)
tree2 = _newick_to_tree_node(fh2)
self.assertEqual(newick, fh2.getvalue())
self._assert_equal(tree, tree2)
fh.close()
fh2.close()
def test_newick_to_tree_node_convert_underscores(self):
fh = io.StringIO('(_:0.1, _a, _b)__;')
tree = _newick_to_tree_node(fh, convert_underscores=False)
fh2 = io.StringIO()
_tree_node_to_newick(tree, fh2)
self.assertEqual(fh2.getvalue(), "('_':0.1,'_a','_b')'__';\n")
fh2.close()
fh.close()
def test_newick_sniffer_valid_files(self):
for _, newicks in self.trees_newick_lists:
for newick in newicks:
fh = io.StringIO(newick)
self.assertEqual(_newick_sniffer(fh), (True, {}))
fh.close()
def test_newick_sniffer_invalid_files(self):
for invalid, _ in self.invalid_newicks:
fh = io.StringIO(invalid)
self.assertEqual(_newick_sniffer(fh), (False, {}))
fh.close()
if __name__ == '__main__':
unittest.main()
|
"""
weasyprint.tests.test_draw.test_tables
--------------------------------------
Test how tables are drawn.
    TODO: add a note on when to set ``direction: rtl`` on the body vs. on the table.
"""
import pytest
from ...html import HTML_HANDLERS
from ..testing_utils import assert_no_logs, requires
from . import as_pixel, assert_pixels, parse_pixels
PIX_BY_CHAR_OVERRIDES = {
# rgba(255, 0, 0, 0.5) above #fff
'r': as_pixel(b'\xff\x7f\x7f\xff'),
# rgba(0, 255, 0, 0.5) above #fff
'g': as_pixel(b'\x7f\xff\x7f\xff'),
# r above B above #fff.
'b': as_pixel(b'\x80\x00\x7f\xff'),
}
def to_pix(pixels_str):
return parse_pixels(pixels_str, PIX_BY_CHAR_OVERRIDES)
# TODO: refactor colspan/rowspan into CSS:
# td, th { column-span: attr(colspan integer) }
HTML_HANDLERS['x-td'] = HTML_HANDLERS['td']
HTML_HANDLERS['x-th'] = HTML_HANDLERS['th']
tables_source = '''
<style>
@page { size: 28px; background: #fff }
x-table { margin: 1px; padding: 1px; border-spacing: 1px;
border: 1px solid transparent }
x-td { width: 2px; height: 2px; padding: 1px;
border: 1px solid transparent }
%(extra_css)s
</style>
<x-table>
<x-colgroup>
<x-col></x-col>
<x-col></x-col>
</x-colgroup>
<x-col></x-col>
<x-tbody>
<x-tr>
<x-td></x-td>
<x-td rowspan=2></x-td>
<x-td></x-td>
</x-tr>
<x-tr>
<x-td colspan=2></x-td>
<x-td></x-td>
</x-tr>
</x-tbody>
<x-tr>
<x-td></x-td>
<x-td></x-td>
</x-tr>
</x-table>
'''
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_1():
assert_pixels('table_borders', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__r____r_r____r_r____r__B_
_B__r____r_r____r_r____r__B_
_B__r____r_r____r_r____r__B_
_B__r____r_r____r_r____r__B_
_B__rrrrrr_r____r_rrrrrr__B_
_B_________r____r_________B_
_B__rrrrrrrSrrrrS_rrrrrr__B_
_B__r______r____S_r____r__B_
_B__r______r____S_r____r__B_
_B__r______r____S_r____r__B_
_B__r______r____S_r____r__B_
_B__rrrrrrrSSSSSS_rrrrrr__B_
_B________________________B_
_B__rrrrrr_rrrrrr_________B_
_B__r____r_r____r_________B_
_B__r____r_r____r_________B_
_B__r____r_r____r_________B_
_B__r____r_r____r_________B_
_B__rrrrrr_rrrrrr_________B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed }
x-td { border-color: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_1_rtl():
assert_pixels('table_borders_rtl', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__r____r_r____r_r____r__B_
_B__r____r_r____r_r____r__B_
_B__r____r_r____r_r____r__B_
_B__r____r_r____r_r____r__B_
_B__rrrrrr_r____r_rrrrrr__B_
_B_________r____r_________B_
_B__rrrrrr_SrrrrSrrrrrrr__B_
_B__r____r_S____r______r__B_
_B__r____r_S____r______r__B_
_B__r____r_S____r______r__B_
_B__r____r_S____r______r__B_
_B__rrrrrr_SSSSSSrrrrrrr__B_
_B________________________B_
_B_________rrrrrr_rrrrrr__B_
_B_________r____r_r____r__B_
_B_________r____r_r____r__B_
_B_________r____r_r____r__B_
_B_________r____r_r____r__B_
_B_________rrrrrr_rrrrrr__B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed;
direction: rtl; }
x-td { border-color: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_2():
assert_pixels('table_collapsed_borders', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBB_________
_BBBBBBBBBBBBBBBBBB_________
_BB____r____r____BB_________
_BB____r____r____BB_________
_BB____r____r____BB_________
_BB____r____r____BB_________
_BBrrrrr____rrrrrBB_________
_BB_________r____BB_________
_BB_________r____BB_________
_BB_________r____BB_________
_BB_________r____BB_________
_BBrrrrrrrrrrrrrrBB_________
_BB____r____r____BB_________
_BB____r____r____BB_________
_BB____r____r____BB_________
_BB____r____r____BB_________
_BBBBBBBBBBBBBBBBBB_________
_BBBBBBBBBBBBBBBBBB_________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
'''), tables_source % {'extra_css': '''
x-table { border: 2px solid #00f; table-layout: fixed;
border-collapse: collapse }
x-td { border-color: #ff7f7f }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_2_rtl():
assert_pixels('table_collapsed_borders_rtl', 28, 28, to_pix('''
____________________________
_________BBBBBBBBBBBBBBBBBB_
_________BBBBBBBBBBBBBBBBBB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BBrrrrr____rrrrrBB_
_________BB____r_________BB_
_________BB____r_________BB_
_________BB____r_________BB_
_________BB____r_________BB_
_________BBrrrrrrrrrrrrrrBB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BBBBBBBBBBBBBBBBBB_
_________BBBBBBBBBBBBBBBBBB_
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
'''), tables_source % {'extra_css': '''
body { direction: rtl; }
x-table { border: 2px solid #00f; table-layout: fixed;
border-collapse: collapse; }
x-td { border-color: #ff7f7f }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_3():
assert_pixels('table_collapsed_borders_paged', 28, 52, to_pix('''
____________________________
_gggggggggggggggggggggggggg_
_g________________________g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BB____r____r____BB_____g_
_g_BB____r____r____BB_____g_
_g_BB____r____r____BB_____g_
_g_BB____r____r____BB_____g_
_g_BBrrrrr____rrrrrBB_____g_
_g_BB_________r____BB_____g_
_g_BB_________r____BB_____g_
_g_BB_________r____BB_____g_
_g_BB_________r____BB_____g_
_g_BBrrrrrrrrrrrrrrBB_____g_
_g________________________g_
_g________________________g_
_g________________________g_
_gggggggggggggggggggggggggg_
____________________________
____________________________
_gggggggggggggggggggggggggg_
_g_BBrrrrrrrrrrrrrrBB_____g_
_g_BB____r____r____BB_____g_
_g_BB____r____r____BB_____g_
_g_BB____r____r____BB_____g_
_g_BB____r____r____BB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g_BBBBBBBBBBBBBBBBBB_____g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_gggggggggggggggggggggggggg_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border: solid #00f; border-width: 8px 2px;
table-layout: fixed; border-collapse: collapse }
x-td { border-color: #ff7f7f }
@page { size: 28px 26px; margin: 1px;
border: 1px solid rgba(0, 255, 0, 0.5); }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_3_rtl():
assert_pixels('table_collapsed_borders_paged_rtl', 28, 52, to_pix('''
____________________________
_gggggggggggggggggggggggggg_
_g________________________g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BBrrrrr____rrrrrBB_g_
_g_____BB____r_________BB_g_
_g_____BB____r_________BB_g_
_g_____BB____r_________BB_g_
_g_____BB____r_________BB_g_
_g_____BBrrrrrrrrrrrrrrBB_g_
_g________________________g_
_g________________________g_
_g________________________g_
_gggggggggggggggggggggggggg_
____________________________
____________________________
_gggggggggggggggggggggggggg_
_g_____BBrrrrrrrrrrrrrrBB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_gggggggggggggggggggggggggg_
____________________________
'''), tables_source % {'extra_css': '''
body { direction: rtl; }
x-table { border: solid #00f; border-width: 8px 2px;
table-layout: fixed; border-collapse: collapse; }
x-td { border-color: #ff7f7f }
@page { size: 28px 26px; margin: 1px;
border: 1px solid rgba(0, 255, 0, 0.5); }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_4():
assert_pixels('table_td_backgrounds', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B_________rrrrrr_________B_
_B__rrrrrrrSSSSSS_rrrrrr__B_
_B__rrrrrrrSSSSSS_rrrrrr__B_
_B__rrrrrrrSSSSSS_rrrrrr__B_
_B__rrrrrrrSSSSSS_rrrrrr__B_
_B__rrrrrrrSSSSSS_rrrrrr__B_
_B__rrrrrrrSSSSSS_rrrrrr__B_
_B________________________B_
_B__rrrrrr_rrrrrr_________B_
_B__rrrrrr_rrrrrr_________B_
_B__rrrrrr_rrrrrr_________B_
_B__rrrrrr_rrrrrr_________B_
_B__rrrrrr_rrrrrr_________B_
_B__rrrrrr_rrrrrr_________B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed }
x-td { background: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_4_rtl():
assert_pixels('table_td_backgrounds_rtl', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B__rrrrrr_rrrrrr_rrrrrr__B_
_B_________rrrrrr_________B_
_B__rrrrrr_SSSSSSrrrrrrr__B_
_B__rrrrrr_SSSSSSrrrrrrr__B_
_B__rrrrrr_SSSSSSrrrrrrr__B_
_B__rrrrrr_SSSSSSrrrrrrr__B_
_B__rrrrrr_SSSSSSrrrrrrr__B_
_B__rrrrrr_SSSSSSrrrrrrr__B_
_B________________________B_
_B_________rrrrrr_rrrrrr__B_
_B_________rrrrrr_rrrrrr__B_
_B_________rrrrrr_rrrrrr__B_
_B_________rrrrrr_rrrrrr__B_
_B_________rrrrrr_rrrrrr__B_
_B_________rrrrrr_rrrrrr__B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed;
direction: rtl; }
x-td { background: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_5():
assert_pixels('table_row_backgrounds', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B_________bbbbbb_________B_
_B__bbbbbbbpppppp_bbbbbb__B_
_B__bbbbbbbpppppp_bbbbbb__B_
_B__bbbbbbbpppppp_bbbbbb__B_
_B__bbbbbbbpppppp_bbbbbb__B_
_B__bbbbbbbpppppp_bbbbbb__B_
_B__bbbbbbbpppppp_bbbbbb__B_
_B________________________B_
_B__rrrrrr_rrrrrr_________B_
_B__rrrrrr_rrrrrr_________B_
_B__rrrrrr_rrrrrr_________B_
_B__rrrrrr_rrrrrr_________B_
_B__rrrrrr_rrrrrr_________B_
_B__rrrrrr_rrrrrr_________B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed }
x-tbody { background: rgba(0, 0, 255, 1) }
x-tr { background: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_5_rtl():
assert_pixels('table_row_backgrounds_rtl', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B_________bbbbbb_________B_
_B__bbbbbb_ppppppbbbbbbb__B_
_B__bbbbbb_ppppppbbbbbbb__B_
_B__bbbbbb_ppppppbbbbbbb__B_
_B__bbbbbb_ppppppbbbbbbb__B_
_B__bbbbbb_ppppppbbbbbbb__B_
_B__bbbbbb_ppppppbbbbbbb__B_
_B________________________B_
_B_________rrrrrr_rrrrrr__B_
_B_________rrrrrr_rrrrrr__B_
_B_________rrrrrr_rrrrrr__B_
_B_________rrrrrr_rrrrrr__B_
_B_________rrrrrr_rrrrrr__B_
_B_________rrrrrr_rrrrrr__B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed;
direction: rtl; }
x-tbody { background: rgba(0, 0, 255, 1) }
x-tr { background: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_6():
assert_pixels('table_column_backgrounds', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__bbbbbb_bbbbbb_rrrrrr__B_
_B__bbbbbb_bbbbbb_rrrrrr__B_
_B__bbbbbb_bbbbbb_rrrrrr__B_
_B__bbbbbb_bbbbbb_rrrrrr__B_
_B__bbbbbb_bbbbbb_rrrrrr__B_
_B__bbbbbb_bbbbbb_rrrrrr__B_
_B_________bbbbbb_________B_
_B__bbbbbbbpppppp_rrrrrr__B_
_B__bbbbbbbpppppp_rrrrrr__B_
_B__bbbbbbbpppppp_rrrrrr__B_
_B__bbbbbbbpppppp_rrrrrr__B_
_B__bbbbbbbpppppp_rrrrrr__B_
_B__bbbbbbbpppppp_rrrrrr__B_
_B________________________B_
_B__bbbbbb_bbbbbb_________B_
_B__bbbbbb_bbbbbb_________B_
_B__bbbbbb_bbbbbb_________B_
_B__bbbbbb_bbbbbb_________B_
_B__bbbbbb_bbbbbb_________B_
_B__bbbbbb_bbbbbb_________B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed;}
x-colgroup { background: rgba(0, 0, 255, 1) }
x-col { background: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_6_rtl():
assert_pixels('table_column_backgrounds_rtl', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__rrrrrr_bbbbbb_bbbbbb__B_
_B__rrrrrr_bbbbbb_bbbbbb__B_
_B__rrrrrr_bbbbbb_bbbbbb__B_
_B__rrrrrr_bbbbbb_bbbbbb__B_
_B__rrrrrr_bbbbbb_bbbbbb__B_
_B__rrrrrr_bbbbbb_bbbbbb__B_
_B_________bbbbbb_________B_
_B__rrrrrr_ppppppbbbbbbb__B_
_B__rrrrrr_ppppppbbbbbbb__B_
_B__rrrrrr_ppppppbbbbbbb__B_
_B__rrrrrr_ppppppbbbbbbb__B_
_B__rrrrrr_ppppppbbbbbbb__B_
_B__rrrrrr_ppppppbbbbbbb__B_
_B________________________B_
_B_________bbbbbb_bbbbbb__B_
_B_________bbbbbb_bbbbbb__B_
_B_________bbbbbb_bbbbbb__B_
_B_________bbbbbb_bbbbbb__B_
_B_________bbbbbb_bbbbbb__B_
_B_________bbbbbb_bbbbbb__B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed;
direction: rtl; }
x-colgroup { background: rgba(0, 0, 255, 1) }
x-col { background: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_7():
assert_pixels('table_borders_and_row_backgrounds', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bBBBBb_bBBBBb_bBBBBb__B_
_B__bBBBBb_bBBBBb_bBBBBb__B_
_B__bBBBBb_bBBBBb_bBBBBb__B_
_B__bBBBBb_bBBBBb_bBBBBb__B_
_B__bbbbbb_bBBBBb_bbbbbb__B_
_B_________bBBBBb_________B_
_B__rrrrrrrpbbbbp_rrrrrr__B_
_B__r______bBBBBp_r____r__B_
_B__r______bBBBBp_r____r__B_
_B__r______bBBBBp_r____r__B_
_B__r______bBBBBp_r____r__B_
_B__rrrrrrrpppppp_rrrrrr__B_
_B________________________B_
_B__rrrrrr_rrrrrr_________B_
_B__r____r_r____r_________B_
_B__r____r_r____r_________B_
_B__r____r_r____r_________B_
_B__r____r_r____r_________B_
_B__rrrrrr_rrrrrr_________B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed }
x-tr:first-child { background: blue }
x-td { border-color: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_7_rtl():
assert_pixels('table_borders_and_row_backgrounds_rtl', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__bbbbbb_bbbbbb_bbbbbb__B_
_B__bBBBBb_bBBBBb_bBBBBb__B_
_B__bBBBBb_bBBBBb_bBBBBb__B_
_B__bBBBBb_bBBBBb_bBBBBb__B_
_B__bBBBBb_bBBBBb_bBBBBb__B_
_B__bbbbbb_bBBBBb_bbbbbb__B_
_B_________bBBBBb_________B_
_B__rrrrrr_pbbbbprrrrrrr__B_
_B__r____r_pBBBBb______r__B_
_B__r____r_pBBBBb______r__B_
_B__r____r_pBBBBb______r__B_
_B__r____r_pBBBBb______r__B_
_B__rrrrrr_pppppprrrrrrr__B_
_B________________________B_
_B_________rrrrrr_rrrrrr__B_
_B_________r____r_r____r__B_
_B_________r____r_r____r__B_
_B_________r____r_r____r__B_
_B_________r____r_r____r__B_
_B_________rrrrrr_rrrrrr__B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed;
direction: rtl; }
x-tr:first-child { background: blue }
x-td { border-color: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_8():
assert_pixels('table_borders_and_column_backgrounds', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__bbbbbb_rrrrrr_rrrrrr__B_
_B__bBBBBb_r____r_r____r__B_
_B__bBBBBb_r____r_r____r__B_
_B__bBBBBb_r____r_r____r__B_
_B__bBBBBb_r____r_r____r__B_
_B__bbbbbb_r____r_rrrrrr__B_
_B_________r____r_________B_
_B__bbbbbbbpbbbbp_rrrrrr__B_
_B__bBBBBBBbBBBBp_r____r__B_
_B__bBBBBBBbBBBBp_r____r__B_
_B__bBBBBBBbBBBBp_r____r__B_
_B__bBBBBBBbBBBBp_r____r__B_
_B__bbbbbbbpppppp_rrrrrr__B_
_B________________________B_
_B__bbbbbb_rrrrrr_________B_
_B__bBBBBb_r____r_________B_
_B__bBBBBb_r____r_________B_
_B__bBBBBb_r____r_________B_
_B__bBBBBb_r____r_________B_
_B__bbbbbb_rrrrrr_________B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed }
x-col:first-child { background: blue }
x-td { border-color: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_8_rtl():
assert_pixels('table_borders_and_column_backgrounds_rtl', 28, 28, to_pix('''
____________________________
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
_B________________________B_
_B________________________B_
_B__rrrrrr_rrrrrr_bbbbbb__B_
_B__r____r_r____r_bBBBBb__B_
_B__r____r_r____r_bBBBBb__B_
_B__r____r_r____r_bBBBBb__B_
_B__r____r_r____r_bBBBBb__B_
_B__rrrrrr_r____r_bbbbbb__B_
_B_________r____r_________B_
_B__rrrrrr_pbbbbpbbbbbbb__B_
_B__r____r_pBBBBbBBBBBBb__B_
_B__r____r_pBBBBbBBBBBBb__B_
_B__r____r_pBBBBbBBBBBBb__B_
_B__r____r_pBBBBbBBBBBBb__B_
_B__rrrrrr_ppppppbbbbbbb__B_
_B________________________B_
_B_________rrrrrr_bbbbbb__B_
_B_________r____r_bBBBBb__B_
_B_________r____r_bBBBBb__B_
_B_________r____r_bBBBBb__B_
_B_________r____r_bBBBBb__B_
_B_________rrrrrr_bbbbbb__B_
_B________________________B_
_B________________________B_
_BBBBBBBBBBBBBBBBBBBBBBBBBB_
____________________________
'''), tables_source % {'extra_css': '''
x-table { border-color: #00f; table-layout: fixed;
direction: rtl; }
x-col:first-child { background: blue }
x-td { border-color: rgba(255, 0, 0, 0.5) }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_9():
assert_pixels('collapsed_border_thead', 22, 36, '''
______________________
_BBBBBBBBBBBBBBBBBBBB_
_BBBBBBBBBBBBBBBBBBBB_
_BBB____R____R____BBB_
_BBB____R____R____BBB_
_BBB____R____R____BBB_
_BBBBBBBBBBBBBBBBBBBB_
_BBBBBBBBBBBBBBBBBBBB_
__R_____R____R_____R__
__R_____R____R_____R__
__R_____R____R_____R__
__RRRRRRRRRRRRRRRRRR__
__R_____R____R_____R__
__R_____R____R_____R__
__R_____R____R_____R__
__RRRRRRRRRRRRRRRRRR__
______________________
______________________
_BBBBBBBBBBBBBBBBBBBB_
_BBBBBBBBBBBBBBBBBBBB_
_BBB____R____R____BBB_
_BBB____R____R____BBB_
_BBB____R____R____BBB_
_BBBBBBBBBBBBBBBBBBBB_
_BBBBBBBBBBBBBBBBBBBB_
__R_____R____R_____R__
__R_____R____R_____R__
__R_____R____R_____R__
__RRRRRRRRRRRRRRRRRR__
______________________
______________________
______________________
______________________
______________________
______________________
______________________
''', '''
<style>
@page { size: 22px 18px; margin: 1px; background: #fff }
td { border: 1px red solid; width: 4px; height: 3px; }
</style>
<table style="table-layout: fixed; border-collapse: collapse">
<thead style="border: blue solid; border-width: 2px 3px;
"><td></td><td></td><td></td></thead>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>''')
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_10():
assert_pixels('collapsed_border_tfoot', 22, 36, '''
______________________
__RRRRRRRRRRRRRRRRRR__
__R_____R____R_____R__
__R_____R____R_____R__
__R_____R____R_____R__
__RRRRRRRRRRRRRRRRRR__
__R_____R____R_____R__
__R_____R____R_____R__
_BBBBBBBBBBBBBBBBBBBB_
_BBBBBBBBBBBBBBBBBBBB_
_BBB____R____R____BBB_
_BBB____R____R____BBB_
_BBB____R____R____BBB_
_BBBBBBBBBBBBBBBBBBBB_
_BBBBBBBBBBBBBBBBBBBB_
______________________
______________________
______________________
______________________
__RRRRRRRRRRRRRRRRRR__
__R_____R____R_____R__
__R_____R____R_____R__
__R_____R____R_____R__
_BBBBBBBBBBBBBBBBBBBB_
_BBBBBBBBBBBBBBBBBBBB_
_BBB____R____R____BBB_
_BBB____R____R____BBB_
_BBB____R____R____BBB_
_BBBBBBBBBBBBBBBBBBBB_
_BBBBBBBBBBBBBBBBBBBB_
______________________
______________________
______________________
______________________
______________________
______________________
''', '''
<style>
@page { size: 22px 18px; margin: 1px; background: #fff }
td { border: 1px red solid; width: 4px; height: 3px; }
</style>
<table style="table-layout: fixed; margin-left: 1px;
border-collapse: collapse">
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
<tfoot style="border: blue solid; border-width: 2px 3px;
"><td></td><td></td><td></td></tfoot>''')
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_11():
# Regression test for inline table with collapsed border and alignment
# rendering borders incorrectly
# https://github.com/Kozea/WeasyPrint/issues/82
assert_pixels('inline_text_align', 20, 10, '''
____________________
________RRRRRRRRRRR_
________R____R____R_
________R____R____R_
________R____R____R_
________RRRRRRRRRRR_
____________________
____________________
____________________
____________________
''', '''
<style>
@page { size: 20px 10px; margin: 1px; background: #fff }
body { text-align: right; font-size: 0 }
table { display: inline-table; width: 11px }
td { border: 1px red solid; width: 4px; height: 3px }
</style>
<table style="table-layout: fixed; border-collapse: collapse">
<tr><td></td><td></td></tr>''')
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_12():
assert_pixels('table_collapsed_borders', 28, 28, to_pix('''
____________________________
_________BBBBBBBBBBBBBBBBBB_
_________BBBBBBBBBBBBBBBBBB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BBrrrrr____rrrrrBB_
_________BB____r_________BB_
_________BB____r_________BB_
_________BB____r_________BB_
_________BB____r_________BB_
_________BBrrrrrrrrrrrrrrBB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BB____r____r____BB_
_________BBBBBBBBBBBBBBBBBB_
_________BBBBBBBBBBBBBBBBBB_
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
'''), tables_source % {'extra_css': '''
body { direction: rtl }
x-table { border: 2px solid #00f; table-layout: fixed;
border-collapse: collapse }
x-td { border-color: #ff7f7f }
'''})
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_13():
assert_pixels('table_collapsed_borders_paged', 28, 52, to_pix('''
____________________________
_gggggggggggggggggggggggggg_
_g________________________g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BBrrrrr____rrrrrBB_g_
_g_____BB____r_________BB_g_
_g_____BB____r_________BB_g_
_g_____BB____r_________BB_g_
_g_____BB____r_________BB_g_
_g_____BBrrrrrrrrrrrrrrBB_g_
_g________________________g_
_g________________________g_
_g________________________g_
_gggggggggggggggggggggggggg_
____________________________
____________________________
_gggggggggggggggggggggggggg_
_g_____BBrrrrrrrrrrrrrrBB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BB____r____r____BB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g_____BBBBBBBBBBBBBBBBBB_g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_g________________________g_
_gggggggggggggggggggggggggg_
____________________________
'''), tables_source % {'extra_css': '''
body { direction: rtl }
x-table { border: solid #00f; border-width: 8px 2px;
table-layout: fixed; border-collapse: collapse }
x-td { border-color: #ff7f7f }
@page { size: 28px 26px; margin: 1px;
border: 1px solid rgba(0, 255, 0, 0.5); }
'''})
@pytest.mark.xfail
@assert_no_logs
@requires('cairo', (1, 12, 0))
def test_tables_14():
assert_pixels('table_background_column_paged', 28, 52, to_pix('''
____________________________
_RRR_RRR_RRR________________
_RRR_RRR_RRR________________
_RRR_RRR_RRR________________
_RRR_RRR_RRR________________
_RRR_RRR_RRR________________
_RRR_RRR_RRR________________
_RRR_RRR_RRR________________
_RRR_RRR_RRR________________
_RRR_RRR_RRR________________
_RRR_RRR_RRR________________
_____RRR____________________
_RRRRRRR_RRR________________
_RRRRRRR_RRR________________
_RRRRRRR_RRR________________
_RRRRRRR_RRR________________
_RRRRRRR_RRR________________
_RRRRRRR_RRR________________
_RRRRRRR_RRR________________
_RRRRRRR_RRR________________
_RRRRRRR_RRR________________
_RRRRRRR_RRR________________
____________________________
____________________________
____________________________
____________________________
____________________________
_RRR_RRR____________________
_RRR_RRR____________________
_RRR_RRR____________________
_RRR_RRR____________________
_RRR_RRR____________________
_RRR_RRR____________________
_RRR_RRR____________________
_RRR_RRR____________________
_RRR_RRR____________________
_RRR_RRR____________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
____________________________
'''), tables_source % {'extra_css': '''
@page { size: 28px 26px }
x-table { margin: 0; padding: 0; border: 0 }
x-col { background: red }
x-td { padding: 0; width: 1px; height: 8px }
'''})
|
'''
Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
This module holds common helper functions used for testing.
Created on Aug 15, 2014
@author: dfleck
'''
from twisted.internet import defer,reactor
from twisted.python import log
from gmu.chord.ChordNode import ChordNode
from gmu.chord.GmuServerFactory import GmuServerFactory
from gmu.chord.MetricsMessageObserver import MetricsMessageObserver
from gmu.chord.CopyEnvelope import CopyEnvelope
from gmu.chord.FingerEntry import FingerEntry
from gmu.chord import Config, Utils, ClassIDFactory
from SampleClient import SampleClient
from gmu.netclient.classChordNetworkChord import classChordNetworkChord
import datetime, random, hashlib
from TestMessageObserver import TestMessageObserver
import os, psutil
import socket
from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
AD = "-"
AF_INET6 = getattr(socket, 'AF_INET6', object())
proto_map = {
(AF_INET, SOCK_STREAM): 'tcp',
(AF_INET6, SOCK_STREAM): 'tcp6',
(AF_INET, SOCK_DGRAM): 'udp',
(AF_INET6, SOCK_DGRAM): 'udp6',
}
loggingOn = False
import sys
def printLogger(aDict):
print(aDict)
sys.stdout.flush()
@defer.inlineCallbacks
def waitForConnectionCache(_=None):
if Config.USE_CONNECTION_CACHE:
log.msg("Waiting for ConnectionCache to close out...")
yield wait(Config.CONNECTION_CACHE_DELAY + 2)
def generateID(ipAddr, port, theEnclave, classID):
'''Generates the node's ID from its IP address and port.
The enclave is added on as the high-order bits.
The final ID looks like enclaveBits | uniqIDBits.
So, if the enclave is 0xFF11 and the uniq ID is 0x12345, theId = 0xFF1112345.
'''
if classID == None:
# Build a random class spec
classID = ClassIDFactory.generateID()
h = hashlib.new('sha1')
h.update(ipAddr)
h.update(str(port))
# Grab the bits from the ip/port hash
ipPortBits = bin(int(h.hexdigest(), 16))
ipPortInt = int(ipPortBits, 2)
theId = Utils.generateNodeID(ipPortInt, theEnclave, classChars=classID)
return theId
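# Worked example (illustrative values only; the exact bit layout is delegated to
# Utils.generateNodeID, so the numbers below just show the composition idea):
#   generateID('127.0.0.1', 12345, 0xFF11, None)
#     -> sha1('127.0.0.1' + '12345') interpreted as an int gives the uniq ID bits,
#     -> Utils.generateNodeID prepends the enclave bits, producing enclave|uniqID,
#        e.g. enclave 0xFF11 + uniq 0x12345 -> 0xFF1112345.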
@defer.inlineCallbacks
def startupBootstrapNode(ip, port=12345, enclave='localhost', classID=None):
'''Start a Bootstrap node'''
# Generate an ID
nodeID = generateID(ip, port, enclave, classID)
print("DEBUG: BS nodeID is %s" % nodeID)
bsNode = ChordNode(ip, port, nodeID)
serverFactory = GmuServerFactory(bsNode, unsafeTracebacks=True)
MetricsMessageObserver(bsNode)
testObserver = TestMessageObserver(bsNode)
enableAutoDiscovery = False
status = yield bsNode.join(None, enableAutoDiscovery, enclave, None, True, serverFactory)
defer.returnValue( (status, bsNode, testObserver) )
@defer.inlineCallbacks
def startupClientNode(ip, port, enclave, bootstrapNodeLocation, allowFloatingIP=True, classID=None):
'''Start a client node and connect it to the network.
Returns (status=True|False, node, testObserver)
'''
# Generate an ID
nodeID = generateID(ip, port, enclave, classID)
print("DEBUG: Client nodeID is %s" % nodeID)
normalNode = ChordNode(ip, port, nodeID, allowFloatingIP)
MetricsMessageObserver(normalNode)
testObserver = TestMessageObserver(normalNode)
enableAutoDiscovery = False
#import TestUtils
#yield TestUtils.wait(4)
authenticationPayload = "open-sesame"
status = yield normalNode.join([ bootstrapNodeLocation ] , enableAutoDiscovery, enclave, authenticationPayload, False)
defer.returnValue( (status, normalNode, testObserver) )
@defer.inlineCallbacks
def startNodeUsingAPI(ip, port, bootstrapNodeLocation, enclaveStr, useAutoDiscover, isBootstrapNode):
'''Start up a client node using the client/network API.
Returns a deferred that fires after the join completes,
yielding a tuple (joinStatus, clientAPI, networkAPI).
'''
d = defer.Deferred()
clientAPI = SampleClient(ip, port, None)
networkAPI = classChordNetworkChord(clientAPI, port, ip)
nodeID = networkAPI.generateNodeID(str(port), enclaveStr) # Build the ID with the required enclave bits. Use "port" because it is unique across tests.
# Join the network
if bootstrapNodeLocation is None:
bootstrapNodeList = None
else:
bootstrapNodeList = [ bootstrapNodeLocation ]
callFunc = lambda result, payload: shouldSucceedCallback(result, payload, d)
networkAPI.start(callFunc, nodeID, enclaveStr, "authenticate:succeed", bootstrapNodeList, isBootstrapNode, useAutoDiscover)
# Wait for the join to finish
joinStatus = yield d # This is the value returned from shouldSucceedCallback
# Now return everything
defer.returnValue( (joinStatus, clientAPI, networkAPI) )
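# Illustrative usage (bsLocation is an assumed NodeLocation, not defined here);
# must run inside an @defer.inlineCallbacks function so the yield resolves:
#   joinStatus, clientAPI, networkAPI = yield startNodeUsingAPI(
#       '127.0.0.1', 12350, bsLocation, 'localhost', False, False)
#   assert joinStatus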
def shouldSucceedCallback(result, payload, deferToFire):
'''Status callback from the networkAPI.start call. Should be true.
Uses a lambda function so we can sneak in a few more parameters :-)
'''
if result:
deferToFire.callback(result)
else:
deferToFire.errback(result)
def defWait(dummy, seconds=5):
return wait(seconds)
def wait(seconds=5):
d = defer.Deferred()
print("Waiting for %d seconds..." % seconds)
# simulate a delayed result by asking the reactor to fire the
# deferred's callback after `seconds` seconds
reactor.callLater(seconds, d.callback, True)
return d
def sendFlood(chordNode,messageNum,enclave, data=""):
'''Send a flooding message to the enclave specified. Content is "messageNum".
Returns a deferred status
'''
# Send out a flooding message
msgText = { "type" : "STORE", "loc" : chordNode.nodeLocation, "msgNum" : messageNum, "data" : data }
# Build the envelope
env = CopyEnvelope()
env['ttl'] = datetime.datetime.now() + datetime.timedelta(minutes=10)
env['source'] = chordNode.nodeLocation
env['type'] = 'flood'
env['enclave'] = enclave # Flooding
env['msgID'] = random.getrandbits(128) # TODO: Something better here!
# Send the message
d = chordNode.sendFloodingMessage(msgText, env)
return d
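# Illustrative usage (assumes bsNode was started via startupBootstrapNode or
# startupClientNode; run inside an @defer.inlineCallbacks test):
#   status = yield sendFlood(bsNode, 1, 'localhost', data='hello')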
def sendP2P(src, dst, messageNum, data=""):
'''Send a P2P message to the dst node specified. Content is "messageNum".
Returns a deferred status
'''
# Build the P2P message payload
msgText = { "type" : "STORE", "loc" : src.nodeLocation, "msgNum" : messageNum, "data": data }
# Build the envelope
env = CopyEnvelope()
env['ttl'] = datetime.datetime.now() + datetime.timedelta(minutes=10)
env['source'] = src.nodeLocation
env['type'] = 'p2p'
env['destination'] = dst.nodeLocation.id
env['msgID'] = random.getrandbits(128) # TODO: Something better here!
# Send the message
d = src.sendSyncMessage(msgText, env)
return d
def sendClassQuery(src, classSpec, messageNum, data=""):
'''Send a Class query message to the class spec specified. Content is "messageNum".
Returns a deferred status
'''
# Send out a class message
msgText = { "type" : "STORE", "loc" : src.nodeLocation, "msgNum" : messageNum, "data": data }
# Build the envelope
env = CopyEnvelope()
env['ttl'] = datetime.datetime.now() + datetime.timedelta(minutes=10)
env['source'] = src.nodeLocation
env['type'] = 'classType'
env['destination'] = classSpec
env['msgID'] = random.getrandbits(128) # TODO: Something better here!
# Send the message
d = src.sendClassMessage(msgText, env)
return d
def didNodeReceive(observers, node, messageNum):
'''Return True if nodes received the message, False otherwise.'''
for testObserver in observers:
if testObserver.chordNode == node:
return testObserver.messageNumStored(messageNum)
log.err("didNodeReceive: could not find observer.")
return False
def didReceive(observers, enclave, messageNum, expectedCount):
'''Return True if all nodes received the message, False otherwise.'''
actualCount = 0
for testObserver in observers:
if enclave == 'ALL' or enclave in testObserver.chordNode.remote_getEnclaveNames():
if testObserver.messageNumStored(messageNum):
actualCount += 1
log.err("didReceive: actualCount:%d expectedCount:%d" % (actualCount, expectedCount))
return actualCount == expectedCount
def didNotReceive(observers, enclave, messageNum, expectedCount):
'''Return True if all nodes did not receive the message, False otherwise.'''
actualCount = 0
for testObserver in observers:
if enclave == 'ALL' or enclave in testObserver.chordNode.remote_getEnclaveNames():
if not testObserver.messageNumStored(messageNum):
actualCount += 1
if actualCount != expectedCount:
log.err("didNotReceive: actualCount:%d expectedCount:%d" % (actualCount, expectedCount))
return actualCount == expectedCount
def showOpenConnections():
# Get my PID
pid = os.getpid()
# Run the lsof command
p = psutil.Process(pid)
conns = p.connections()
templ = "%-5s %-30s %-30s %-13s %-6s "
print(templ % (
"Proto", "Local address", "Remote address", "Status", "PID" ))
# Print output
for c in conns:
laddr = "%s:%s" % (c.laddr)
raddr = ""
if c.raddr:
raddr = "%s:%s" % (c.raddr)
print(templ % (
proto_map[(c.family, c.type)],
laddr,
raddr or AD,
c.status,
pid or AD
))
print("Total net connections: %d" % len(conns))
|
import unittest
from spaced_repetition.domain.domain_helpers import validate_param
class TestDomainHelpers(unittest.TestCase):
def test_validate_input(self):
self.assertEqual(validate_param(param='valid name',
max_length=10),
'valid name')
def test_validate_input_raises_wrong_type(self):
with self.assertRaises(TypeError) as context:
validate_param(param=3, max_length=10)
self.assertEqual(str(context.exception),
f"Input should by of type 'str', but is of type {int}")
def test_validate_input_raises_empty(self):
with self.assertRaises(ValueError) as context:
validate_param(param='', max_length=10)
self.assertEqual(str(context.exception),
"Input cannot be empty.")
def test_validate_input_raises_too_long(self):
with self.assertRaises(ValueError) as context:
validate_param(param='12345', max_length=4, label='TestLabel')
self.assertEqual(str(context.exception),
"TestLabel too long, max length = 4 chars.")
def test_validate_name_raises_whitespace_on_end(self):
with self.assertRaises(ValueError) as context:
validate_param(param=' before', max_length=10, label='TestLabel')
self.assertEqual(str(context.exception),
"Error: TestLabel ' before' has whitespace on either end.")
with self.assertRaises(ValueError) as context:
validate_param(param='after ', max_length=10, label='TestLabel')
self.assertEqual(str(context.exception),
"Error: TestLabel 'after ' has whitespace on either end.")
|
from __future__ import absolute_import
from django.views.generic import View
from sentry.models import (GroupSubscriptionReason, Organization, Project)
from sentry.utils.http import absolute_uri
from .mail import MailPreview
class DebugNewProcessingIssuesEmailView(View):
reprocessing_active = True
def get(self, request):
from sentry.plugins.sentry_mail.activity.new_processing_issues import summarize_issues
org = Organization(
id=1,
slug='organization',
name='My Company'
)
project = Project(
id=1,
organization=org,
slug='project',
name='My Project',
)
return MailPreview(
html_template='sentry/emails/activity/new_processing_issues.html',
text_template='sentry/emails/activity/new_processing_issues.txt',
context={
'project':
project,
'reason':
GroupSubscriptionReason.descriptions[GroupSubscriptionReason.processing_issue],
'issues':
summarize_issues(
[
{
'data': {
'image_arch':
'arm64',
'image_path':
'/var/containers/Bundle/Application/FB14D416-DE4E-4224-9789-6B88E9C42601/CrashProbeiOS.app/CrashProbeiOS',
'image_uuid':
'a2df1794-e0c7-371c-baa4-93eac340a78a'
},
'object': 'dsym:a2df1794-e0c7-371c-baa4-93eac340a78a',
'scope': 'native',
'type': 'native_missing_dsym'
},
{
'data': {
'image_arch':
'arm64',
'image_path':
'/var/containers/Bundle/Application/FB14D416-DE4E-4224-9789-6B88E9C42601/CrashProbeiOS.app/libCrashProbeiOS',
'image_uuid':
'12dc1b4c-a01b-463f-ae88-5cf0c31ae680'
},
'object': 'dsym:12dc1b4c-a01b-463f-ae88-5cf0c31ae680',
'scope': 'native',
'type': 'native_bad_dsym'
},
]
),
'reprocessing_active':
self.reprocessing_active,
'info_url':
absolute_uri('/{}/{}/settings/processing-issues/'.format(
org.slug,
project.slug,
)),
},
).render(request)
class DebugNewProcessingIssuesNoReprocessingEmailView(DebugNewProcessingIssuesEmailView):
reprocessing_active = False
|
import json
input_file=open('rawdata.json', 'r')
rawdata_decode=json.load(input_file)
lst=[]
output_file=open('rawdata_filtered.json', 'w')
whitelist = open("whitelist.json", "r")
whitelist_decode=json.load(whitelist)
#whitelist_stack = whitelist_decode.split()
#print whitelist_stack
for i in rawdata_decode:
print i
for x in whitelist_decode:
if(i['senderid']==x['senderid']):
lst.append(i)
json.dump(lst,output_file,indent=4)
input_file.close()
output_file.close()
whitelist.close()
|
from _runtime import server, CONFIG
from fastapi import FastAPI, Request, Body, Response, status
from fastapi.responses import HTMLResponse, StreamingResponse, FileResponse
from fastapi_utils.tasks import repeat_every
import uvicorn
import rsa
import os
import sys
import hashlib
from pydantic import BaseModel, create_model
from typing import Optional
from util import *
from classes import *
import urllib
import logging
import base64
import json
import random
import time
import pickle
from endpoints import server_endpoint, client_endpoint, compendium_endpoint, character_endpoint, campaign_endpoint, image_endpoint, player_endpoint
from _api import *
from threading import Thread
from markdown2 import Markdown
# Configs
VERSION = 0
logger = logging.getLogger("uvicorn.error")
'''if os.path.exists('prikey.pem') and os.path.exists('pubkey.pem'):
try:
logger.info('Loading RSA keys from PEM files.')
with open('pubkey.pem','rb') as pub:
PUBLIC_KEY = rsa.PublicKey.load_pkcs1(pub.read())
with open('prikey.pem','rb') as pri:
PRIVATE_KEY = rsa.PrivateKey.load_pkcs1(pri.read())
except:
logger.warning('Error loading old keys. Generating new ones.')
PUBLIC_KEY, PRIVATE_KEY = rsa.newkeys(1024,accurate=True)
with open('pubkey.pem','wb') as pub:
pub.write(PUBLIC_KEY.save_pkcs1())
with open('prikey.pem','wb') as pri:
pri.write(PRIVATE_KEY.save_pkcs1())
else:
logger.info('Generating new RSA keys.')
PUBLIC_KEY, PRIVATE_KEY = rsa.newkeys(1024,accurate=True)
with open('pubkey.pem','wb') as pub:
pub.write(PUBLIC_KEY.save_pkcs1())
with open('prikey.pem','wb') as pri:
pri.write(PRIVATE_KEY.save_pkcs1())'''
def ep_reload(endpoint):
try:
data = get5e_direct(endpoint)
with open(os.path.join('database','cached','open5e',endpoint+'.json'),'w') as f:
json.dump(data,f)
#logger.info('Reloaded '+endpoint)
except:
logger.warning('Open5e Endpoint '+endpoint+' is not accessible.')
def reload_open5e_cache(endpoints=['spells','monsters','sections','magicitems']):
threads = []
for endpoint in endpoints:
ep_reload(endpoint)
return threads
# Setup
'''Build database'''
folders = ['users','sessions','campaigns','characters','cached',os.path.join('cached','open5e'),'images']
for f in folders:
try:
os.makedirs(os.path.join('database',f))
with open(os.path.join('database',f,'registry.json'),'w') as reg:
reg.write('{}')
except FileExistsError:
pass
'''reload_open5e_cache()
with open(os.path.join('database','cached','open5e','last_update.ini'),'w') as f:
f.write(str(int(time.time())))
logger.info('Reloaded Open5e Cache.')'''
'''Get OpenAPI configs'''
with open(os.path.join('config','openapi.json'),'r') as c:
openapicfg = json.load(c)
tags_meta = openapicfg['metadata']
# App
# Instantiate server instance - todo add stateful cache
app = FastAPI(openapi_tags=tags_meta)
# Routers
app.include_router(
server_endpoint.router,
prefix='/server',
tags=['server']
)
app.include_router(
client_endpoint.router,
prefix='/client/{fingerprint}',
tags=['client']
)
app.include_router(
compendium_endpoint.router,
prefix='/compendium',
tags=['compendium']
)
app.include_router(
character_endpoint.router,
prefix='/characters/{fingerprint}',
tags=['characters']
)
app.include_router(
campaign_endpoint.router,
prefix='/campaigns/{fingerprint}',
tags=['campaigns']
)
app.include_router(
image_endpoint.router,
prefix='/images',
tags=['images']
)
app.include_router(
player_endpoint.router,
prefix='/campaigns/{fingerprint}/player/{campaign}/{map}',
tags=['player']
)
@app.get('/', response_class=HTMLResponse, include_in_schema=False) # Get index.html when navigated to root
async def groot():
with open(os.path.join('client','index.html'),'r') as f:
return f.read()
@app.get('/characters', response_class=HTMLResponse, include_in_schema=False)
async def gchars():
with open(os.path.join('client','characters.html'),'r') as f:
return f.read()
@app.get('/campaigns', response_class=HTMLResponse, include_in_schema=False)
async def gcamps():
with open(os.path.join('client','campaigns.html'),'r') as f:
return f.read()
@app.get('/help', response_class=HTMLResponse, include_in_schema=False)
async def ghelp():
with open(os.path.join('client','help.html'),'r') as f:
return f.read()
@app.get('/player', response_class=HTMLResponse, include_in_schema=False)
async def gplayer():
with open(os.path.join('client','player.html'),'r') as f:
return f.read()
# Load web server
files = list(os.walk('client'))
slashtype = '/'
aux = '/'
if sys.platform == 'win32':
slashtype = '\\'
aux = '\\\\'
web_paths = []
for f in files:
split_path = f[0].split(slashtype)
if len(split_path) > 1:
new_path = '/'.join(split_path[1:])+'/'
else:
new_path = ''
dirpath = aux.join(f[0].split(slashtype))
for fn in f[2]:
ext = os.path.splitext(fn)[1]
code = '\n'.join([
'@app.get("/'+new_path+fn+'", include_in_schema=False)',
'async def web_'+fn.replace('.','_').replace('-','_').replace(' ','_').replace('\'','').replace('"','')+'():',
'\treturn FileResponse("'+dirpath+aux+fn+'")'
])
web_paths.append(new_path+fn)
exec(
code,
globals(),
locals()
)
logger.info(f'Loaded {len(web_paths)} static files.')
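# Alternative sketch (not used here): FastAPI can serve the same tree without
# exec-generated handlers via Starlette's StaticFiles. Note this serves files
# under a mount prefix rather than at the root paths produced by the loop
# above, and the prefix below is chosen to avoid the existing /static/ route,
# so it is not a drop-in replacement:
#   from fastapi.staticfiles import StaticFiles
#   app.mount('/static-files', StaticFiles(directory='client'), name='client-static')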
@app.get('/static/')
async def get_static_file_paths():
global web_paths
return web_paths
def fix():
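# One-off migration for pickled campaign files: backfills missing 'chat' and
# 'initiative' keys on each map, optionally clears chat entries (per the
# clear_chat config flag), and ensures the owner's owned_campaigns list
# references the campaign before re-pickling.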
for _f in [os.path.join('database','campaigns',i) for i in os.listdir(os.path.join('database','campaigns')) if i.endswith('.pkl')]:
with open(_f,'rb') as f:
obj = pickle.load(f)
for m in obj.maps.keys():
if not 'chat' in obj.maps[m].keys():
obj.maps[m]['chat'] = []
nchat = []
if (not eval(CONFIG['RUNTIME']['clear_chat'])):
for c in obj.maps[m]['chat']:
if type(c) == dict:
nchat.append(c)
obj.maps[m]['chat'] = nchat[:]
if not 'initiative' in obj.maps[m].keys():
obj.maps[m]['initiative'] = {
'running':False,
'order':{},
'current':None,
'started':False
}
load_user(obj.owner)
if not obj.id in server.users[obj.owner].owned_campaigns:
server.users[obj.owner].owned_campaigns.append(obj.id)
cache_user(obj.owner)
with open(_f,'wb') as f:
pickle.dump(obj,f)
# Start tasks
@app.on_event('startup')
async def load_users():
reg = get_user_registry()
for u in reg.keys():
if os.path.exists(os.path.join('database','users',u+'.pkl')):
server.users[u] = os.path.join('database','users',u+'.pkl')
fix()
# Load periodic functions
@app.on_event('startup') # Run on startup
@repeat_every(seconds=5) # Run on startup
async def check_connections_task(): # Task to check whether connections have timed out
newconn = {}
oldconn = server.connections.copy() # Create copy of old connections dictionary
for conn in oldconn.keys(): # Iterate through connection IDs
if oldconn[conn].timeout >= time.time(): # If not timed out
newconn[conn] = oldconn[conn] # Add to new dict
else:
logger.info('Timed out connection '+conn)
cache_user(server.connections[conn].uid) # Cache the user object to a pickle file
server.connections = newconn.copy() # Replace the old connections dictionary with the new one
@app.on_event('startup') # Run on startup
@repeat_every(seconds=120) # Run every 2 minutes
async def reload_cached():
if not os.path.exists(os.path.join('database','cached','open5e','last_update.ini')):
with open(os.path.join('database','cached','open5e','last_update.ini'),'w') as f:
f.write(str(int(time.time())))
t = Thread(target=reload_open5e_cache)
t.start()
else:
with open(os.path.join('database','cached','open5e','last_update.ini'),'r') as f:
dat = f.read()
if dat == '':
dat = 0
if int(dat)+600 < time.time() or dat == '':
t = Thread(target=reload_open5e_cache)
t.start()
with open(os.path.join('database','cached','open5e','last_update.ini'),'w') as f:
f.write(str(int(time.time())))
if __name__ == "__main__":
uvicorn.run('main:app', host=CONFIG['RUNTIME']['server_ip'], port=int(CONFIG['RUNTIME']['server_port']), log_level="info", access_log=False)
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..legacy import antsIntroduction
def test_antsIntroduction_inputs():
input_map = dict(
args=dict(
argstr="%s",
),
bias_field_correction=dict(
argstr="-n 1",
),
dimension=dict(
argstr="-d %d",
position=1,
usedefault=True,
),
environ=dict(
nohash=True,
usedefault=True,
),
force_proceed=dict(
argstr="-f 1",
),
input_image=dict(
argstr="-i %s",
copyfile=False,
extensions=None,
mandatory=True,
),
inverse_warp_template_labels=dict(
argstr="-l",
),
max_iterations=dict(
argstr="-m %s",
sep="x",
),
num_threads=dict(
nohash=True,
usedefault=True,
),
out_prefix=dict(
argstr="-o %s",
usedefault=True,
),
quality_check=dict(
argstr="-q 1",
),
reference_image=dict(
argstr="-r %s",
copyfile=True,
extensions=None,
mandatory=True,
),
similarity_metric=dict(
argstr="-s %s",
),
transformation_model=dict(
argstr="-t %s",
usedefault=True,
),
)
inputs = antsIntroduction.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_antsIntroduction_outputs():
output_map = dict(
affine_transformation=dict(
extensions=None,
),
input_file=dict(
extensions=None,
),
inverse_warp_field=dict(
extensions=None,
),
output_file=dict(
extensions=None,
),
warp_field=dict(
extensions=None,
),
)
outputs = antsIntroduction.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
#!/bin/env python3
import sys
import json
import math
from difflib import SequenceMatcher
def load_file(file):
with open(file, "r") as f:
return json.load(f)
def check_title(actual, expected):
if not "title" in actual.keys():
if not "title" in expected:
return 20
return 0
if not "title" in expected:
expected["title"] = ""
similarity = SequenceMatcher(None, actual["title"], expected["title"]).ratio()
return 20 * similarity
def check_versions(actual, expected):
if not "versions" in actual:
if not "versions" in expected:
return 20
return 0
if not "versions" in expected:
expected["versions"] = {}
actual = actual["versions"]
expected = expected["versions"]
if len(expected) == 0:
if len(actual) == 0:
return 20
return 0
max_score = len(expected) * 4
score = 0
for key in expected:
if not key in actual:
continue
score += 1
if len(expected[key]) == len(actual[key]):
score += 1
set_e = set(expected[key])
set_a = set(actual[key])
score += 2 * len(set_e.intersection(set_a)) / len(set_e)
return 20 * score / max_score
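# Worked example (illustrative): expected["versions"] = {"1.0": ["A", "B"]},
# actual["versions"] = {"1.0": ["A"]} -> key present (+1), lengths differ (+0),
# overlap 1 of 2 (+2*1/2 = 1) -> score 2 out of max_score 4 -> 20 * 2 / 4 = 10 points.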
def check_toc(actual, expected):
if not "table_of_contents" in actual:
if not "table_of_contents" in expected:
return 20
return 0
if not "table_of_contents" in expected:
expected["table_of_contents"] = []
actual = list(filter(lambda x: len(x) == 3, actual["table_of_contents"]))
expected = expected["table_of_contents"]
if len(expected) == 0:
if len(actual) == 0:
return 20
return 0
max_score = 5 * len(expected)
score = 0
actual_numbers = list(map(lambda x: x[0], actual))
expected_numbers = list(map(lambda x: x[0], expected))
score += len(expected) * SequenceMatcher(None, actual_numbers, expected_numbers).ratio()
actual_sections = list(map(lambda x: x[1], actual))
expected_sections = list(map(lambda x: x[1], expected))
score += len(expected) * SequenceMatcher(None, actual_sections, expected_sections).ratio()
actual_pages = list(map(lambda x: x[2], actual))
expected_pages = list(map(lambda x: x[2], expected))
score += len(expected) * SequenceMatcher(None, actual_pages, expected_pages).ratio()
for item in actual:
if item in expected:
score += 2
return 20 * score / max_score
def check_revisions(actual, expected):
if not "revisions" in actual:
if not "revisions" in expected:
return 20
return 0
if not "revisions" in expected:
expected["revisions"] = []
actual = list(filter(lambda x: set(x) == {"version", "date", "description"}, actual["revisions"]))
expected = expected["revisions"]
if len(expected) == 0:
if len(actual) == 0:
return 20
return 0
max_score = 5 * len(expected)
score = 0
actual_versions = list(map(lambda x: x["version"], actual))
expected_versions = list(map(lambda x: x["version"], expected))
score += len(expected) * SequenceMatcher(None, actual_versions, expected_versions).ratio()
actual_dates = list(map(lambda x: x["date"], actual))
expected_dates = list(map(lambda x: x["date"], expected))
score += len(expected) * SequenceMatcher(None, actual_dates, expected_dates).ratio()
actual_descriptions = list(map(lambda x: x["description"], actual))
expected_descriptions = list(map(lambda x: x["description"], expected))
score += len(expected) * SequenceMatcher(None, actual_descriptions, expected_descriptions).ratio()
for item in actual:
if item in expected:
score += 2
return 20 * score / max_score
def check_bibliography(actual, expected):
if not "bibliography" in actual:
if not "bibliography" in expected:
return 20
return 0
if not "bibliography" in expected:
expected["bibliography"] = {}
actual = actual["bibliography"]
expected = expected["bibliography"]
if len(expected) == 0:
if len(actual) == 0:
return 20
return 0
max_score = 2 * len(expected)
score = 0
for key in actual:
if key not in expected:
continue
score += 1
score += SequenceMatcher(None, actual[key], expected[key]).ratio()
return 20 * score / max_score
def main():
expected = load_file(sys.argv[1])
actual = load_file(sys.argv[2])
verbose = False
if len(sys.argv) == 4:
verbose = True if sys.argv[3] == "--verbose" else False
checks = (check_title, check_versions, check_toc, check_revisions, check_bibliography)
check_results = []
points = 0
for check in checks:
res = check(actual, expected)
points += res
check_results.append(res)
print(math.ceil(points))
if verbose:
print("\n------------------------------------")
print(f"Detailed results for {sys.argv[2]}:")
print(f"Title - {check_results[0]}")
print(f"Versions - {check_results[1]}")
print(f"Table of contents - {check_results[2]}")
print(f"Revisions - {check_results[3]}")
print(f"Bibliography - {check_results[4]}")
print("------------------------------------")
if __name__ == "__main__":
if len(sys.argv) < 3:
print(f"USAGE: {sys.argv[0]} <reference_json> <output_json>", file=sys.stderr)
sys.exit(1)
main()
|
# --------------------------------------------------------
# Visual Detection: State-of-the-Art
# Copyright: Hanbo Zhang
# Licensed under The MIT License [see LICENSE for details]
# Written by Hanbo
# based on code from Jiasen Lu, Jianwei Yang, Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import pprint
import time
import torch.nn as nn
import pickle
from torch.utils.data.sampler import Sampler
import matplotlib
matplotlib.use('Agg')
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import *
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir, parse_args, read_cfgs
from model.utils.net_utils import weights_normal_init, save_net, load_net, \
adjust_learning_rate, save_checkpoint, clip_gradient, gradient_norm
from model.utils.data_viewer import dataViewer
from model.utils.blob import image_unnormalize
from model.FasterRCNN import fasterRCNN
from model.FPN import FPN
from model.SSD import SSD
from model.FasterRCNN_VMRN import fasterRCNN_VMRN
from model.FCGN import FCGN
from model.SSD_VMRN import SSD_VMRN
from model.MGN import MGN
from model.AllinOne import All_in_One
import model.VAM as VAM
from model.utils.net_utils import objdet_inference, grasp_inference, objgrasp_inference, rel_prob_to_mat
from datasets.factory import get_imdb
import warnings
torch.set_default_tensor_type(torch.FloatTensor)
# implemented-algorithm list
LEGAL_FRAMES = {"faster_rcnn", "ssd", "fpn", "faster_rcnn_vmrn", "ssd_vmrn", "all_in_one", "fcgn", "mgn", "vam"}
class sampler(Sampler):
def __init__(self, train_size, batch_size):
self.num_data = train_size
self.num_batch = int(train_size / batch_size)
self.batch_size = batch_size
self.range = torch.arange(0, batch_size).view(1, batch_size).long()
self.leftover_flag = False
if train_size % batch_size:
self.leftover = torch.arange(self.num_batch * batch_size, train_size).long()
self.leftover_flag = True
def __iter__(self):
rand_num = torch.randperm(self.num_batch).view(-1, 1) * self.batch_size
self.rand_num = rand_num.expand(self.num_batch, self.batch_size) + self.range
self.rand_num_view = self.rand_num.view(-1)
if self.leftover_flag:
self.rand_num_view = torch.cat((self.rand_num_view, self.leftover), 0)
return iter(self.rand_num_view)
def __len__(self):
return self.num_data
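# Worked example (illustrative): sampler(train_size=10, batch_size=4) gives
# num_batch=2 and leftover=[8, 9]. A permutation of batch starts, e.g. [4, 0],
# expands to [4,5,6,7, 0,1,2,3] and the leftover indices are appended, yielding
# [4,5,6,7, 0,1,2,3, 8,9] -- whole mini-batches are shuffled, the order inside
# each batch is preserved, and the tail always comes last.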
def makeCudaData(data_list):
for i, data in enumerate(data_list):
data_list[i] = data.cuda()
return data_list
def init_network(args, n_cls):
"""
:param args: parsed command-line arguments defining the hyperparameters
:param n_cls: number of object classes for initializing network output layers
:return: (Network, optimizer)
"""
# initialize the network here
if args.frame == 'faster_rcnn':
conv_num = str(int(np.log2(cfg.RCNN_COMMON.FEAT_STRIDE[0])))
Network = fasterRCNN(n_cls, class_agnostic=args.class_agnostic, feat_name=args.net,
feat_list=('conv' + conv_num,), pretrained=True)
elif args.frame == 'faster_rcnn_vmrn':
conv_num = str(int(np.log2(cfg.RCNN_COMMON.FEAT_STRIDE[0])))
Network = fasterRCNN_VMRN(n_cls, class_agnostic=args.class_agnostic, feat_name=args.net,
feat_list=('conv' + conv_num,), pretrained=True)
elif args.frame == 'fpn':
Network = FPN(n_cls, class_agnostic=args.class_agnostic, feat_name=args.net,
feat_list=('conv2', 'conv3', 'conv4', 'conv5'), pretrained=True)
elif args.frame == 'fcgn':
conv_num = str(int(np.log2(cfg.FCGN.FEAT_STRIDE[0])))
Network = FCGN(feat_name=args.net, feat_list=('conv' + conv_num,), pretrained=True)
elif args.frame == 'mgn':
conv_num = str(int(np.log2(cfg.RCNN_COMMON.FEAT_STRIDE[0])))
Network = MGN(n_cls, class_agnostic=args.class_agnostic, feat_name=args.net,
feat_list=('conv' + conv_num,), pretrained=True)
elif args.frame == 'all_in_one':
conv_num = str(int(np.log2(cfg.RCNN_COMMON.FEAT_STRIDE[0])))
Network = All_in_One(n_cls, class_agnostic=args.class_agnostic, feat_name=args.net,
feat_list=('conv' + conv_num,), pretrained=True)
elif args.frame == 'ssd':
Network = SSD(n_cls, class_agnostic=args.class_agnostic, feat_name=args.net,
feat_list=('conv3', 'conv4'), pretrained=True)
elif args.frame == 'ssd_vmrn':
Network = SSD_VMRN(n_cls, class_agnostic=args.class_agnostic, feat_name=args.net,
feat_list=('conv3', 'conv4'), pretrained=True)
elif args.frame == 'vam':
if args.net == 'vgg16':
Network = VAM.vgg16(n_cls, pretrained=True)
elif args.net == 'res50':
Network = VAM.resnet(n_cls, layer_num=50, pretrained=True)
elif args.net == 'res101':
Network = VAM.resnet(n_cls, layer_num=101, pretrained=True)
else:
print("network is not defined")
pdb.set_trace()
else:
print("frame is not defined")
pdb.set_trace()
if args.frame in {'ssd_vmrn', 'faster_rcnn_vmrn'} and cfg.TRAIN.VMRN.FIX_OBJDET:
Network.create_architecture(cfg.TRAIN.VMRN.OBJ_MODEL_PATH)
elif args.frame in {'mgn', 'all_in_one'} and cfg.MGN.FIX_OBJDET:
Network.create_architecture(cfg.MGN.OBJ_MODEL_PATH)
else:
Network.create_architecture()
lr = args.lr
# tr_momentum = cfg.TRAIN.COMMON.MOMENTUM
# tr_momentum = args.momentum
args.start_epoch = 1
if args.resume:
output_dir = args.save_dir + "/" + args.dataset + "/" + args.net
load_name = os.path.join(output_dir,
args.frame + '_{}_{}_{}.pth'.format(args.checksession, args.checkepoch,
args.checkpoint))
print("loading checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
args.session = checkpoint['session']
Network.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.RCNN_COMMON.POOLING_MODE = checkpoint['pooling_mode']
print("loaded checkpoint %s" % (load_name))
if args.iter_per_epoch is not None:
Network.iter_counter = (args.checkepoch - 1) * args.iter_per_epoch + args.checkpoint
print("start iteration:", Network.iter_counter)
if args.cuda:
Network.cuda()
if len(args.mGPUs) > 0:
gpus = [int(i) for i in args.mGPUs.split(',')] # GPU ids are assumed to be comma-separated; split('') would raise ValueError
Network = nn.DataParallel(Network, gpus)
params = []
for key, value in dict(Network.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
params += [{'params': [value], 'lr': lr * (cfg.TRAIN.COMMON.DOUBLE_BIAS + 1),
'weight_decay': cfg.TRAIN.COMMON.BIAS_DECAY and cfg.TRAIN.COMMON.WEIGHT_DECAY or 0}]
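# Note: "BIAS_DECAY and WEIGHT_DECAY or 0" is the pre-ternary idiom for
# (WEIGHT_DECAY if BIAS_DECAY else 0), i.e. bias parameters only get weight
# decay when cfg.TRAIN.COMMON.BIAS_DECAY is truthy; their learning rate is
# scaled by (DOUBLE_BIAS + 1), i.e. doubled when DOUBLE_BIAS is 1.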
else:
params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.COMMON.WEIGHT_DECAY}]
# init optimizer
if args.optimizer == "adam":
optimizer = torch.optim.Adam(params)
elif args.optimizer == "sgd":
optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.COMMON.MOMENTUM)
if args.resume:
optimizer.load_state_dict(checkpoint['optimizer'])
return Network, optimizer
def detection_filter(all_boxes, all_grasp=None, max_per_image=100):
# Limit to max_per_image detections *over all classes*
image_scores = np.hstack([all_boxes[j][:, -1]
for j in xrange(1, len(all_boxes))])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, len(all_boxes)):
keep = np.where(all_boxes[j][:, -1] >= image_thresh)[0]
all_boxes[j] = all_boxes[j][keep, :]
if all_grasp is not None:
all_grasp[j] = all_grasp[j][keep, :]
if all_grasp is not None:
return all_boxes, all_grasp
else:
return all_boxes
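# Worked example (illustrative): with max_per_image=2 and per-class scores
# [[0.9, 0.3], [0.8]] (background class excluded), the pooled scores are
# [0.9, 0.3, 0.8]; the threshold is the 2nd-highest score (0.8), so the 0.3
# detection is dropped and the grasp entries (when given) are filtered with
# the same keep mask.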
def vis_gt(data_list, visualizer, frame, train_mode=False):
im_vis = image_unnormalize(data_list[0].permute(1, 2, 0).cpu().numpy())
# whether to visualize training data
if not train_mode:
im_vis = cv2.resize(im_vis, None, None, fx=1. / data_list[1][3].item(), fy=1. / data_list[1][2].item(),
interpolation=cv2.INTER_LINEAR)
if frame in {"fpn", "faster_rcnn", "ssd"}:
im_vis = visualizer.draw_objdet(im_vis, data_list[2].cpu().numpy())
elif frame in {"ssd_vmrn", "vam", "faster_rcnn_vmrn"}:
im_vis = visualizer.draw_objdet(im_vis, data_list[2].cpu().numpy(), o_inds=np.arange(data_list[3].item()))
im_vis = visualizer.draw_mrt(im_vis, data_list[4].cpu().numpy(), rel_score=data_list[5])
elif frame in {"fcgn"}:
im_vis = visualizer.draw_graspdet(im_vis, data_list[2].cpu().numpy())
elif frame in {"all_in_one"}:
# TODO: visualize manipulation relationship tree
im_vis = visualizer.draw_graspdet_with_owner(im_vis, data_list[2].cpu().numpy(),
data_list[3].cpu().numpy(), data_list[7].cpu().numpy())
im_vis = visualizer.draw_mrt(im_vis, data_list[6].cpu().numpy())
elif frame in {"mgn"}:
im_vis = visualizer.draw_graspdet_with_owner(im_vis, data_list[2].cpu().numpy(),
data_list[3].cpu().numpy(), data_list[6].cpu().numpy())
else:
raise RuntimeError("unsupported frame for visualization: %s" % frame)
return im_vis
def evaluate_model(Network, namedb, args):
max_per_image = 100
# load test dataset
imdb, roidb, ratio_list, ratio_index, cls_list = combined_roidb(namedb, False)
if args.frame in {"fpn", "faster_rcnn"}:
dataset = fasterrcnnbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=False, cls_list=cls_list, augmentation=False)
elif args.frame in {"ssd"}:
dataset = ssdbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=False, cls_list=cls_list, augmentation=False)
elif args.frame in {"ssd_vmrn", "vam"}:
dataset = svmrnbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=False, cls_list=cls_list, augmentation=False)
elif args.frame in {"faster_rcnn_vmrn"}:
dataset = fvmrnbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=False, cls_list=cls_list, augmentation=False)
elif args.frame in {"fcgn"}:
dataset = fcgnbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=False, cls_list=cls_list, augmentation=False)
elif args.frame in {"all_in_one"}:
dataset = fallinonebatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=False, cls_list=cls_list, augmentation=False)
elif args.frame in {"mgn"}:
dataset = roignbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=False, cls_list=cls_list, augmentation=False)
else:
raise RuntimeError("unsupported frame: %s" % args.frame)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0, pin_memory=True)
data_iter = iter(dataloader)
num_images = len(roidb)
output_dir = args.save_dir + "/" + args.dataset + "/" + args.net
if args.vis:
visualizer = dataViewer(cls_list)
data_vis_dir = os.path.join(args.save_dir, args.dataset, 'data_vis', 'test')
if not os.path.exists(data_vis_dir):
os.makedirs(data_vis_dir)
id_number_to_name = {}
for r in roidb:
id_number_to_name[r["img_id"]] = r["image"]
start = time.time()
# init variables
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(len(cls_list))]
all_rel = []
all_grasp = [[[] for _ in xrange(num_images)]
for _ in xrange(len(cls_list))]
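# Result containers: all_boxes[cls][img] / all_grasp[cls][img] hold per-image
# detections for each class (index 0, the background class, is left unused),
# and all_rel collects one relationship result per image for the VMRN-style models.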
Network.eval()
empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0))
for i in range(num_images):
data_batch = next(data_iter)
if args.cuda:
data_batch = makeCudaData(data_batch)
det_tic = time.time()
# forward process
if args.frame == 'faster_rcnn' or args.frame == 'fpn':
rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_box, net_loss_cls, net_loss_bbox, rois_label = Network(
data_batch)
boxes = rois[:, :, 1:5]
elif args.frame in {'ssd'}:
bbox_pred, cls_prob, net_loss_bbox, net_loss_cls = Network(data_batch)
boxes = Network.priors.type_as(bbox_pred).unsqueeze(0)
elif args.frame == 'faster_rcnn_vmrn':
rois, cls_prob, bbox_pred, rel_result, rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, RCNN_rel_loss_cls, reg_loss, rois_label = Network(data_batch)
boxes = rois[:, :, 1:5]
all_rel.append(rel_result)
elif args.frame == 'ssd_vmrn' or args.frame == 'vam':
bbox_pred, cls_prob, rel_result, loss_bbox, loss_cls, rel_loss_cls, reg_loss = Network(data_batch)
boxes = Network.priors.type_as(bbox_pred)
all_rel.append(rel_result)
elif args.frame == 'fcgn':
bbox_pred, cls_prob, loss_bbox, loss_cls, rois_label, boxes = Network(data_batch)
elif args.frame == 'mgn':
rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_box, loss_cls, loss_bbox, rois_label, grasp_loc, grasp_prob, \
grasp_bbox_loss, grasp_cls_loss, grasp_conf_label, grasp_all_anchors = Network(data_batch)
boxes = rois[:, :, 1:5]
elif args.frame == 'all_in_one':
rois, cls_prob, bbox_pred, rel_result, rpn_loss_cls, rpn_loss_box, loss_cls, loss_bbox, rel_loss_cls, reg_loss, rois_label, \
grasp_loc, grasp_prob, grasp_bbox_loss, grasp_cls_loss, grasp_conf_label, grasp_all_anchors = Network(
data_batch)
boxes = rois[:, :, 1:5]
all_rel.append(rel_result)
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
# collect results
if args.frame in {'ssd', 'fpn', 'faster_rcnn', 'faster_rcnn_vmrn', 'ssd_vmrn', 'vam'}:
# detected_box is a list of boxes. len(list) = num_classes
det_box = objdet_inference(cls_prob[0].data, bbox_pred[0].data, data_batch[1][0].data,
box_prior=boxes[0].data, class_agnostic=args.class_agnostic, for_vis=False)
if args.vis:
if args.frame not in {'faster_rcnn_vmrn', 'ssd_vmrn', 'vam'}:
# for object detection algorithms
vis_boxes = objdet_inference(cls_prob[0].data, bbox_pred[0].data, data_batch[1][0].data,
box_prior=boxes[0].data, class_agnostic=args.class_agnostic,
for_vis=True)
data_list = [data_batch[0][0], data_batch[1][0], torch.Tensor(vis_boxes)]
else:
# for visual manipulation relationship detection algorithms
det_res = all_rel[-1]
if det_res[0].shape[0] > 0:
vis_boxes = torch.cat([det_res[0], det_res[1].unsqueeze(1)], dim=1)
else:
vis_boxes = torch.Tensor([])
rel_mat, rel_score = rel_prob_to_mat(det_res[2], vis_boxes.size(0))
data_list = [data_batch[0][0], data_batch[1][0], vis_boxes,
torch.Tensor([vis_boxes.size(0)]), torch.Tensor(rel_mat), torch.Tensor(rel_score)]
if max_per_image > 0:
det_box = detection_filter(det_box, None, max_per_image)
for j in xrange(1, len(det_box)):
all_boxes[j][i] = det_box[j]
elif args.frame in {'mgn'}:
det_box, det_grasps = objgrasp_inference(cls_prob[0].data,
bbox_pred[0].data if bbox_pred is not None else bbox_pred,
grasp_prob.data, grasp_loc.data, data_batch[1][0].data,
boxes[0].data,
class_agnostic=args.class_agnostic,
g_box_prior=grasp_all_anchors.data, for_vis=False, topN_g=1)
if args.vis:
vis_boxes, vis_grasps = objgrasp_inference(cls_prob[0].data,
bbox_pred[0].data if bbox_pred is not None else bbox_pred,
grasp_prob.data, grasp_loc.data, data_batch[1][0].data,
boxes[0].data,
class_agnostic=args.class_agnostic,
g_box_prior=grasp_all_anchors.data, for_vis=True, topN_g=3)
if vis_boxes.shape[0] > 0:
g_inds = torch.Tensor(np.arange(vis_boxes.shape[0])).unsqueeze(1).repeat(1, vis_grasps.shape[1])
else:
g_inds = torch.Tensor([])
data_list = [data_batch[0][0], data_batch[1][0], torch.Tensor(vis_boxes),
torch.Tensor(vis_grasps).view(-1, vis_grasps.shape[-1]), None, None,
g_inds.long().view(-1)]
if max_per_image > 0:
det_box, det_grasps = detection_filter(det_box, det_grasps, max_per_image)
for j in xrange(1, len(det_box)):
all_boxes[j][i] = det_box[j]
all_grasp[j][i] = det_grasps[j]
elif args.frame in {'all_in_one'}:
# detected_box is a list of boxes. len(list) = num_classes
det_box, det_grasps = objgrasp_inference(cls_prob[0].data,
bbox_pred[0].data if bbox_pred is not None else bbox_pred,
grasp_prob.data, grasp_loc.data, data_batch[1][0].data,
boxes[0].data,
class_agnostic=args.class_agnostic,
g_box_prior=grasp_all_anchors.data, for_vis=False, topN_g=1)
if args.vis:
# for visual manipulation relationship detection algorithms
det_res = all_rel[-1]
vis_boxes, vis_grasps = objgrasp_inference(cls_prob[0].data,
bbox_pred[0].data if bbox_pred is not None else bbox_pred,
grasp_prob.data, grasp_loc.data, data_batch[1][0].data,
boxes[0].data,
class_agnostic=args.class_agnostic,
g_box_prior=grasp_all_anchors.data, for_vis=True, topN_g=3)
if vis_boxes.shape[0] > 0:
g_inds = torch.Tensor(np.arange(vis_boxes.shape[0])).unsqueeze(1).repeat(1, vis_grasps.shape[1])
else:
g_inds = torch.Tensor([])
rel_mat, rel_score = rel_prob_to_mat(det_res[2], vis_boxes.shape[0])
data_list = [data_batch[0][0], data_batch[1][0], torch.Tensor(vis_boxes),
torch.Tensor(vis_grasps).view(-1, vis_grasps.shape[-1]),
torch.Tensor([vis_boxes.shape[0]]), None,
torch.Tensor(rel_mat), g_inds.long().view(-1), torch.Tensor(rel_score)]
if max_per_image > 0:
det_box, det_grasps = detection_filter(det_box, det_grasps, max_per_image)
for j in xrange(1, len(det_box)):
all_boxes[j][i] = det_box[j]
all_grasp[j][i] = det_grasps[j]
elif args.frame in {'fcgn'}:
det_grasps = grasp_inference(cls_prob[0].data, bbox_pred[0].data, data_batch[1][0].data,
box_prior=boxes[0].data, topN=1)
all_grasp[1][i] = det_grasps
if args.vis:
data_list = [data_batch[0][0], data_batch[1][0], torch.Tensor(det_grasps)]
else:
raise RuntimeError("Illegal algorithm.")
if args.vis:
im_vis = vis_gt(data_list, visualizer, args.frame)
# img_name = id_number_to_name[data_batch[1][0][4].item()].split("/")[-1]
img_name = str(int(data_batch[1][0][4].item())) + ".jpg"
# When using cv2.imwrite, channel order should be BGR
cv2.imwrite(os.path.join(data_vis_dir, img_name), im_vis[:, :, ::-1])
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, num_images, detect_time, nms_time))
sys.stdout.flush()
print('Evaluating detections')
result = {}
if args.frame in {'fcgn'} or 'cornell' in args.dataset or 'jacquard' in args.dataset:
result["Acc_grasp"] = imdb.evaluate_detections(all_grasp, output_dir)
result["Main_Metric"] = result["Acc_grasp"]
else:
with open("det_res.pkl", "wb") as f:
pickle.dump(all_boxes, f)
result["mAP"] = imdb.evaluate_detections(all_boxes, output_dir)
if args.frame in {'ssd', 'faster_rcnn', 'fpn'}:
result["Main_Metric"] = result["mAP"]
if args.frame in {'mgn', "all_in_one"}:
        # when using MGN on a single-object grasp dataset, performance is measured by accuracy only instead of mAP.
if 'cornell' in args.dataset or 'jacquard' in args.dataset:
pass
else:
print('Evaluating grasp detection results')
            oag = not Network.use_objdet_branch
grasp_MRFPPI, mean_MRFPPI, key_point_MRFPPI, mAPgrasp = \
imdb.evaluate_multigrasp_detections(all_boxes, all_grasp, object_class_agnostic=oag)
print('Mean Log-Average Miss Rate: %.4f' % np.mean(np.array(mean_MRFPPI)))
result["mAP_grasp"] = mAPgrasp
if args.frame == 'mgn':
result["Main_Metric"] = mAPgrasp
if args.frame in {"faster_rcnn_vmrn", "ssd_vmrn", "all_in_one"}:
print('Evaluating relationships')
orec, oprec, imgprec, imgprec_difobjnum = imdb.evaluate_relationships(all_rel)
print("object recall: \t%.4f" % orec)
print("object precision:\t%.4f" % oprec)
print("image acc: \t%.4f" % imgprec)
print("image acc for images with different object numbers (2,3,4,5):")
print("%s\t%s\t%s\t%s\t" % tuple(imgprec_difobjnum))
result["Obj_Recall_Rel"] = orec
result["Obj_Precision_Rel"] = oprec
result["Img_Acc_Rel"] = imgprec
result["Main_Metric"] = imgprec
# TODO: implement all_in_one's metric for evaluation
end = time.time()
print("test time: %0.4fs" % (end - start))
return result
def train():
# check cuda devices
if not torch.cuda.is_available():
assert RuntimeError("Training can only be done by GPU. Please use --cuda to enable training.")
if torch.cuda.is_available() and not args.cuda:
assert RuntimeError("You have a CUDA device, so you should probably run with --cuda")
# init random seed
np.random.seed(cfg.RNG_SEED)
# init logger
# TODO: RESUME LOGGER
if args.use_tfboard:
from model.utils.logger import Logger
# Set the logger
current_t = time.strftime("%Y_%m_%d") + "_" + time.strftime("%H:%M:%S")
logger = Logger(os.path.join('.', 'logs', current_t + "_" + args.frame + "_" + args.dataset + "_" + args.net))
# init dataset
imdb, roidb, ratio_list, ratio_index, cls_list = combined_roidb(args.imdb_name)
train_size = len(roidb)
print('{:d} roidb entries'.format(len(roidb)))
sampler_batch = sampler(train_size, args.batch_size)
iters_per_epoch = int(train_size / args.batch_size)
if args.frame in {"fpn", "faster_rcnn"}:
dataset = fasterrcnnbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=True, cls_list=cls_list,
augmentation=cfg.TRAIN.COMMON.AUGMENTATION)
elif args.frame in {"ssd"}:
dataset = ssdbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=True, cls_list=cls_list,
augmentation=cfg.TRAIN.COMMON.AUGMENTATION)
elif args.frame in {"ssd_vmrn", "vam"}:
dataset = svmrnbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=True, cls_list=cls_list,
augmentation=cfg.TRAIN.COMMON.AUGMENTATION)
elif args.frame in {"faster_rcnn_vmrn"}:
dataset = fvmrnbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=True, cls_list=cls_list,
augmentation=cfg.TRAIN.COMMON.AUGMENTATION)
elif args.frame in {"fcgn"}:
dataset = fcgnbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=True, cls_list=cls_list,
augmentation=cfg.TRAIN.COMMON.AUGMENTATION)
elif args.frame in {"all_in_one"}:
dataset = fallinonebatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=True, cls_list=cls_list,
augmentation=cfg.TRAIN.COMMON.AUGMENTATION)
elif args.frame in {"mgn"}:
dataset = roignbatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
len(cls_list), training=True, cls_list=cls_list,
augmentation=cfg.TRAIN.COMMON.AUGMENTATION)
else:
        raise RuntimeError("Illegal algorithm name: {}".format(args.frame))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
sampler=sampler_batch, num_workers=args.num_workers)
args.iter_per_epoch = int(len(roidb) / args.batch_size)
# init output directory for model saving
output_dir = args.save_dir + "/" + args.dataset + "/" + args.net
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if args.vis:
visualizer = dataViewer(cls_list)
data_vis_dir = os.path.join(args.save_dir, args.dataset, 'data_vis', 'train')
if not os.path.exists(data_vis_dir):
os.makedirs(data_vis_dir)
id_number_to_name = {}
for r in roidb:
id_number_to_name[r["img_id"]] = r["image"]
# init network
Network, optimizer = init_network(args, len(cls_list))
# init variables
current_result, best_result, loss_temp, loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box, loss_rel_pred, \
loss_grasp_box, loss_grasp_cls, fg_cnt, bg_cnt, fg_grasp_cnt, bg_grasp_cnt = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
save_flag, rois, rpn_loss_cls, rpn_loss_box, rel_loss_cls, cls_prob, bbox_pred, rel_cls_prob, loss_bbox, loss_cls, \
rois_label, grasp_cls_loss, grasp_bbox_loss, grasp_conf_label = \
False, None, None, None, None, None, None, None, None, None, None, None, None, None
# initialize step counter
if args.resume:
step = args.checkpoint
else:
step = 0
for epoch in range(args.checkepoch, args.max_epochs + 1):
# setting to train mode
Network.train()
start_epoch_time = time.time()
start = time.time()
data_iter = iter(dataloader)
while (True):
if step >= iters_per_epoch:
break
# get data batch
data_batch = next(data_iter)
if args.vis:
for i in range(data_batch[0].size(0)):
data_list = [data_batch[d][i] for d in range(len(data_batch))]
im_vis = vis_gt(data_list, visualizer, args.frame, train_mode=True)
# img_name = id_number_to_name[data_batch[1][i][4].item()].split("/")[-1]
img_name = str(int(data_batch[1][i][4].item())) + ".jpg"
# When using cv2.imwrite, channel order should be BGR
cv2.imwrite(os.path.join(data_vis_dir, img_name), im_vis[:, :, ::-1])
# ship to cuda
if args.cuda:
data_batch = makeCudaData(data_batch)
# setting gradients to zeros
Network.zero_grad()
optimizer.zero_grad()
# forward process
if args.frame == 'faster_rcnn_vmrn':
rois, cls_prob, bbox_pred, rel_cls_prob, rpn_loss_cls, rpn_loss_box, loss_cls, \
loss_bbox, rel_loss_cls, reg_loss, rois_label = Network(data_batch)
loss = (rpn_loss_cls + rpn_loss_box + loss_cls + loss_bbox + reg_loss + rel_loss_cls).mean()
elif args.frame == 'faster_rcnn' or args.frame == 'fpn':
rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_box, loss_cls, loss_bbox, \
rois_label = Network(data_batch)
loss = (rpn_loss_cls + rpn_loss_box + loss_cls + loss_bbox).mean()
elif args.frame == 'fcgn':
bbox_pred, cls_prob, loss_bbox, loss_cls, rois_label, rois = Network(data_batch)
loss = (loss_bbox + loss_cls).mean()
elif args.frame == 'mgn':
rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_box, loss_cls, loss_bbox, rois_label, grasp_loc, \
grasp_prob, grasp_bbox_loss, grasp_cls_loss, grasp_conf_label, grasp_all_anchors = Network(data_batch)
loss = rpn_loss_box.mean() + rpn_loss_cls.mean() + loss_cls.mean() + loss_bbox.mean() + \
cfg.MGN.OBJECT_GRASP_BALANCE * (grasp_bbox_loss.mean() + grasp_cls_loss.mean())
elif args.frame == 'all_in_one':
rois, cls_prob, bbox_pred, rel_cls_prob, rpn_loss_cls, rpn_loss_box, loss_cls, loss_bbox, rel_loss_cls, reg_loss, rois_label, \
grasp_loc, grasp_prob, grasp_bbox_loss, grasp_cls_loss, grasp_conf_label, grasp_all_anchors = Network(
data_batch)
loss = (rpn_loss_box + rpn_loss_cls + loss_cls + loss_bbox + rel_loss_cls + reg_loss \
+ cfg.MGN.OBJECT_GRASP_BALANCE * grasp_bbox_loss + grasp_cls_loss).mean()
elif args.frame in {'ssd'}:
bbox_pred, cls_prob, loss_bbox, loss_cls = Network(data_batch)
loss = loss_bbox.mean() + loss_cls.mean()
elif args.frame == 'ssd_vmrn' or args.frame == 'vam':
bbox_pred, cls_prob, rel_result, loss_bbox, loss_cls, rel_loss_cls, reg_loss = Network(data_batch)
loss = (loss_cls + loss_bbox + rel_loss_cls + reg_loss).mean()
loss_temp += loss.data.item()
# backward process
loss.backward()
g_norm = gradient_norm(Network)
if args.net == "vgg16":
clip_gradient(Network, 10.)
# print("Gradient norm:{:.3f}".format(g_norm))
optimizer.step()
step += 1
# record training information
if len(args.mGPUs) > 0:
                if rpn_loss_cls is not None and isinstance(rpn_loss_cls, torch.Tensor):
                    loss_rpn_cls += rpn_loss_cls.mean().item()
                if rpn_loss_box is not None and isinstance(rpn_loss_box, torch.Tensor):
                    loss_rpn_box += rpn_loss_box.mean().item()
                if loss_cls is not None and isinstance(loss_cls, torch.Tensor):
                    loss_rcnn_cls += loss_cls.mean().item()
                if loss_bbox is not None and isinstance(loss_bbox, torch.Tensor):
                    loss_rcnn_box += loss_bbox.mean().item()
                if rel_loss_cls is not None and isinstance(rel_loss_cls, torch.Tensor):
                    loss_rel_pred += rel_loss_cls.mean().item()
                if grasp_cls_loss is not None and isinstance(grasp_cls_loss, torch.Tensor):
                    loss_grasp_cls += grasp_cls_loss.mean().item()
                if grasp_bbox_loss is not None and isinstance(grasp_bbox_loss, torch.Tensor):
                    loss_grasp_box += grasp_bbox_loss.mean().item()
if rois_label is not None and isinstance(rois_label, torch.Tensor):
tempfg = torch.sum(rois_label.data.ne(0))
fg_cnt += tempfg
bg_cnt += (rois_label.data.numel() - tempfg)
if grasp_conf_label is not None and isinstance(grasp_conf_label, torch.Tensor):
tempfg = torch.sum(grasp_conf_label.data.ne(0))
fg_grasp_cnt += tempfg
bg_grasp_cnt += (grasp_conf_label.data.numel() - tempfg)
else:
if rpn_loss_cls is not None and isinstance(rpn_loss_cls, torch.Tensor):
loss_rpn_cls += rpn_loss_cls.item()
                if rpn_loss_box is not None and isinstance(rpn_loss_box, torch.Tensor):
                    loss_rpn_box += rpn_loss_box.item()
if loss_cls is not None and isinstance(loss_cls, torch.Tensor):
loss_rcnn_cls += loss_cls.item()
if loss_bbox is not None and isinstance(loss_bbox, torch.Tensor):
loss_rcnn_box += loss_bbox.item()
if rel_loss_cls is not None and isinstance(rel_loss_cls, torch.Tensor):
loss_rel_pred += rel_loss_cls.item()
if grasp_cls_loss is not None and isinstance(grasp_cls_loss, torch.Tensor):
loss_grasp_cls += grasp_cls_loss.item()
if grasp_bbox_loss is not None and isinstance(grasp_bbox_loss, torch.Tensor):
loss_grasp_box += grasp_bbox_loss.item()
if rois_label is not None and isinstance(rois_label, torch.Tensor):
tempfg = torch.sum(rois_label.data.ne(0))
fg_cnt += tempfg
bg_cnt += (rois_label.data.numel() - tempfg)
if grasp_conf_label is not None and isinstance(grasp_conf_label, torch.Tensor):
tempfg = torch.sum(grasp_conf_label.data.ne(0))
fg_grasp_cnt += tempfg
bg_grasp_cnt += (grasp_conf_label.data.numel() - tempfg)
if Network.iter_counter % args.disp_interval == 0:
end = time.time()
loss_temp /= args.disp_interval
loss_rpn_cls /= args.disp_interval
loss_rpn_box /= args.disp_interval
loss_rcnn_cls /= args.disp_interval
loss_rcnn_box /= args.disp_interval
loss_rel_pred /= args.disp_interval
loss_grasp_cls /= args.disp_interval
loss_grasp_box /= args.disp_interval
print("[session %d][epoch %2d][iter %4d/%4d] \n\t\t\tloss: %.4f, lr: %.2e" \
% (args.session, epoch, step, iters_per_epoch, loss_temp, optimizer.param_groups[0]['lr']))
print('\t\t\ttime cost: %f' % (end - start,))
if rois_label is not None:
print("\t\t\tfg/bg=(%d/%d)" % (fg_cnt, bg_cnt))
if grasp_conf_label is not None:
print("\t\t\tgrasp_fg/grasp_bg=(%d/%d)" % (fg_grasp_cnt, bg_grasp_cnt))
if rpn_loss_box is not None and rpn_loss_cls is not None:
print("\t\t\trpn_cls: %.4f\n\t\t\trpn_box: %.4f\n\t\t\trcnn_cls: %.4f\n\t\t\trcnn_box %.4f" \
% (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
else:
print("\t\t\trcnn_cls: %.4f\n\t\t\trcnn_box %.4f" \
% (loss_rcnn_cls, loss_rcnn_box))
if rel_loss_cls is not None:
print("\t\t\trel_loss %.4f" \
% (loss_rel_pred,))
if grasp_cls_loss is not None and grasp_bbox_loss is not None:
print("\t\t\tgrasp_cls: %.4f\n\t\t\tgrasp_box %.4f" \
% (loss_grasp_cls, loss_grasp_box))
if args.use_tfboard:
info = {
'loss': loss_temp,
'loss_rcnn_cls': loss_rcnn_cls,
'loss_rcnn_box': loss_rcnn_box,
}
if rpn_loss_cls:
info['loss_rpn_cls'] = loss_rpn_cls
if rpn_loss_box:
info['loss_rpn_box'] = loss_rpn_box
if rel_loss_cls:
info['loss_rel_pred'] = loss_rel_pred
for tag, value in info.items():
logger.scalar_summary(tag, value, Network.iter_counter)
loss_temp = 0.
loss_rpn_cls = 0.
loss_rpn_box = 0.
loss_rcnn_cls = 0.
loss_rcnn_box = 0.
loss_rel_pred = 0.
loss_grasp_box = 0.
loss_grasp_cls = 0.
fg_cnt = 0.
bg_cnt = 0.
fg_grasp_cnt = 0.
bg_grasp_cnt = 0.
start = time.time()
# adjust learning rate
if args.lr_decay_step == 0:
# clr = lr / (1 + decay * n) -> lr_n / lr_n+1 = (1 + decay * (n+1)) / (1 + decay * n)
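                # e.g. with gamma = 1e-4 at iteration 1000, the multiplier is (1 + 0.1) / (1 + 0.1001) ~= 0.99991, i.e. a smooth per-step decay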
decay = (1 + args.lr_decay_gamma * Network.iter_counter) / (
1 + args.lr_decay_gamma * (Network.iter_counter + 1))
adjust_learning_rate(optimizer, decay)
elif Network.iter_counter % (args.lr_decay_step) == 0:
adjust_learning_rate(optimizer, args.lr_decay_gamma)
            # test and save; using (iter_counter - 1) here would ensure the network is also evaluated at the very beginning of training
# if (Network.iter_counter - 1) % cfg.TRAIN.COMMON.SNAPSHOT_ITERS == 0:
if Network.iter_counter % cfg.TRAIN.COMMON.SNAPSHOT_ITERS == 0:
# test network and record results
if cfg.TRAIN.COMMON.SNAPSHOT_AFTER_TEST:
Network.eval()
with torch.no_grad():
current_result = evaluate_model(Network, args.imdbval_name, args)
torch.cuda.empty_cache()
if args.use_tfboard:
for key in current_result.keys():
logger.scalar_summary(key, current_result[key], Network.iter_counter)
Network.train()
if current_result["Main_Metric"] > best_result:
best_result = current_result["Main_Metric"]
save_flag = True
else:
save_flag = True
if save_flag:
Network.cpu()
save_name = os.path.join(output_dir, args.frame + '_{}_{}_{}.pth'.format(args.session, epoch, step))
save_checkpoint({
'session': args.session,
'model': Network.state_dict(),
'optimizer': optimizer.state_dict(),
'pooling_mode': cfg.RCNN_COMMON.POOLING_MODE,
'class_agnostic': args.class_agnostic,
}, save_name)
print('save model: {}'.format(save_name))
save_flag = False
Network.cuda()
end_epoch_time = time.time()
print("Epoch finished. Time costing: ", end_epoch_time - start_epoch_time, "s")
step = 0 # reset step counter
def test():
# check cuda devices
if not torch.cuda.is_available():
assert RuntimeError("Training can only be done by GPU. Please use --cuda to enable training.")
if torch.cuda.is_available() and not args.cuda:
assert RuntimeError("You have a CUDA device, so you should probably run with --cuda")
# init output directory for model saving
output_dir = args.save_dir + "/" + args.dataset + "/" + args.net
if not os.path.exists(output_dir):
os.makedirs(output_dir)
_, _, _, _, cls_list = combined_roidb(args.imdbval_name, training=False)
args.iter_per_epoch = None
# init network
Network, optimizer = init_network(args, len(cls_list))
Network.eval()
evaluate_model(Network, args.imdbval_name, args)
# def test_testcode():
# with open('det_res.pkl', 'rb') as f:
# all_boxes = pickle.load(f)
# imdb, roidb, ratio_list, ratio_index = combined_roidb('coco_2017_val+vmrd_compv1_test', False)
# imdb.evaluate_detections(all_boxes, 'output/coco+vmrd/res101')
if __name__ == '__main__':
# init arguments
args = read_cfgs()
assert args.frame in LEGAL_FRAMES, "Illegal algorithm name."
if args.test:
args.resume = True
test()
else:
train()
|
import numpy as np
from numpy import zeros
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import OES_Object
from pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header
class RealNonlinearRodArray(OES_Object): # 89-CRODNL, 92-CONRODNL
"""
::
ELEMENT-ID = 102
N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )
TIME AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL
STRESS PLASTIC/NLELAST STRAIN STRESS
2.000E-02 1.941367E+01 1.941367E+01 1.941367E-04 0.0 0.0 0.0
3.000E-02 1.941367E+01 1.941367E+01 1.941367E-04 0.0 0.0 0.0
"""
def __init__(self, data_code, is_sort1, isubcase, dt):
"""tested by elements/loadstep_elements.op2"""
OES_Object.__init__(self, data_code, isubcase, apply_data_code=True)
#self.code = [self.format_code, self.sort_code, self.s_code]
self.nelements = 0 # result specific
@property
def is_real(self):
return True
@property
def is_complex(self):
return False
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def _get_msgs(self):
raise NotImplementedError()
def get_headers(self):
headers = ['axial_stress', 'equiv_stress', 'total_strain',
'effective_plastic_creep_strain', 'effective_creep_strain',
'linear_torsional_stress']
return headers
def build(self):
"""sizes the vectorized attributes of the RealNonlinearRodArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[axial_stress, equiv_stress, total_strain, effective_plastic_creep_strain,
# effective_creep_strain, linear_torsional_stress]
self.data = zeros((self.ntimes, self.nelements, 6), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
#Time 0.02 0.04
#ElementID Item
#102 axial_stress 19.413668 76.139496
# equiv_stress 19.413668 76.139496
# total_strain 0.000194 0.000761
# effective_plastic_creep_strain 0.000000 0.000000
# effective_creep_strain 0.000000 0.000000
# linear_torsional_stress 0.000000 0.000000
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
df1 = pd.DataFrame(self.element).T
df1.columns = ['ElementID']
df2 = pd.DataFrame(self.data[0])
df2.columns = headers
self.data_frame = df1.join([df2])
#print(self.data_frame)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid, in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
(axial_stress1, equiv_stress1, total_strain1, effective_plastic_creep_strain1, effective_creep_strain1, linear_torsional_stress1) = t1
(axial_stress2, equiv_stress2, total_strain2, effective_plastic_creep_strain2, effective_creep_strain2, linear_torsional_stress2) = t2
if not np.allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s\n (%s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s)\n' % (
eid,
axial_stress1, equiv_stress1, total_strain1, effective_plastic_creep_strain1, effective_creep_strain1, linear_torsional_stress1,
axial_stress2, equiv_stress2, total_strain2, effective_plastic_creep_strain2, effective_creep_strain2, linear_torsional_stress2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, axial_stress, equiv_stress, total_strain,
effective_plastic_creep_strain, effective_creep_strain, linear_torsional_stress):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [
axial_stress, equiv_stress, total_strain, effective_plastic_creep_strain,
effective_creep_strain, linear_torsional_stress
]
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
if is_sort1:
msg = [
' N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )\n',
' \n',
' ELEMENT-ID AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL\n',
' STRESS PLASTIC/NLELAST STRAIN STRESS\n'
]
else:
msg = [
' N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )\n',
' \n',
' TIME AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL\n',
' STRESS PLASTIC/NLELAST STRAIN STRESS\n'
]
if self.is_sort1:
page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f06_file, msg)
else:
raise NotImplementedError('RealNonlinearRodArray')
return page_num
def _write_sort1_as_sort1(self, header, page_stamp, page_num, f06_file, msg_temp):
ntimes = self.data.shape[0]
eids = self.element
#is_odd = False
#nwrite = len(eids)
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
axial = self.data[itime, :, 0]
eqs = self.data[itime, :, 1]
total = self.data[itime, :, 2]
epcs = self.data[itime, :, 3]
ecs = self.data[itime, :, 4]
lts = self.data[itime, :, 5]
#print "dt=%s axials=%s eqs=%s ts=%s epcs=%s ecs=%s lts=%s" %(dt,axial,eqs,ts,epcs,ecs,lts)
#msgE[eid] = ' ELEMENT-ID = %8i\n' % (eid)
#if eid not in msgT:
#msgT[eid] = []
#msgT[eid].append(' %9.3E %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n' % (dt, axial, eqs, ts, epcs, ecs, lts))
for eid, axiali, eqsi, totali, epcsi, ecsi, ltsi in zip(eids, axial, eqs, total, epcs, ecs, lts):
([saxial, seqs, stotal, sepcs, secs, slts]) = write_floats_13e(
[axiali, eqsi, totali, epcsi, ecsi, ltsi])
f06_file.write(
' %8i %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, saxial, seqs, stotal, sepcs, secs, slts))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result, date,
is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i6f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
#print('3, %s' % itable)
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
#print('4, %s' % itable)
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
axial = self.data[itime, :, 0]
eqs = self.data[itime, :, 1]
total = self.data[itime, :, 2]
epcs = self.data[itime, :, 3]
ecs = self.data[itime, :, 4]
lts = self.data[itime, :, 5]
for eid, axiali, eqsi, totali, epcsi, ecsi, ltsi in zip(eids_device, axial, eqs, total, epcs, ecs, lts):
data = [eid, axiali, eqsi, totali, epcsi, ecsi, ltsi]
                op2_ascii.write('  eid=%s data=%s\n' % (eid, str(data)))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
|
from typing import List, Optional, Tuple
import awkward
import numpy
import xgboost
def calculate_diphoton_mva(
mva: Tuple[Optional[xgboost.Booster], List[str]],
diphotons: awkward.Array,
events: awkward.Array,
) -> awkward.Array:
if mva[0] is None:
return diphotons
diphoton_mva = mva[0]
var_order = mva[1]
bdt_vars = {}
bdt_vars["dipho_leadIDMVA"] = diphotons.pho_lead.mvaID
bdt_vars["dipho_subleadIDMVA"] = diphotons.pho_sublead.mvaID
bdt_vars["dipho_leadEta"] = diphotons.pho_lead.eta
bdt_vars["dipho_subleadEta"] = diphotons.pho_sublead.eta
bdt_vars["dipho_lead_ptoM"] = diphotons.pho_lead.pt / diphotons.mass
bdt_vars["dipho_sublead_ptoM"] = diphotons.pho_sublead.pt / diphotons.mass
def calc_displacement(
photons: awkward.Array, events: awkward.Array
) -> awkward.Array:
x = photons.x_calo - events.PV.x
y = photons.y_calo - events.PV.y
z = photons.z_calo - events.PV.z
return awkward.zip({"x": x, "y": y, "z": z}, with_name="Vector3D")
v_lead = calc_displacement(diphotons.pho_lead, events)
v_sublead = calc_displacement(diphotons.pho_sublead, events)
p_lead = v_lead.unit() * diphotons.pho_lead.energyRaw
p_lead["energy"] = diphotons.pho_lead.energyRaw
p_lead = awkward.with_name(p_lead, "Momentum4D")
p_sublead = v_sublead.unit() * diphotons.pho_sublead.energyRaw
p_sublead["energy"] = diphotons.pho_sublead.energyRaw
p_sublead = awkward.with_name(p_sublead, "Momentum4D")
sech_lead = 1.0 / numpy.cosh(p_lead.eta)
sech_sublead = 1.0 / numpy.cosh(p_sublead.eta)
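    # Identities: 1/cosh(eta) = sin(theta) and tanh(eta) = cos(theta), so the
    # sech_*/tanh_* variables here are just sin/cos of each photon's polar angle.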
tanh_lead = numpy.cos(p_lead.theta)
tanh_sublead = numpy.cos(p_sublead.theta)
cos_dphi = numpy.cos(p_lead.deltaphi(p_sublead))
numerator_lead = sech_lead * (
sech_lead * tanh_sublead - tanh_lead * sech_sublead * cos_dphi
)
numerator_sublead = sech_sublead * (
sech_sublead * tanh_lead - tanh_sublead * sech_lead * cos_dphi
)
denominator = 1.0 - tanh_lead * tanh_sublead - sech_lead * sech_sublead * cos_dphi
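    # Additional mass-resolution term driven by the beam-spot spread along z; it feeds
    # the "wrong-vertex" resolution sigma_wv below, while sigma_m alone is the
    # "right-vertex" resolution (cf. the sigmarv/sigmawv BDT inputs).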
add_reso = (
0.5
* (-numpy.sqrt(2.0) * events.BeamSpot.sigmaZ / denominator)
* (numerator_lead / p_lead.mag + numerator_sublead / p_sublead.mag)
)
dEnorm_lead = diphotons.pho_lead.energyErr / diphotons.pho_lead.energy
dEnorm_sublead = diphotons.pho_sublead.energyErr / diphotons.pho_sublead.energy
sigma_m = 0.5 * numpy.sqrt(dEnorm_lead ** 2 + dEnorm_sublead ** 2)
sigma_wv = numpy.sqrt(add_reso ** 2 + sigma_m ** 2)
vtx_prob = awkward.full_like(sigma_m, 0.999) # !!!! placeholder !!!!
bdt_vars["CosPhi"] = cos_dphi
bdt_vars["vtxprob"] = vtx_prob
bdt_vars["sigmarv"] = sigma_m
bdt_vars["sigmawv"] = sigma_wv
counts = awkward.num(diphotons, axis=-1)
bdt_inputs = numpy.column_stack(
[awkward.to_numpy(awkward.flatten(bdt_vars[name])) for name in var_order]
)
tempmatrix = xgboost.DMatrix(bdt_inputs, feature_names=var_order)
scores = diphoton_mva.predict(tempmatrix)
for var, arr in bdt_vars.items():
if "dipho" not in var:
diphotons[var] = arr
diphotons["bdt_score"] = awkward.unflatten(scores, counts)
return diphotons
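# Illustrative usage (a sketch; the file/variable names below are assumptions, not
# part of this module):
#   booster = xgboost.Booster(model_file="diphoton_bdt.json")
#   var_order = ["dipho_leadIDMVA", "dipho_subleadIDMVA", ...]  # must match the training order
#   diphotons = calculate_diphoton_mva((booster, var_order), diphotons, events)
# The returned array carries a new "bdt_score" field with the same jagged structure
# as the input diphotons.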
|
import os
import requests
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.files.storage import Storage as StorageBase
from django.utils.module_loading import import_string
def setting(name, default=None):
return getattr(settings, name, default)
class WebDavStorage(StorageBase):
def __init__(self, **kwargs):
self.requests = self.get_requests_instance(**kwargs)
self.webdav_url = self.set_webdav_url(**kwargs)
self.public_url = self.set_public_url(**kwargs)
        self.listing_backend = kwargs.get('listing_backend') or \
            setting('WEBDAV_LISTING_BACKEND')
self.basic_auth = setting('WEBDAV_BASIC_AUTH')
if not self.webdav_url:
raise NotImplementedError('Please define webdav url')
if not self.public_url:
self.public_url = self.webdav_url
def set_webdav_url(self, **kwargs):
return kwargs.get('webdav_url') or setting('WEBDAV_URL')
def set_public_url(self, **kwargs):
return kwargs.get('public_url') or setting('WEBDAV_PUBLIC_URL')
def listdir(self, path):
if not self.listing_backend:
raise NotImplementedError(
'Listing backend not configured. Please set '
'the WEBDAV_LISTING_BACKEND option in your settings module '
'or pass the "listing_backend" keyword argument to the '
'storage constructor'
)
try:
return import_string(self.listing_backend)(self, path)
except ImportError:
            raise NotImplementedError(
                'Unable to import the listing backend '
                '{0}'.format(self.listing_backend)
            )
except TypeError:
raise NotImplementedError(
'Wrong number of arguments. A listing backend should accept '
'two args: 1) a storage instance, 2) requested path'
)
def get_requests_instance(self, **kwargs):
return requests.Session()
def webdav(self, method, name, *args, **kwargs):
url = self.get_webdav_url(name)
method = method.lower()
if self.basic_auth:
if not kwargs:
kwargs = {}
kwargs["auth"] = (self.basic_auth["user"], self.basic_auth["password"])
response = getattr(self.requests, method)(url, *args, **kwargs)
response.raise_for_status()
return response
def get_public_url(self, name):
return self.public_url.rstrip('/') + '/' + name.lstrip('/')
def get_webdav_url(self, name):
return self.webdav_url.rstrip('/') + '/' + name.lstrip('/')
def _open(self, name, mode='rb'):
content = self.webdav('GET', name).content
return ContentFile(content, name)
def _save(self, name, content):
headers = None
if setting('WEBDAV_RECURSIVE_MKCOL', False):
self.make_collection(name)
if hasattr(content, 'temporary_file_path'):
with open(content.temporary_file_path(), 'rb') as f:
self.webdav(method='PUT',
name=name,
data=f,
headers=headers
)
else:
content.file.seek(0)
self.webdav(method='PUT',
name=name,
data=content.file,
headers=headers
)
return name
def make_collection(self, name):
coll_path = self.webdav_url
for directory in name.split('/')[:-1]:
col = os.path.join(coll_path, directory, '')
resp = self.requests.head(col)
if not resp.ok:
resp = self.requests.request('MKCOL', col)
resp.raise_for_status()
coll_path = os.path.join(coll_path, directory)
def delete(self, name):
try:
self.webdav('DELETE', name)
except requests.HTTPError:
pass
def exists(self, name):
try:
self.webdav('HEAD', name)
except requests.exceptions.HTTPError:
return False
else:
return True
def size(self, name):
try:
return int(self.webdav('HEAD', name).headers['content-length'])
except (ValueError, requests.exceptions.HTTPError):
            raise IOError('Unable to get size for %s' % name)
def url(self, name):
return self.get_public_url(name)
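# Illustrative usage (a sketch; assumes WEBDAV_URL is configured in the Django
# settings, and the names below are examples only):
#   storage = WebDavStorage()
#   saved_name = storage.save('uploads/report.txt', ContentFile(b'example'))
#   download_url = storage.url(saved_name)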
class WebDavStaticStorage(WebDavStorage):
base_url = setting('WEBDAV_STATIC_BASE_URL')
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
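# Illustrative behaviour (not executed here):
#   parse_spec('1.2.3.4', 7777)         -> IPv4 mapped into the ::ffff:0:0/96 range, port 7777
#   parse_spec('[2001:db8::1]:8333', 0) -> the 16-byte IPv6 address, port 8333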
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 7777)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 10500)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworksOperations:
"""VirtualNetworksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs
) -> None:
"""Deletes the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: None, or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
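    # Illustrative call pattern (client/variable names are assumptions, not part of
    # this generated module):
    #   poller = await network_client.virtual_networks.begin_delete("my-rg", "my-vnet")
    #   await poller.result()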
async def get(
self,
resource_group_name: str,
virtual_network_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.VirtualNetwork":
"""Gets the specified virtual network by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetwork, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.VirtualNetwork
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetwork"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "models.VirtualNetwork",
**kwargs
) -> "models.VirtualNetwork":
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetwork"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetwork')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "models.VirtualNetwork",
**kwargs
) -> "models.VirtualNetwork":
"""Creates or updates a virtual network in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to the create or update virtual network operation.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.VirtualNetwork
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: VirtualNetwork, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.VirtualNetwork
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.VirtualNetwork":
"""Updates a virtual network tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to update virtual network tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetwork, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.VirtualNetwork
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetwork"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.VirtualNetworkListResult"]:
"""Gets all virtual networks in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.VirtualNetworkListResult"]:
"""Gets all virtual networks in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
async def check_ip_address_availability(
self,
resource_group_name: str,
virtual_network_name: str,
ip_address: str,
**kwargs
) -> "models.IPAddressAvailabilityResult":
"""Checks whether a private IP address is available for use.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param ip_address: The private IP address to be verified.
:type ip_address: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IPAddressAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.IPAddressAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IPAddressAvailabilityResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
# Construct URL
url = self.check_ip_address_availability.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IPAddressAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'} # type: ignore
def list_usage(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs
) -> AsyncIterable["models.VirtualNetworkListUsageResult"]:
"""Lists usage stats.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListUsageResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.VirtualNetworkListUsageResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkListUsageResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_usage.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListUsageResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'} # type: ignore
|
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)  # assignment_tag was removed in Django 2.0; simple_tag supports "as var"
def get_site_root(context):
return context['request'].site.root_page
@register.inclusion_tag("home/navbar/navbar.html", takes_context=True)
def display_navbar(context):
parent = get_site_root(context)
if 'self' in context:
calling_page = context['self']
else:
calling_page = None
menuitems = parent.get_children().live().in_menu()
for menuitem in menuitems:
menuitem.show_dropdown = menuitem.get_children().live().in_menu().exists()
menuitem.active = (calling_page.url.startswith(menuitem.url) if calling_page else False)
return {
"calling_page": calling_page,
"menuitems": menuitems,
"request": context['request']
}
@register.inclusion_tag('home/navbar/navbar_dropdown.html', takes_context=True)
def display_navbar_dropdown(context, parent):
menuitems_children = parent.get_children().live().in_menu()
return {
"parent": parent,
"menuitems_children": menuitems_children,
"request": context['request'],
}
@register.filter
def get_item(dictionary, key):
"""
Return a value from a dictionary inside a template.
Args:
dictionary: dictionary to get the value from
key: key to look up in the dictionary
Returns: the value stored under `key` (None if missing)
"""
return dictionary.get(key)
|
""" PhySR for 3D GS """
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.optim import lr_scheduler
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as scio
import time
import os
from torch.nn.utils import weight_norm
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
torch.manual_seed(0)
np.random.seed(0)
torch.set_default_dtype(torch.float32)
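# Fourth-order central finite-difference stencil for the 3D Laplacian on a 5x5x5 kernel.
# Per axis the coefficients are (-1/12, 4/3, -5/2, 4/3, -1/12); summing the three axes
# gives the -15/2 centre weight assembled below.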
laplace_3d = np.zeros((1, 1, 5, 5, 5))
elements = [
(-15/2, (0, 0, 0)),
(4 / 3, (1, 0, 0)),
(4 / 3, (0, 1, 0)),
(4 / 3, (0, 0, 1)),
(4 / 3, (-1, 0, 0)),
(4 / 3, (0, -1, 0)),
(4 / 3, (0, 0, -1)),
(-1 / 12, (-2, 0, 0)),
(-1 / 12, (0, -2, 0)),
(-1 / 12, (0, 0, -2)),
(-1 / 12, (2, 0, 0)),
(-1 / 12, (0, 2, 0)),
(-1 / 12, (0, 0, 2)),
]
for weight, (x, y, z) in elements:
laplace_3d[0, 0, x+2, y+2, z+2] = weight
def initialize_weights(module):
c = 1
if isinstance(module, nn.Conv3d):
module.weight.data.uniform_(-c*np.sqrt(1 / (3 * 3 * 320)),
c*np.sqrt(1 / (3 * 3 * 320)))
if isinstance(module, nn.Conv1d):
module.weight.data.uniform_(-c*np.sqrt(1 / (3 * 3 * 320)),
c*np.sqrt(1 / (3 * 3 * 320)))
elif isinstance(module, nn.Linear):
module.bias.data.zero_()
class ShiftMean(nn.Module):
# data: [t,b,c,d,h,w]
def __init__(self, mean, std):
super(ShiftMean, self).__init__()
self.mean = torch.Tensor(mean).view(1, 1, 2, 1, 1, 1)
self.std = torch.Tensor(std).view(1, 1, 2, 1, 1, 1)
def forward(self, x, mode):
if mode == 'sub':
return (x - self.mean.cuda()) / self.std.cuda()
elif mode == 'add':
return x * self.std.cuda() + self.mean.cuda()
else:
raise NotImplementedError
class PixelShuffle3d(nn.Module):
''' 3d version of pixelshuffle '''
def __init__(self, scale):
'''
:param scale: upsample scale
'''
super().__init__()
self.scale = scale
def forward(self, input):
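# Sub-pixel upsampling in 3D: fold scale**3 channel groups into the depth, height
# and width dimensions (the 3D analogue of torch.nn.PixelShuffle).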
batch_size, channels, in_depth, in_height, in_width = input.size()
nOut = channels // self.scale ** 3
out_depth = in_depth * self.scale
out_height = in_height * self.scale
out_width = in_width * self.scale
input_view = input.contiguous().view(batch_size, nOut, self.scale, self.scale, self.scale, in_depth, in_height, in_width)
output = input_view.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
return output.view(batch_size, nOut, out_depth, out_height, out_width)
class ConvLSTMCell(nn.Module):
def __init__(self, input_feats, hidden_feats, input_kernel_size, input_stride, input_padding):
super(ConvLSTMCell, self).__init__()
self.hidden_feats = hidden_feats
self.hidden_kernel_size = 3
self.num_features = 4
self.input_padding = input_padding
self.padding = int((self.hidden_kernel_size - 1) / 2) # for the hidden state
# input gate
self.Wxi = nn.Conv3d(input_feats, hidden_feats, input_kernel_size, input_stride,
input_padding, bias=True, padding_mode='circular')
self.Whi = nn.Conv3d(hidden_feats, hidden_feats, self.hidden_kernel_size,
1, padding=1, bias=False, padding_mode='circular')
# forget gate
self.Wxf = nn.Conv3d(input_feats, hidden_feats, input_kernel_size, input_stride,
input_padding, bias=True, padding_mode='circular')
self.Whf = nn.Conv3d(hidden_feats, hidden_feats, self.hidden_kernel_size,
1, padding=1, bias=False, padding_mode='circular')
# candidate gate
self.Wxc = nn.Conv3d(input_feats, hidden_feats, input_kernel_size, input_stride,
input_padding, bias=True, padding_mode='circular')
self.Whc = nn.Conv3d(hidden_feats, hidden_feats, self.hidden_kernel_size,
1, padding=1, bias=False, padding_mode='circular')
# output gate
self.Wxo = nn.Conv3d(input_feats, hidden_feats, input_kernel_size, input_stride,
input_padding, bias=True, padding_mode='circular')
self.Who = nn.Conv3d(hidden_feats, hidden_feats, self.hidden_kernel_size,
1, padding=1, bias=False, padding_mode='circular')
# initialization
nn.init.zeros_(self.Wxi.bias)
nn.init.zeros_(self.Wxf.bias)
nn.init.zeros_(self.Wxc.bias)
self.Wxo.bias.data.fill_(1.0)
def forward(self, x, h, c):
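# Standard ConvLSTM gating: input gate ci, forget gate cf, cell state cc and
# output gate co; this variant omits peephole connections.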
ci = torch.sigmoid(self.Wxi(x) + self.Whi(h))
cf = torch.sigmoid(self.Wxf(x) + self.Whf(h))
cc = cf * c + ci * torch.tanh(self.Wxc(x) + self.Whc(h))
co = torch.sigmoid(self.Wxo(x) + self.Who(h))
ch = co * torch.tanh(cc)
return ch, cc
def init_hidden_tensor(self, prev_state):
return (Variable(prev_state[0]).cuda(), Variable(prev_state[1]).cuda())
class ResBlock(nn.Module):
def __init__(self, n_feats, expansion_ratio, res_scale=0.1):
super(ResBlock, self).__init__()
self.res_scale = res_scale
self.conv1 = weight_norm(nn.Conv3d(n_feats, n_feats*expansion_ratio, kernel_size=3,
padding=1, padding_mode='circular'))
self.conv2 = weight_norm(nn.Conv3d(n_feats*expansion_ratio, n_feats, kernel_size=3,
padding=1, padding_mode='circular'))
self.act = nn.ReLU(inplace=True)
def forward(self, x):
s = x
x = self.act(self.conv1(x))
x = self.conv2(x)
x = s + self.res_scale * x
return x
class temporal_sr(nn.Module):
def __init__(self, t_upscale_factor):
super(temporal_sr, self).__init__()
self.t_upscale_factor = t_upscale_factor
def forward(self, x):
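# Temporal super-resolution: flatten batch and space, linearly interpolate along
# the time axis by t_upscale_factor, then restore the [t,b,c,d,h,w] layout.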
t, b, c, d, h, w = x.shape
x = x.permute(1,3,4,5,2,0) # [b,d,h,w,c,t]
x = x.contiguous().view(b*d*h*w, c, t)
x = F.interpolate(x, scale_factor=self.t_upscale_factor, mode='linear', align_corners=True)
x = x.contiguous().view(b, d, h, w, c, t*self.t_upscale_factor)
x = x.permute(5,0,4,1,2,3) # [t,b,c,d,h,w]
return x
class PhySR(nn.Module):
def __init__(self, n_feats, n_layers, upscale_factor, shift_mean_paras, step=1, effective_step=[1]):
super(PhySR, self).__init__()
# n_layers: [n_convlstm, n_resblock]
self.n_convlstm, self.n_resblock = n_layers
self.t_up_factor, self.s_up_factor = upscale_factor
self.mean, self.std = shift_mean_paras
self.step = step
self.effective_step = effective_step
self._all_layers = []
################## temporal super-resolution ###################
# temporal interpolation
self.tsr = temporal_sr(self.t_up_factor)
# temporal correction - convlstm
for i in range(self.n_convlstm):
name = 'convlstm{}'.format(i)
cell = ConvLSTMCell(
input_feats=2,
hidden_feats=n_feats,
input_kernel_size=3,
input_stride=1,
input_padding=1)
setattr(self, name, cell)
self._all_layers.append(cell)
################## spatial super-resolution ###################
body = [ResBlock(n_feats, expansion_ratio=4, res_scale=0.1) for _ in range(self.n_resblock)]
tail = [weight_norm(nn.Conv3d(n_feats, 2*(self.s_up_factor ** 3),
kernel_size=3, padding=1, padding_mode='circular')), PixelShuffle3d(self.s_up_factor)]
skip = [weight_norm(nn.Conv3d(2, 2 * (self.s_up_factor ** 3), kernel_size=3, stride=1,
padding=1, padding_mode='circular')), PixelShuffle3d(self.s_up_factor)]
self.body = nn.Sequential(*body)
self.tail = nn.Sequential(*tail)
self.skip = nn.Sequential(*skip)
# initialize weights
self.apply(initialize_weights)
# shiftmean
self.shift_mean = ShiftMean(self.mean, self.std)
def forward(self, x, initial_state):
# input: [t,b,c,h,w]
internal_state = []
outputs = []
# normalize
x = self.shift_mean(x, mode='sub')
# temporal super-resolution
x = self.tsr(x)
for step in range(self.step):
# input:[t,b,c,d,h,w]
xt = x[step,...] # [b,c,d,h,w]
# skip connection
s = self.skip(xt)
# temporal correction
for i in range(self.n_convlstm):
name = 'convlstm{}'.format(i)
if step == 0:
(h,c) = getattr(self, name).init_hidden_tensor(
prev_state = initial_state[i])
internal_state.append((h,c))
# one-step forward
(h, c) = internal_state[i]
xt, new_c = getattr(self, name)(xt, h, c)
internal_state[i] = (xt, new_c)
# spatial super-resolution
xt = self.body(xt)
xt = self.tail(xt)
# residual connection
xt += s
xt = xt.view(1, 2, 2, 48, 48, 48) # [t,b,c,d,h,w]
if step in self.effective_step:
outputs.append(xt)
outputs = torch.cat(tuple(outputs), dim=0)
outputs = self.shift_mean(outputs, mode='add')
return outputs
class Conv3dDerivative(nn.Module):
def __init__(self, DerFilter, resol, kernel_size=5, name=''):
super(Conv3dDerivative, self).__init__()
self.resol = resol # constant in the finite difference
self.name = name
self.input_channels = 1
self.output_channels = 1
self.kernel_size = kernel_size
self.padding = int((kernel_size - 1) / 2)
self.filter = nn.Conv3d(self.input_channels, self.output_channels, self.kernel_size,
1, padding=0, bias=False)
# Fixed gradient operator
self.filter.weight = nn.Parameter(torch.FloatTensor(DerFilter), requires_grad=False)
def forward(self, input):
derivative = self.filter(input)
return derivative / self.resol
class Conv1dDerivative(nn.Module):
def __init__(self, DerFilter, resol, kernel_size=3, name=''):
super(Conv1dDerivative, self).__init__()
self.resol = resol # time-step constant in the finite difference
self.name = name
self.input_channels = 1
self.output_channels = 1
self.kernel_size = kernel_size
self.padding = int((kernel_size - 1) / 2)
self.filter = nn.Conv1d(self.input_channels, self.output_channels, self.kernel_size,
1, padding=0, bias=False)
# Fixed gradient operator
self.filter.weight = nn.Parameter(torch.FloatTensor(DerFilter), requires_grad=False)
def forward(self, input):
derivative = self.filter(input)
return derivative / self.resol
class LossGenerator(nn.Module):
'''Calculate the physical loss and the data loss'''
def __init__(self, dt = (10.0/200), dx = (20.0/128)):
super(LossGenerator, self).__init__()
self.laplace = Conv3dDerivative(
DerFilter = laplace_3d,
resol = (dx**2),
kernel_size = 5,
name = 'laplace_operator').cuda()
# forward/backward derivative operator
self.dt = Conv1dDerivative(
DerFilter = [[[-1/2, 0, 1/2]]],
resol = (dt),
kernel_size = 3,
name = 'partial_t').cuda()
self.fwd_dt = Conv1dDerivative(
DerFilter = [[[-3/2, 2, -1/2]]],
resol = (dt),
kernel_size = 3,
name = 'forward_partial_t').cuda()
self.bwd_dt = Conv1dDerivative(
DerFilter = [[[1/2, -2, 3/2]]],
resol = (dt),
kernel_size = 3,
name = 'backward_partial_t').cuda()
def GetPhyLoss(self, output):
'''Calculate the physical loss'''
# output: [t,b,c,d,h,w]
############### spatial derivatives #################
# laplace u, [t,b,c,d-4,h-4,w-4]
u = output[:, :, 0:1, :, :, :]
len_t, len_b, len_c, len_d, len_h, len_w = u.shape
# [t,b,c,d,h,w] -> [t*b,c,d,h,w]
u = u.reshape(len_t*len_b, len_c, len_d, len_h, len_w)
laplace_u = self.laplace(u)
# change batch to [t,b,c,d,h,w]
laplace_u = laplace_u.reshape(len_t,len_b,len_c,len_d-4,len_h-4,len_w-4)
# laplace v, [t,b,c,d-4,h-4,w-4]
v = output[:, :, 1:2, :, :, :]
len_t, len_b, len_c, len_d, len_h, len_w = v.shape
v = v.reshape(len_t*len_b, len_c, len_d, len_h, len_w)
laplace_v = self.laplace(v)
laplace_v = laplace_v.reshape(len_t,len_b,len_c,len_d-4,len_h-4,len_w-4)
############### temporal derivatives #################
# u_t, [t,b,c,d-4,h-4,w-4]
u = output[:, :, 0:1, 2:-2, 2:-2, 2:-2]
len_t, len_b, len_c, len_d, len_h, len_w = u.shape
u = u.permute(3,4,5,1,2,0) # [d,h,w,b,c,t]
u = u.reshape(len_d*len_h*len_w*len_b, len_c, len_t) # [d*h*w*b,c,t]
u_t = self.dt(u) # [d*h*w*b,c,t-2]
u_t0 = self.fwd_dt(u[:,:,0:3])
u_tn = self.bwd_dt(u[:,:,-3:])
u_t = torch.cat((u_t0,u_t,u_tn), dim=2) # [d*h*w*b,c,t]
u_t = u_t.reshape(len_d, len_h, len_w, len_b, len_c, len_t)
u_t = u_t.permute(5,3,4,0,1,2) # [t,b,c,d,h,w]
# v_t, [t,b,c,d-4,h-4,w-4]
v = output[:, :, 1:2, 2:-2, 2:-2, 2:-2]
len_t, len_b, len_c, len_d, len_h, len_w = v.shape
v = v.permute(3,4,5,1,2,0) # [d,h,w,b,c,t]
v = v.reshape(len_d*len_h*len_w*len_b, len_c, len_t) # [d*h*w*b,c,t]
v_t = self.dt(v)
v_t0 = self.fwd_dt(v[:,:,0:3])
v_tn = self.bwd_dt(v[:,:,-3:])
v_t = torch.cat((v_t0, v_t, v_tn), dim=2)
v_t = v_t.reshape(len_d, len_h, len_w, len_b, len_c, len_t)
v_t = v_t.permute(5,3,4,0,1,2) # [t,b,c,d,h,w]
############### corresponding u & v ###################
u = output[:, :, 0:1, 2:-2, 2:-2, 2:-2] # [step, b, c, depth(Z), height(Y), width(X)]
v = output[:, :, 1:2, 2:-2, 2:-2, 2:-2] # [step, b, c, depth(Z), height(Y), width(X)]
# make sure the dimensions consistent
assert laplace_u.shape == u_t.shape
assert u_t.shape == v_t.shape
assert laplace_u.shape == u.shape
assert laplace_v.shape == v.shape
# governing equations
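# Gray-Scott reaction-diffusion system:
#   u_t = DA * laplace(u) - u*v**2 + f*(1 - u)
#   v_t = DB * laplace(v) + u*v**2 - (f + k)*v
# f_u and f_v below are the PDE residuals that the physics loss drives towards zero.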
DA = 0.2
DB = 0.1
f = 0.025
k = 0.055
f_u = DA * laplace_u - u*(v**2) + f*(1-u) - u_t
f_v = DB * laplace_v + u*(v**2) - (f+k)*v - v_t
return f_u, f_v
def GetModelLoss(self, model):
''' Get the L2-norm of the model '''
l2_reg = torch.tensor(0.).cuda()
for param in model.parameters():
l2_reg += torch.norm(param)
return l2_reg
def LossGen(output, truth, beta, loss_func):
L1_loss = nn.L1Loss()
MSE_loss = nn.MSELoss()
# data loss
data_loss = L1_loss(output, truth)
# phy loss, output shape: [t,b,c,d,h,w]
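# Wrap the field circularly along each spatial axis (2 cells on one side, 3 on the
# other) so the 5x5x5 Laplacian stencil sees periodic boundaries before cropping.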
output = torch.cat((output[:, :, :, :, :, -2:], output, output[:, :, :, :, :, 0:3]), dim=5)
output = torch.cat((output[:, :, :, :, -2:, :], output, output[:, :, :, :, 0:3, :]), dim=4)
output = torch.cat((output[:, :, :, -2:, :, :], output, output[:, :, :, 0:3, :, :]), dim=3)
f_u, f_v = loss_func.GetPhyLoss(output)
phy_loss = MSE_loss(f_u, torch.zeros_like(f_u).cuda()) + MSE_loss(
f_v, torch.zeros_like(f_v).cuda())
loss = data_loss + beta * phy_loss
return loss, data_loss, phy_loss
def train(model, train_loader, val_loader, init_state, n_iters, lr, print_every, dt, dx,
beta, save_path, pretrain_flag=False):
# train_loader: low-resolution tensors
# beta: weight of the physics loss term
best_error = 1e2
print_loss_total = 0
train_loss_list, val_loss_list, val_error_list = [], [], []
pretrain_save_path = save_path + 'pretrain.pt'
model_save_path = save_path + 'checkpoint.pt'
if pretrain_flag == True:
model, _, _ = load_checkpoint(model, optimizer=None, scheduler=None,
save_dir=pretrain_save_path)
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.998)
loss_function = LossGenerator(dt, dx)
for epoch in range(n_iters):
for idx, (lres, hres) in enumerate(train_loader):
optimizer.zero_grad()
lres, hres = lres.cuda(), hres.cuda()
lres, hres = lres.transpose(0,1), hres.transpose(0,1) # (b,t,c,d,h,w) -> (t,b,c,d,h,w)
outputs = model(lres, init_state)
# compute loss
loss, data_loss, phy_loss = LossGen(outputs, hres, beta, loss_function)
loss.backward(retain_graph=True)
print_loss_total += loss.item()
# gradient clipping
nn.utils.clip_grad_value_(model.parameters(), clip_value=1.0)
optimizer.step()
scheduler.step()
if (epoch+1) % print_every == 0:
# calculate the average training loss
print_loss_mean = print_loss_total / (print_every*len(train_loader))
train_loss_list.append(print_loss_mean)
print_loss_total = 0
# print the training loss
print('Train loss (%d/%d %d%%): %.8f' % (epoch+1, n_iters,
(epoch+1)/n_iters*100, print_loss_mean))
# for print training loss (details)
print('Epoch %d: data loss(%.8f), phy loss(%.8f)' %(
epoch+1, data_loss.item(), phy_loss.item()))
# calculate the validation loss
val_loss, val_error = validate(model, val_loader, init_state, loss_function, beta)
# for print validation loss
print('Epoch (%d/%d %d%%): val loss %.8f, val error %.8f' % (epoch+1, n_iters,
(epoch+1)/n_iters*100, val_loss, val_error))
print('')
val_loss_list.append(val_loss)
val_error_list.append(val_error)
# save model
if val_error < best_error:
save_checkpoint(model, optimizer, scheduler, model_save_path)
best_error = val_error
return train_loss_list, val_loss_list, val_error_list
def validate(model, val_loader, init_state, loss_function, beta):
''' evaluate the model performance '''
val_loss = 0
val_error = 0
MSE_function = nn.MSELoss()
for idx, (lres, hres) in enumerate(val_loader):
lres, hres = lres.cuda(), hres.cuda()
lres, hres = lres.transpose(0,1), hres.transpose(0,1) # (b,t,c,d,h,w) -> (t,b,c,d,h,w)
outputs = model(lres, init_state)
# calculate the loss
loss,_,_ = LossGen(outputs, hres, beta, loss_function)
val_loss += loss.item()
# calculate the error
error = torch.sqrt(MSE_function(hres, outputs.detach()) / MSE_function(
hres, torch.zeros_like(hres).cuda()))
val_error += error.item()
val_error = val_error / len(val_loader)
val_loss = val_loss / len(val_loader)
return val_loss, val_error
def test(model, test_loader, init_state, save_path, fig_save_path):
# load the well-trained model
model_save_path = save_path + 'checkpoint.pt'
model, _, _ = load_checkpoint(model, optimizer=None, scheduler=None,
save_dir=model_save_path)
MSE_function = nn.MSELoss()
pred_error = 0
for idx, (lres, hres) in enumerate(test_loader):
lres, hres = lres.cuda(), hres.cuda()
lres, hres = lres.transpose(0,1), hres.transpose(0,1) # (b,t,c,d,h,w) -> (t,b,c,d,h,w)
outputs = model(lres, init_state)
# calculate the error
error = torch.sqrt(MSE_function(hres, outputs.detach()) / MSE_function(
hres, torch.zeros_like(hres).cuda()))
pred_error += error.item()
torch.save({"pred": outputs.detach().cpu(), "lres": lres.cpu(),
"hres": hres.cpu()}, save_path + 'output_'+str(idx)+'.pt')
# comparison plot
t = np.arange(hres.shape[0])
for b in range(hres.shape[1]):
u_pred = outputs[:, b, 0, :, :, :].detach().cpu().numpy()
u_true = hres[:, b, 0, :, :, :].cpu().numpy()
plt.figure()
plt.plot(t, u_pred[:, 24, 24, 24], label = 'u-wdsr')
plt.plot(t, u_true[:, 24, 24, 24], label = 'u-Ref.')
plt.xlabel('t')
plt.ylabel('u')
plt.legend()
plt.savefig(fig_save_path + 'u_comp_[i=%d][b=%d].png' %(idx, b))
pred_error = pred_error/len(test_loader)
return pred_error
def save_checkpoint(model, optimizer, scheduler, save_dir):
'''save model and optimizer'''
torch.save({
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict()
}, save_dir)
def load_checkpoint(model, optimizer, scheduler, save_dir):
'''load model and optimizer'''
checkpoint = torch.load(save_dir)
model.load_state_dict(checkpoint['model_state_dict'])
if optimizer is not None:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
print('Pretrained model loaded!')
return model, optimizer, scheduler
class GSDataset(Dataset):
def __init__(self, data_dir, data_fname, ICs, n_slices):
'''
Args:
-----
data_dir: str,
folder path to the data
data_fname: str
the name of the dataset file
ICs: list
the list of random noise parameters
'''
self.data_dir = data_dir
self.data_fname = data_fname
self.ICs = ICs
self.n_slices = n_slices
self.samples = []
for i in range(len(self.ICs)):
# define the data filename
lres_filename = self.data_fname + str(ICs[i]) + '_2x501x12x12x12.mat'
hres_filename = self.data_fname + str(ICs[i]) + '_2x1501x48x48x48.mat'
# load the lres and hres tensor
lres = scio.loadmat(os.path.join(data_dir, lres_filename))
hres = scio.loadmat(os.path.join(data_dir, hres_filename))
lres = lres['uv'] # [501,2,12,12,12]
hres = hres['uv'][500:,...] # [1001,2,48,48,48]
lres_dt, hres_dt = int(lres.shape[0]/n_slices), int(hres.shape[0]/n_slices)
for j in range(n_slices):
lres_tensor = torch.tensor(lres[j*lres_dt:(j+1)*lres_dt,...], dtype=torch.float32)
hres_tensor = torch.tensor(hres[j*hres_dt:(j+1)*hres_dt,...], dtype=torch.float32)
self.samples.append((lres_tensor, hres_tensor))
def __len__(self):
return int(len(self.ICs)*self.n_slices)
def __getitem__(self, idx):
return self.samples[idx]
def get_init_state(batch_size, hidden_channels, output_size, mode='coord'):
'''initial hidden states for all convlstm layers'''
# (b, c, h, w)
num_layers = len(hidden_channels)
initial_state = []
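# Note: 'coord' and 'zero' build 4-D (b,c,h,w) states aimed at 2-D ConvLSTMs;
# only 'random' yields the 5-D (b,c,d,h,w) states the 3-D model above expects.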
if mode == 'coord':
for i in range(num_layers):
resolution = output_size[i][0]
x, y = [np.linspace(-64, 64, resolution+1)] * 2
x, y = np.meshgrid(x[:-1], y[:-1]) # [32, 32]
xy = np.concatenate((x[None, :], y[None, :]), 0) # [2, 32, 32]
xy = np.repeat(xy, int(hidden_channels[i]/2), axis=0) # [c,h,w]
xy = np.repeat(xy[None, :], batch_size[i], 0) # [b,c,h,w]
xy = torch.tensor(xy, dtype=torch.float32)
initial_state.append((xy, xy))
elif mode == 'zero':
for i in range(num_layers):
(h0, c0) = (torch.zeros(batch_size[i], hidden_channels[i], output_size[i][0],
output_size[i][1]), torch.zeros(batch_size[i], hidden_channels[i], output_size[i][0],
output_size[i][1]))
initial_state.append((h0,c0))
elif mode == 'random':
for i in range(num_layers):
(h0, c0) = (torch.randn(batch_size[i], hidden_channels[i], output_size[i][0],
output_size[i][1], output_size[i][2]), torch.randn(batch_size[i], hidden_channels[i], output_size[i][0],
output_size[i][1], output_size[i][2]))
initial_state.append((h0,c0))
else:
raise NotImplementedError
return initial_state
if __name__ == '__main__':
print('Super-Resolution for 3D GS equation...')
# define the data file path
data_dir = './data/3DGS/'
data_fname = '3DGS_IC'
# define the initial conditions
ICs = np.arange(1,3)
n_slices = 10
n_datasets = len(ICs) * n_slices
data_loader = GSDataset(data_dir, data_fname, ICs, n_slices)
# get mean and std
total_hres = torch.zeros(n_datasets, 100, 2, 48, 48, 48)
total_lres = torch.zeros(n_datasets, 50, 2, 12, 12, 12) # [b,t,c,d,h,w]
for i in range(len(data_loader)):
total_hres[i,...] = data_loader[i][1]
total_lres[i,...] = data_loader[i][0]
mean_hres = torch.mean(total_hres, axis = (0,1,3,4,5))
std_hres = torch.std(total_hres, axis = (0,1,3,4,5))
# split data
split_ratio = [int(n_datasets*0.7), int(n_datasets*0.2), int(n_datasets*0.1)]
train_data, val_data, test_data = torch.utils.data.random_split(data_loader, split_ratio)
# change to pytorch data
# data in train_loader is [b, t, c, d, h, w] -> [2, 50, 2, 12, 12, 12]
train_loader = torch.utils.data.DataLoader(train_data, batch_size = 2,
shuffle=True, num_workers=0)
val_loader = torch.utils.data.DataLoader(val_data, batch_size = 2,
shuffle=False, num_workers=0)
test_loader = torch.utils.data.DataLoader(test_data, batch_size = 2,
shuffle=False, num_workers=0)
######################### build model #############################
# training parameters
n_iters = 2000
print_every = 2
learning_rate = 1e-3
dt = 1.0
dx = 100.0 / 48.0
steps = 100
effective_step = list(range(0, steps))
beta = 0.025 # for physics loss
save_path = './model/3DGS/'
fig_save_path = './figures/3DGS/'
model = PhySR(
n_feats = 32,
n_layers = [1, 2], # [n_convlstm, n_resblock]
upscale_factor = [2, 4], # [t_up, s_up]
shift_mean_paras = [mean_hres, std_hres],
step = steps,
effective_step = effective_step).cuda()
# define the initial states and initial output for model
init_state = get_init_state(
batch_size = [2],
hidden_channels = [32],
output_size = [[12,12,12]],
mode = 'random')
start = time.time()
train_loss_list, val_loss_list, val_error_list = train(model, train_loader, val_loader,
init_state, n_iters, learning_rate, print_every, dt, dx, beta, save_path)
end = time.time()
print('The training time is: ', (end - start))
print('')
np.save(save_path + 'train_loss', train_loss_list)
np.save(save_path + 'val_loss', val_loss_list)
np.save(save_path + 'val_error', val_error_list)
###################### model inference ###########################
pred_error = test(model, test_loader, init_state, save_path, fig_save_path)
print('The predictive error is: ', pred_error)
print('Test completed')
# plot loss
x_axis = np.arange(0, n_iters, print_every)
plt.figure()
plt.plot(x_axis, train_loss_list, label = 'train loss')
plt.yscale('log')
plt.legend()
plt.savefig(fig_save_path + 'train loss.png', dpi = 300)
plt.figure()
plt.plot(x_axis, val_loss_list, label = 'val loss')
plt.yscale('log')
plt.legend()
plt.savefig(fig_save_path + 'val loss.png', dpi = 300)
plt.figure()
plt.plot(x_axis, val_error_list, label = 'val error')
plt.yscale('log')
plt.legend()
plt.savefig(fig_save_path + 'val error.png', dpi = 300)
|
SECRET_KEY = "thisisasecretkeyfortests.itisverysecure"
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'vinaigrette',
)
USE_I18N = True
LANGUAGES = (
('en', u'English'),
('fr', u'Français'),
)
LANGUAGE_CODE = 'en'
ROOT_URLCONF = 'test_project.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
}
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
]
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
|
# __init__.py is a special Python file that allows a directory to become
# a Python package so it can be accessed using the 'import' statement.
from .init_db import InitDbCommand
|
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import time
import argparse
import tensorflow as tf
import os
import sys
import math
import collections
from tensorflow.python.client import timeline
import json
from tensorflow.python.ops import partitioned_variables
# Set to INFO for tracking training, default is WARN. ERROR for least messages
tf.logging.set_verbosity(tf.logging.INFO)
print("Using TensorFlow version %s" % (tf.__version__))
# Definition of some constants
CONTINUOUS_COLUMNS = ['I' + str(i) for i in range(1, 14)] # 1-13 inclusive
CATEGORICAL_COLUMNS = ['C' + str(i) for i in range(1, 27)] # 1-26 inclusive
LABEL_COLUMN = ['clicked']
TRAIN_DATA_COLUMNS = LABEL_COLUMN + CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS
FEATURE_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS
HASH_BUCKET_SIZES = {
'C1': 2500,
'C2': 2000,
'C3': 300000,
'C4': 250000,
'C5': 1000,
'C6': 100,
'C7': 20000,
'C8': 4000,
'C9': 20,
'C10': 100000,
'C11': 10000,
'C12': 250000,
'C13': 40000,
'C14': 100,
'C15': 100,
'C16': 200000,
'C17': 50,
'C18': 10000,
'C19': 4000,
'C20': 20,
'C21': 250000,
'C22': 100,
'C23': 100,
'C24': 250000,
'C25': 400,
'C26': 100000
}
EMBEDDING_DIMENSIONS = {
'C1': 64,
'C2': 64,
'C3': 128,
'C4': 128,
'C5': 64,
'C6': 64,
'C7': 64,
'C8': 64,
'C9': 64,
'C10': 128,
'C11': 64,
'C12': 128,
'C13': 64,
'C14': 64,
'C15': 64,
'C16': 128,
'C17': 64,
'C18': 64,
'C19': 64,
'C20': 64,
'C21': 128,
'C22': 64,
'C23': 64,
'C24': 128,
'C25': 64,
'C26': 128
}
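# Per-column hash-bucket sizes and embedding dimensions for the categorical features;
# the higher-cardinality columns get larger buckets and wider embeddings.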
class WDL():
def __init__(self,
wide_column=None,
deep_column=None,
dnn_hidden_units=[1024, 512, 256],
optimizer_type='adam',
linear_learning_rate=0.2,
deep_learning_rate=0.01,
inputs=None,
bf16=False,
stock_tf=None,
adaptive_emb=False,
input_layer_partitioner=None,
dense_layer_partitioner=None):
if not inputs:
raise ValueError("Dataset is not defined.")
self._feature = inputs[0]
self._label = inputs[1]
self._wide_column = wide_column
self._deep_column = deep_column
if not wide_column or not deep_column:
raise ValueError("Wide column or Deep column is not defined.")
self.tf = stock_tf
self.bf16 = False if self.tf else bf16
self.is_training = True
self._adaptive_emb = adaptive_emb
self._dnn_hidden_units = dnn_hidden_units
self._linear_learning_rate = linear_learning_rate
self._deep_learning_rate = deep_learning_rate
self._optimizer_type = optimizer_type
self._input_layer_partitioner = input_layer_partitioner
self._dense_layer_partitioner = dense_layer_partitioner
self._create_model()
with tf.name_scope('head'):
self._create_loss()
self._create_optimizer()
self._create_metrics()
# used to add summary in tensorboard
def _add_layer_summary(self, value, tag):
tf.summary.scalar('%s/fraction_of_zero_values' % tag,
tf.nn.zero_fraction(value))
tf.summary.histogram('%s/activation' % tag, value)
def _dnn(self, dnn_input, dnn_hidden_units=None, layer_name=''):
for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
with tf.variable_scope(layer_name + '_%d' % layer_id,
partitioner=self._dense_layer_partitioner,
reuse=tf.AUTO_REUSE) as dnn_layer_scope:
dnn_input = tf.layers.dense(
dnn_input,
units=num_hidden_units,
activation=tf.nn.relu,
kernel_initializer=tf.glorot_uniform_initializer(),
name=dnn_layer_scope)
self._add_layer_summary(dnn_input, dnn_layer_scope.name)
return dnn_input
# create model
def _create_model(self):
# Dnn part
with tf.variable_scope('dnn'):
# input layer
with tf.variable_scope('input_from_feature_columns',
partitioner=self._input_layer_partitioner,
reuse=tf.AUTO_REUSE):
if self._adaptive_emb and not self.tf:
'''Adaptive Embedding Feature Part 1 of 2'''
adaptive_mask_tensors = {}
for col in CATEGORICAL_COLUMNS:
adaptive_mask_tensors[col] = tf.ones([args.batch_size],
tf.int32)
net = tf.feature_column.input_layer(
features=self._feature,
feature_columns=self._deep_column,
adaptive_mask_tensors=adaptive_mask_tensors)
else:
net = tf.feature_column.input_layer(
features=self._feature,
feature_columns=self._deep_column)
self._add_layer_summary(net, 'input_from_feature_columns')
# hidden layers
dnn_scope = tf.variable_scope('dnn_layers', \
partitioner=self._dense_layer_partitioner, reuse=tf.AUTO_REUSE)
with dnn_scope.keep_weights(dtype=tf.float32) if self.bf16 \
else dnn_scope:
if self.bf16:
net = tf.cast(net, dtype=tf.bfloat16)
net = self._dnn(net, self._dnn_hidden_units, 'hiddenlayer')
if self.bf16:
net = tf.cast(net, dtype=tf.float32)
# dnn logits
logits_scope = tf.variable_scope('logits')
with logits_scope.keep_weights(dtype=tf.float32) if self.bf16 \
else logits_scope as dnn_logits_scope:
dnn_logits = tf.layers.dense(net,
units=1,
activation=None,
name=dnn_logits_scope)
self._add_layer_summary(dnn_logits, dnn_logits_scope.name)
# linear part
with tf.variable_scope(
'linear', partitioner=self._dense_layer_partitioner) as scope:
linear_logits = tf.feature_column.linear_model(
units=1,
features=self._feature,
feature_columns=self._wide_column,
sparse_combiner='sum',
weight_collections=None,
trainable=True)
self._add_layer_summary(linear_logits, scope.name)
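# Wide & Deep head: sum the linear (wide) and DNN (deep) logits, then apply a
# sigmoid to obtain the click probability used for prediction and metrics.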
self._logits = tf.add_n([dnn_logits, linear_logits])
self.probability = tf.math.sigmoid(self._logits)
self.output = tf.round(self.probability)
# compute loss
def _create_loss(self):
self._logits = tf.squeeze(self._logits)
self.loss = tf.losses.sigmoid_cross_entropy(
self._label,
self._logits,
scope='loss',
reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
tf.summary.scalar('loss', self.loss)
# define optimizer and generate train_op
def _create_optimizer(self):
self.global_step = tf.train.get_or_create_global_step()
if self.tf or self._optimizer_type == 'adam':
dnn_optimizer = tf.train.AdamOptimizer(
learning_rate=self._deep_learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=1e-8)
elif self._optimizer_type == 'adagrad':
dnn_optimizer = tf.train.AdagradOptimizer(
learning_rate=self._deep_learning_rate,
initial_accumulator_value=0.1,
use_locking=False)
elif self._optimizer_type == 'adamasync':
dnn_optimizer = tf.train.AdamAsyncOptimizer(
learning_rate=self._deep_learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=1e-8)
elif self._optimizer_type == 'adagraddecay':
dnn_optimizer = tf.train.AdagradDecayOptimizer(
learning_rate=self._deep_learning_rate,
global_step=self.global_step)
else:
raise ValueError("Optimzier type error.")
linear_optimizer = tf.train.FtrlOptimizer(
learning_rate=self._linear_learning_rate,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
train_ops = []
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_ops.append(
dnn_optimizer.minimize(self.loss,
var_list=tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope='dnn'),
global_step=self.global_step))
train_ops.append(
linear_optimizer.minimize(self.loss,
var_list=tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope='linear')))
self.train_op = tf.group(*train_ops)
# compute acc & auc
def _create_metrics(self):
self.acc, self.acc_op = tf.metrics.accuracy(labels=self._label,
predictions=self.output)
self.auc, self.auc_op = tf.metrics.auc(labels=self._label,
predictions=self.probability,
num_thresholds=1000)
tf.summary.scalar('eval_acc', self.acc)
tf.summary.scalar('eval_auc', self.auc)
# generate dataset pipeline
def build_model_input(filename, batch_size, num_epochs):
def parse_csv(value):
tf.logging.info('Parsing {}'.format(filename))
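# Each CSV row holds 1 label column, 13 continuous features (I1-I13) and
# 26 categorical features (C1-C26), in the order given by TRAIN_DATA_COLUMNS.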
cont_defaults = [[0.0] for i in range(1, 14)]
cate_defaults = [[' '] for i in range(1, 27)]
label_defaults = [[0]]
column_headers = TRAIN_DATA_COLUMNS
record_defaults = label_defaults + cont_defaults + cate_defaults
columns = tf.io.decode_csv(value, record_defaults=record_defaults)
all_columns = collections.OrderedDict(zip(column_headers, columns))
labels = all_columns.pop(LABEL_COLUMN[0])
features = all_columns
return features, labels
'''Work Queue Feature'''
if args.workqueue and not args.tf:
from tensorflow.python.ops.work_queue import WorkQueue
work_queue = WorkQueue([filename])
# For multiple files:
# work_queue = WorkQueue([filename, filename1,filename2,filename3])
files = work_queue.input_dataset()
else:
files = filename
# Extract lines from input files using the Dataset API.
dataset = tf.data.TextLineDataset(files)
dataset = dataset.shuffle(buffer_size=20000,
seed=args.seed) # fix seed for reproducing
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(parse_csv, num_parallel_calls=28)
dataset = dataset.prefetch(2)
return dataset
# generate feature columns
def build_feature_columns():
# Notes: Statistics of Kaggle's Criteo Dataset have been calculated in advance to save time.
mins_list = [
0.0, -3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
]
range_list = [
1539.0, 22069.0, 65535.0, 561.0, 2655388.0, 233523.0, 26297.0, 5106.0,
24376.0, 9.0, 181.0, 1807.0, 6879.0
]
def make_minmaxscaler(min, range):
def minmaxscaler(col):
return (col - min) / range
return minmaxscaler
deep_columns = []
wide_columns = []
for column_name in FEATURE_COLUMNS:
if column_name in CATEGORICAL_COLUMNS:
categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
column_name, hash_bucket_size=10000, dtype=tf.string)
wide_columns.append(categorical_column)
if not args.tf:
'''Feature Elimination of EmbeddingVariable Feature'''
if args.ev_elimination == 'gstep':
# Feature elimination based on global steps
evict_opt = tf.GlobalStepEvict(steps_to_live=4000)
elif args.ev_elimination == 'l2':
# Feature elimination based on l2 weight
evict_opt = tf.L2WeightEvict(l2_weight_threshold=1.0)
else:
evict_opt = None
'''Feature Filter of EmbeddingVariable Feature'''
if args.ev_filter == 'cbf':
# CBF-based feature filter
filter_option = tf.CBFFilter(
filter_freq=3,
max_element_size=2**30,
false_positive_probability=0.01,
counter_type=tf.int64)
elif args.ev_filter == 'counter':
# Counter-based feature filter
filter_option = tf.CounterFilter(filter_freq=3)
else:
filter_option = None
ev_opt = tf.EmbeddingVariableOption(
evict_option=evict_opt, filter_option=filter_option)
if args.ev:
'''Embedding Variable Feature'''
categorical_column = tf.feature_column.categorical_column_with_embedding(
column_name, dtype=tf.string, ev_option=ev_opt)
elif args.adaptive_emb:
''' Adaptive Embedding Feature Part 2 of 2
Besides the code below, a dict 'adaptive_mask_tensors' is needed as input to
'tf.feature_column.input_layer(adaptive_mask_tensors=adaptive_mask_tensors)'.
For a column 'COL_NAME', the value of adaptive_mask_tensors['$COL_NAME'] is an int32
tensor with shape [batch_size].
'''
categorical_column = tf.feature_column.categorical_column_with_adaptive_embedding(
column_name,
hash_bucket_size=HASH_BUCKET_SIZES[column_name],
dtype=tf.string,
ev_option=ev_opt)
elif args.dynamic_ev:
'''Dynamic-dimension Embedding Variable'''
print(
"Dynamic-dimension Embedding Variable isn't really enabled in model."
)
sys.exit()
if args.tf or not args.emb_fusion:
embedding_column = tf.feature_column.embedding_column(
categorical_column,
dimension=EMBEDDING_DIMENSIONS[column_name],
combiner='mean')
else:
'''Embedding Fusion Feature'''
embedding_column = tf.feature_column.embedding_column(
categorical_column,
dimension=EMBEDDING_DIMENSIONS[column_name],
combiner='mean',
do_fusion=args.emb_fusion)
deep_columns.append(embedding_column)
else:
normalizer_fn = None
i = CONTINUOUS_COLUMNS.index(column_name)
normalizer_fn = make_minmaxscaler(mins_list[i], range_list[i])
column = tf.feature_column.numeric_column(
column_name, normalizer_fn=normalizer_fn, shape=(1, ))
wide_columns.append(column)
deep_columns.append(column)
return wide_columns, deep_columns
def train(sess_config,
input_hooks,
model,
data_init_op,
steps,
checkpoint_dir,
tf_config=None,
server=None):
model.is_training = True
hooks = []
hooks.extend(input_hooks)
scaffold = tf.train.Scaffold(
local_init_op=tf.group(tf.local_variables_initializer(), data_init_op),
saver=tf.train.Saver(max_to_keep=args.keep_checkpoint_max))
stop_hook = tf.train.StopAtStepHook(last_step=steps)
log_hook = tf.train.LoggingTensorHook(
{
'steps': model.global_step,
'loss': model.loss
}, every_n_iter=100)
hooks.append(stop_hook)
hooks.append(log_hook)
if args.timeline > 0:
hooks.append(
tf.train.ProfilerHook(save_steps=args.timeline,
output_dir=checkpoint_dir))
save_steps = args.save_steps if args.save_steps or args.no_eval else steps
'''
Incremental Checkpoint
Please add `save_incremental_checkpoint_secs` to 'tf.train.MonitoredTrainingSession'.
It defaults to None; set it to a save interval in seconds to enable incremental
checkpointing, e.g. `tf.train.MonitoredTrainingSession(
save_incremental_checkpoint_secs=args.incremental_ckpt)`
'''
if args.incremental_ckpt and not args.tf:
print("Incremental_Checkpoint is not really enabled.")
print("Please see the comments in the code.")
sys.exit()
with tf.train.MonitoredTrainingSession(
master=server.target if server else '',
is_chief=tf_config['is_chief'] if tf_config else True,
hooks=hooks,
scaffold=scaffold,
checkpoint_dir=checkpoint_dir,
save_checkpoint_steps=save_steps,
summary_dir=checkpoint_dir,
save_summaries_steps=args.save_steps,
config=sess_config) as sess:
while not sess.should_stop():
sess.run([model.loss, model.train_op])
print("Training completed.")
def eval(sess_config, input_hooks, model, data_init_op, steps, checkpoint_dir):
model.is_training = False
hooks = []
hooks.extend(input_hooks)
scaffold = tf.train.Scaffold(
local_init_op=tf.group(tf.local_variables_initializer(), data_init_op))
session_creator = tf.train.ChiefSessionCreator(
scaffold=scaffold, checkpoint_dir=checkpoint_dir, config=sess_config)
writer = tf.summary.FileWriter(os.path.join(checkpoint_dir, 'eval'))
merged = tf.summary.merge_all()
with tf.train.MonitoredSession(session_creator=session_creator,
hooks=hooks) as sess:
for _in in range(1, steps + 1):
if (_in != steps):
sess.run([model.acc_op, model.auc_op])
if (_in % 1000 == 0):
print("Evaluation complate:[{}/{}]".format(_in, steps))
else:
eval_acc, eval_auc, events = sess.run(
[model.acc_op, model.auc_op, merged])
writer.add_summary(events, _in)
print("Evaluation complate:[{}/{}]".format(_in, steps))
print("ACC = {}\nAUC = {}".format(eval_acc, eval_auc))
def main(tf_config=None, server=None):
# check dataset and count data set size
print("Checking dataset...")
train_file = args.data_location + '/train.csv'
test_file = args.data_location + '/eval.csv'
if (not os.path.exists(train_file)) or (not os.path.exists(test_file)):
print("Dataset does not exist in the given data_location.")
sys.exit()
no_of_training_examples = sum(1 for line in open(train_file))
no_of_test_examples = sum(1 for line in open(test_file))
print("Numbers of training dataset is {}".format(no_of_training_examples))
print("Numbers of test dataset is {}".format(no_of_test_examples))
# set batch size, eporch & steps
batch_size = math.ceil(
args.batch_size / args.micro_batch
) if args.micro_batch and not args.tf else args.batch_size
if args.steps == 0:
no_of_epochs = 1
train_steps = math.ceil(
(float(no_of_epochs) * no_of_training_examples) / batch_size)
else:
no_of_epochs = math.ceil(
(float(batch_size) * args.steps) / no_of_training_examples)
train_steps = args.steps
test_steps = math.ceil(float(no_of_test_examples) / batch_size)
print("The training steps is {}".format(train_steps))
print("The testing steps is {}".format(test_steps))
# set fixed random seed
tf.set_random_seed(args.seed)
# set directory path for checkpoint_dir
model_dir = os.path.join(args.output_dir,
'model_WIDE_AND_DEEP_' + str(int(time.time())))
checkpoint_dir = args.checkpoint if args.checkpoint else model_dir
print("Saving model checkpoints to " + checkpoint_dir)
# create data pipeline of train & test datasets
train_dataset = build_model_input(train_file, batch_size, no_of_epochs)
test_dataset = build_model_input(test_file, batch_size, 1)
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
test_dataset.output_shapes)
next_element = iterator.get_next()
train_init_op = iterator.make_initializer(train_dataset)
test_init_op = iterator.make_initializer(test_dataset)
# create feature column
wide_column, deep_column = build_feature_columns()
# create variable partitioner for distributed training
num_ps_replicas = len(tf_config['ps_hosts']) if tf_config else 0
input_layer_partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=args.input_layer_partitioner <<
20) if args.input_layer_partitioner else None
dense_layer_partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=args.dense_layer_partitioner <<
10) if args.dense_layer_partitioner else None
# Session config
sess_config = tf.ConfigProto()
sess_config.inter_op_parallelism_threads = args.inter
sess_config.intra_op_parallelism_threads = args.intra
# Session hooks
hooks = []
if args.smartstaged and not args.tf:
'''Smart staged Feature'''
next_element = tf.staged(next_element, num_threads=4, capacity=40)
sess_config.graph_options.optimizer_options.do_smart_stage = True
hooks.append(tf.make_prefetch_hook())
if args.op_fusion and not args.tf:
'''Auto Graph Fusion'''
sess_config.graph_options.optimizer_options.do_op_fusion = True
if args.micro_batch and not args.tf:
'''Auto Micro Batch'''
sess_config.graph_options.optimizer_options.micro_batch_num = args.micro_batch
# create model
model = WDL(wide_column=wide_column,
deep_column=deep_column,
linear_learning_rate=args.linear_learning_rate,
deep_learning_rate=args.deep_learning_rate,
optimizer_type=args.optimizer,
bf16=args.bf16,
stock_tf=args.tf,
adaptive_emb=args.adaptive_emb,
inputs=next_element,
input_layer_partitioner=input_layer_partitioner,
dense_layer_partitioner=dense_layer_partitioner)
# Run model training and evaluation
train(sess_config, hooks, model, train_init_op, train_steps,
checkpoint_dir, tf_config, server)
if not (args.no_eval or tf_config):
eval(sess_config, hooks, model, test_init_op, test_steps,
checkpoint_dir)
def boolean_string(string):
low_string = string.lower()
if low_string not in {'false', 'true'}:
raise ValueError('Not a valid boolean string')
return low_string == 'true'
# Get parse
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--data_location',
help='Full path of train data',
required=False,
default='./data')
parser.add_argument('--steps',
help='set the number of steps on train dataset',
type=int,
default=0)
parser.add_argument('--batch_size',
help='Batch size to train. Default is 512',
type=int,
default=512)
parser.add_argument('--output_dir',
help='Full path to model output directory. \
Default to ./result. Covered by --checkpoint. ',
required=False,
default='./result')
parser.add_argument('--checkpoint',
help='Full path to checkpoints input/output. \
Default to ./result/$MODEL_TIMESTAMP',
required=False)
parser.add_argument('--save_steps',
help='set the number of steps on saving checkpoints',
type=int,
default=0)
parser.add_argument('--seed',
help='set the random seed for tensorflow',
type=int,
default=2021)
parser.add_argument('--optimizer',
type=str, \
choices=['adam', 'adamasync', 'adagraddecay', 'adagrad'],
default='adamasync')
parser.add_argument('--linear_learning_rate',
help='Learning rate for linear model',
type=float,
default=0.2)
parser.add_argument('--deep_learning_rate',
help='Learning rate for deep model',
type=float,
default=0.01)
parser.add_argument('--keep_checkpoint_max',
help='Maximum number of recent checkpoint to keep',
type=int,
default=1)
parser.add_argument('--timeline',
help='number of steps on saving timeline. Default 0',
type=int,
default=0)
parser.add_argument('--protocol',
type=str,
choices=['grpc', 'grpc++', 'star_server'],
default='grpc')
parser.add_argument('--inter',
help='set inter op parallelism threads.',
type=int,
default=0)
parser.add_argument('--intra',
help='set intra op parallelism threads.',
type=int,
default=0)
parser.add_argument('--input_layer_partitioner', \
help='slice size of input layer partitioner, units MB. Default 8MB',
type=int,
default=8)
parser.add_argument('--dense_layer_partitioner', \
help='slice size of dense layer partitioner, units KB. Default 16KB',
type=int,
default=16)
parser.add_argument('--bf16',
help='enable DeepRec BF16 in deep model. Default FP32',
action='store_true')
parser.add_argument('--no_eval',
help='not evaluate trained model by eval dataset.',
action='store_true')
parser.add_argument('--tf', \
help='Use TF 1.15.5 API and disable DeepRec feature to run a baseline.',
action='store_true')
parser.add_argument('--smartstaged', \
help='Whether to enable smart staged feature of DeepRec, Default to True.',
type=boolean_string,
default=True)
parser.add_argument('--emb_fusion', \
help='Whether to enable embedding fusion, Default to True.',
type=boolean_string,
default=True)
parser.add_argument('--ev', \
help='Whether to enable DeepRec EmbeddingVariable. Default False.',
type=boolean_string,
default=False)
parser.add_argument('--ev_elimination', \
help='Feature Elimination of EmbeddingVariable Feature. Default closed.',
type=str,
choices=[None, 'l2', 'gstep'],
default=None)
parser.add_argument('--ev_filter', \
help='Feature Filter of EmbeddingVariable Feature. Default closed.',
type=str,
choices=[None, 'counter', 'cbf'],
default=None)
parser.add_argument('--op_fusion', \
help='Whether to enable Auto graph fusion feature. Default to True',
type=boolean_string,
default=True)
parser.add_argument('--micro_batch',
help='Set num for Auto Micro Batch. Default closed.',
type=int,
default=0)  # TODO: Default to True
parser.add_argument('--adaptive_emb', \
help='Whether to enable Adaptive Embedding. Default to False.',
type=boolean_string,
default=False)
parser.add_argument('--dynamic_ev', \
help='Whether to enable Dynamic-dimension Embedding Variable. Default to False.',
type=boolean_string,
default=False)  # TODO: enable
parser.add_argument('--incremental_ckpt', \
help='Set the interval for saving incremental checkpoints. Default 0 to disable.',
type=int,
default=0)
parser.add_argument('--workqueue', \
help='Whether to enable Work Queue. Default to False.',
type=boolean_string,
default=False)
return parser
# Parse distributed training configuration and generate cluster information
def generate_cluster_info(TF_CONFIG):
print(TF_CONFIG)
tf_config = json.loads(TF_CONFIG)
cluster_config = tf_config.get('cluster')
ps_hosts = []
worker_hosts = []
chief_hosts = []
for key, value in cluster_config.items():
if 'ps' == key:
ps_hosts = value
elif 'worker' == key:
worker_hosts = value
elif 'chief' == key:
chief_hosts = value
if chief_hosts:
worker_hosts = chief_hosts + worker_hosts
if not ps_hosts or not worker_hosts:
print('TF_CONFIG ERROR')
sys.exit()
task_config = tf_config.get('task')
task_type = task_config.get('type')
task_index = task_config.get('index') + (1 if task_type == 'worker'
and chief_hosts else 0)
if task_type == 'chief':
task_type = 'worker'
is_chief = task_index == 0
cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})
server = tf.distribute.Server(cluster,
job_name=task_type,
task_index=task_index,
protocol=args.protocol)
if task_type == 'ps':
server.join()
elif task_type == 'worker':
tf_config = {
'ps_hosts': ps_hosts,
'worker_hosts': worker_hosts,
'type': task_type,
'index': task_index,
'is_chief': is_chief
}
tf_device = tf.device(
tf.train.replica_device_setter(
worker_device='/job:worker/task:%d' % task_index,
cluster=cluster))
return tf_config, server, tf_device
else:
print("Task type or index error.")
sys.exit()
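# Illustrative TF_CONFIG value (hosts and ports are placeholders, not from this repo).
# generate_cluster_info expects the standard TensorFlow cluster spec layout:
#   {"cluster": {"chief": ["host0:2222"],
#                "worker": ["host1:2222", "host2:2222"],
#                "ps": ["host3:2222"]},
#    "task": {"type": "worker", "index": 0}}
# When a chief is present, chief_hosts are prepended to worker_hosts and worker
# indices are shifted by one, so the chief becomes worker task 0.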
# Some of DeepRec's features are enabled via environment variables.
# This function sets those variables; the docstring below describes each feature.
def set_env_for_DeepRec():
'''
Set environment variables for DeepRec features that are enabled via ENV.
More detailed information is available at https://deeprec.readthedocs.io/zh/latest/index.html.
START_STATISTIC_STEP & STOP_STATISTIC_STEP: On CPU platforms, DeepRec supports memory optimization
in both stand-alone and distributed training. It is enabled by default, with collection
starting at step 1000 and stopping at step 1100. The settings below reduce the initial
cold-start time.
MALLOC_CONF: On CPU platforms, DeepRec can use memory optimization with the jemalloc library.
Please preload libjemalloc.so with `LD_PRELOAD=./libjemalloc.so.2 python ...`.
'''
os.environ['START_STATISTIC_STEP'] = '100'
os.environ['STOP_STATISTIC_STEP'] = '110'
os.environ['MALLOC_CONF']= \
'background_thread:true,metadata_thp:auto,dirty_decay_ms:20000,muzzy_decay_ms:20000'
if __name__ == '__main__':
parser = get_arg_parser()
args = parser.parse_args()
if not args.tf:
set_env_for_DeepRec()
TF_CONFIG = os.getenv('TF_CONFIG')
if not TF_CONFIG:
main()
else:
tf_config, server, tf_device = generate_cluster_info(TF_CONFIG)
main(tf_config, server)
|
from sqlalchemy import Table, Column, MetaData, Float
meta = MetaData()
def upgrade(migrate_engine):
conn = migrate_engine.connect()
trans = conn.begin()
try:
meta.bind = conn
task = Table('task', meta, autoload=True)
task_loss = Column('loss', Float)
task_loss.create(task)
except Exception:
trans.rollback()
raise
else:
trans.commit()
def downgrade(migrate_engine):
conn = migrate_engine.connect()
trans = conn.begin()
try:
meta.bind = conn
task = Table('task', meta, autoload=True)
task.c.loss.drop()
except Exception:
trans.rollback()
raise
else:
trans.commit()
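# Illustrative sketch (not part of the migration; the engine URL is a placeholder):
# sqlalchemy-migrate calls upgrade()/downgrade() with a bound engine, e.g.
#   from sqlalchemy import create_engine
#   upgrade(create_engine('sqlite:///example.db'))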
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from core.config import cfg
from modeling.generate_anchors import generate_anchors
from utils.c2 import const_fill
from utils.c2 import gauss_fill
import modeling.FPN as FPN
import utils.blob as blob_utils
# ---------------------------------------------------------------------------- #
# RPN and Faster R-CNN outputs and losses
# ---------------------------------------------------------------------------- #
def add_generic_rpn_outputs(model, blob_in, dim_in, spatial_scale_in):
"""Add RPN outputs (objectness classification and bounding box regression)
to an RPN model. Abstracts away the use of FPN.
"""
loss_gradients = None
if cfg.FPN.FPN_ON:
# Delegate to the FPN module
FPN.add_fpn_rpn_outputs(model, blob_in, dim_in, spatial_scale_in)
if cfg.MODEL.FASTER_RCNN:
# CollectAndDistributeFpnRpnProposals also labels proposals when in
# training mode
model.CollectAndDistributeFpnRpnProposals()
if model.train:
loss_gradients = FPN.add_fpn_rpn_losses(model)
else:
# Not using FPN, add RPN to a single scale
add_single_scale_rpn_outputs(model, blob_in, dim_in, spatial_scale_in)
if model.train:
loss_gradients = add_single_scale_rpn_losses(model)
return loss_gradients
def add_single_scale_rpn_outputs(model, blob_in, dim_in, spatial_scale):
"""Add RPN outputs to a single scale model (i.e., no FPN)."""
anchors = generate_anchors(
stride=1. / spatial_scale,
sizes=cfg.RPN.SIZES,
aspect_ratios=cfg.RPN.ASPECT_RATIOS
)
num_anchors = anchors.shape[0]
dim_out = dim_in
# RPN hidden representation
model.Conv(
blob_in,
'conv_rpn',
dim_in,
dim_out,
kernel=3,
pad=1,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
model.Relu('conv_rpn', 'conv_rpn')
# Proposal classification scores
model.Conv(
'conv_rpn',
'rpn_cls_logits',
dim_in,
num_anchors,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
# Proposal bbox regression deltas
model.Conv(
'conv_rpn',
'rpn_bbox_pred',
dim_in,
4 * num_anchors,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
if not model.train or cfg.MODEL.FASTER_RCNN:
# Proposals are needed during:
# 1) inference (== not model.train) for RPN only and Faster R-CNN
# OR
# 2) training for Faster R-CNN
# Otherwise (== training for RPN only), proposals are not needed
model.net.Sigmoid('rpn_cls_logits', 'rpn_cls_probs')
model.GenerateProposals(
['rpn_cls_probs', 'rpn_bbox_pred', 'im_info'],
['rpn_rois', 'rpn_roi_probs'],
anchors=anchors,
spatial_scale=spatial_scale
)
if cfg.MODEL.FASTER_RCNN:
if model.train:
# Add op that generates training labels for in-network RPN proposals
model.GenerateProposalLabels(['rpn_rois', 'roidb', 'im_info'])
else:
# Alias rois to rpn_rois for inference
model.net.Alias('rpn_rois', 'rois')
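# Illustrative helper (not part of Detectron): for a hypothetical configuration of
# 5 anchor sizes and 3 aspect ratios, generate_anchors yields 5 * 3 = 15 anchors, so
# the head above produces 15 objectness-logit channels and 4 * 15 bbox-delta channels.
def _example_rpn_head_channels(num_sizes=5, num_aspect_ratios=3):
    num_anchors = num_sizes * num_aspect_ratios
    return {'rpn_cls_logits': num_anchors, 'rpn_bbox_pred': 4 * num_anchors}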
def add_single_scale_rpn_losses(model):
"""Add losses for a single scale RPN model (i.e., no FPN)."""
# Spatially narrow the full-sized RPN label arrays to match the feature map
# shape
model.net.SpatialNarrowAs(
['rpn_labels_int32_wide', 'rpn_cls_logits'], 'rpn_labels_int32'
)
for key in ('targets', 'inside_weights', 'outside_weights'):
model.net.SpatialNarrowAs(
['rpn_bbox_' + key + '_wide', 'rpn_bbox_pred'], 'rpn_bbox_' + key
)
loss_rpn_cls = model.net.SigmoidCrossEntropyLoss(
['rpn_cls_logits', 'rpn_labels_int32'],
'loss_rpn_cls',
scale=1. / cfg.NUM_GPUS
)
loss_rpn_bbox = model.net.SmoothL1Loss(
[
'rpn_bbox_pred', 'rpn_bbox_targets', 'rpn_bbox_inside_weights',
'rpn_bbox_outside_weights'
],
'loss_rpn_bbox',
beta=1. / 9.,
scale=1. / cfg.NUM_GPUS
)
loss_gradients = blob_utils.get_loss_gradients(
model, [loss_rpn_cls, loss_rpn_bbox]
)
model.AddLosses(['loss_rpn_cls', 'loss_rpn_bbox'])
return loss_gradients
|
#!/usr/bin/env python
"""
shgyield.py is a python module for exploring the SHG optical response of
materials. It is well suited for 2D-materials, surfaces, bulks, and
metamaterials. For a complete overview of the theory, see PRB 94, 115314 (2016).
todo:
* SHG: SOME Nv=1 INSTANCES ARE HARDCODED, NEED TO GO BACK AND CHANGE
* Allow for saving all data and parameters to NetCDF file, and final data to txt
* Develop SHG functions into class
* Improve rotation function and avoid running unless changed
* Improve spline function and avoid running unless changed
* Convert to absolute broadening to avoid trouble with polar plots
* Develop GUI to ingest and pre-process data, provide initial values, etc.
* Include every symmetry group (see Popov) into menus
"""
import numpy as np
from scipy import constants, ndimage
from scipy.interpolate import InterpolatedUnivariateSpline
np.seterr(divide='ignore', invalid='ignore', over='ignore')  # ignore divide-by-zero, invalid, and overflow warnings
def broad(data, sigma):
''' applies Gaussian broadening to real number '''
return ndimage.filters.gaussian_filter(data, sigma)
def broadC(data, sigma):
''' applies Gaussian broadening to complex and returns complex '''
real = ndimage.filters.gaussian_filter(data.real, sigma)
imag = ndimage.filters.gaussian_filter(data.imag, sigma)
return real + 1j*imag
def spline(data, energy):
''' returns spline object '''
return InterpolatedUnivariateSpline(energy, data, ext=2)
def splineC(data, energy):
''' creates a spline for complex and returns tuple with spline objects '''
real = InterpolatedUnivariateSpline(energy, data.real, ext=2)
imag = InterpolatedUnivariateSpline(energy, data.imag, ext=2)
return (real, imag)
def splineEPS(data, energy):
''' IMPROVE: creates a spline for EPS, returns 1w and 2w '''
splines = {key: splineC(val['data'], val['energy']) for key, val in data.items()}
new1w = {key: val[0](energy) + 1j*val[1](energy) for key, val in splines.items()}
new2w = {key: val[0](2*energy) + 1j*val[1](2*energy) for key, val in splines.items()}
return (new1w, new2w)
def avgEPS(data):
''' IMPROVE: averages over all values in a dict '''
return np.mean(list(data.values()), axis=0)
def rotate(chi2, rotang):
## in-plane rotation of the chi2 tensor components
gamma = np.radians(90) - rotang
chi2rot = {
'xxx' : + np.sin(gamma)**3*chi2['xxx'] \
+ np.sin(gamma)*np.cos(gamma)**2*chi2['xyy'] \
- 2*np.sin(gamma)**2*np.cos(gamma)*chi2['xxy'] \
- np.sin(gamma)**2*np.cos(gamma)*chi2['yxx'] \
- np.cos(gamma)**3*chi2['yyy'] \
+ 2*np.sin(gamma)*np.cos(gamma)**2*chi2['yxy'],
'xyy' : + np.sin(gamma)*np.cos(gamma)**2*chi2['xxx'] \
+ np.sin(gamma)**3*chi2['xyy'] \
+ 2*np.sin(gamma)**2*np.cos(gamma)*chi2['xxy'] \
- np.cos(gamma)**3*chi2['yxx'] \
- np.sin(gamma)**2*np.cos(gamma)*chi2['yyy'] \
- 2*np.sin(gamma)*np.cos(gamma)**2*chi2['yxy'],
'xzz' : + np.sin(gamma)*chi2['xzz'] - np.cos(gamma)*chi2['yzz'],
'xyz' : + np.sin(gamma)**2*chi2['xyz'] \
+ np.sin(gamma)*np.cos(gamma)*chi2['xxz'] \
- np.sin(gamma)*np.cos(gamma)*chi2['yyz'] \
- np.cos(gamma)**2*chi2['yxz'],
'xxz' : - np.sin(gamma)*np.cos(gamma)*chi2['xyz'] \
+ np.sin(gamma)**2*chi2['xxz'] \
+ np.cos(gamma)**2*chi2['yyz'] \
- np.sin(gamma)*np.cos(gamma)*chi2['yxz'],
'xxy' : + np.sin(gamma)**2*np.cos(gamma)*chi2['xxx'] \
- np.sin(gamma)**2*np.cos(gamma)*chi2['xyy'] \
+ (np.sin(gamma)**3 - np.sin(gamma)*np.cos(gamma)**2)*chi2['xxy'] \
- np.sin(gamma)*np.cos(gamma)**2*chi2['yxx'] \
- np.sin(gamma)*np.cos(gamma)**2*chi2['yyy'] \
+ (np.cos(gamma)**3 - np.sin(gamma)**2*np.cos(gamma))*chi2['yxy'],
'yxx' : + np.sin(gamma)**2*np.cos(gamma)*chi2['xxx'] \
+ np.cos(gamma)**3*chi2['xyy'] \
- 2*np.sin(gamma)*np.cos(gamma)**2*chi2['xxy'] \
+ np.sin(gamma)**3*chi2['yxx'] \
+ np.sin(gamma)*np.cos(gamma)**2*chi2['yyy'] \
- 2*np.sin(gamma)**2*np.cos(gamma)*chi2['yxy'],
'yyy' : + np.cos(gamma)**3*chi2['xxx'] \
+ np.sin(gamma)**2*np.cos(gamma)*chi2['xyy'] \
+ 2*np.sin(gamma)*np.cos(gamma)**2*chi2['xxy'] \
+ np.sin(gamma)*np.cos(gamma)**2*chi2['yxx'] \
+ np.sin(gamma)**3*chi2['yyy'] \
+ 2*np.sin(gamma)**2*np.cos(gamma)*chi2['yxy'],
'yzz' : + np.cos(gamma)*chi2['xzz'] + np.sin(gamma)*chi2['yzz'],
'yyz' : + np.sin(gamma)*np.cos(gamma)*chi2['xyz'] \
+ np.cos(gamma)**2*chi2['xxz'] \
+ np.sin(gamma)**2*chi2['yyz'] \
+ np.sin(gamma)*np.cos(gamma)*chi2['yxz'],
'yxz' : - np.cos(gamma)**2*chi2['xyz'] \
+ np.sin(gamma)*np.cos(gamma)*chi2['xxz'] \
- np.sin(gamma)*np.cos(gamma)*chi2['yyz'] \
+ np.sin(gamma)**2*chi2['yxz'],
'yxy' : + np.sin(gamma)*np.cos(gamma)**2*chi2['xxx'] \
- np.sin(gamma)*np.cos(gamma)**2*chi2['xyy'] \
- (np.cos(gamma)**3 - np.sin(gamma)**2*np.cos(gamma))*chi2['xxy'] \
+ np.sin(gamma)**2*np.cos(gamma)*chi2['yxx'] \
- np.sin(gamma)**2*np.cos(gamma)*chi2['yyy'] \
+ (np.sin(gamma)**3 - np.sin(gamma)*np.cos(gamma)**2)*chi2['yxy'],
'zxx' : + np.sin(gamma)**2*chi2['zxx'] \
+ np.cos(gamma)**2*chi2['zyy'] \
- 2*np.sin(gamma)*np.cos(gamma)*chi2['zxy'],
'zyy' : + np.cos(gamma)**2*chi2['zxx'] \
+ np.sin(gamma)**2*chi2['zyy'] \
+ 2*np.sin(gamma)*np.cos(gamma)*chi2['zxy'],
'zzz' : + chi2['zzz'],
'zyz' : + np.sin(gamma)*chi2['zyz'] + np.cos(gamma)*chi2['zxz'],
'zxz' : - np.cos(gamma)*chi2['zyz'] + np.sin(gamma)*chi2['zxz'],
'zxy' : + np.sin(gamma)*np.cos(gamma)*chi2['zxx'] \
- np.sin(gamma)*np.cos(gamma)*chi2['zyy'] \
- np.cos(2*gamma)*chi2['zxy']
}
return chi2rot
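# Illustrative check (not part of the module; assumes chi2 holds all 18 components
# used above): a rotation angle of 0 maps gamma to 90 degrees, which reduces every
# expression above to the identity, so the tensor comes back unchanged.
def _example_rotate_identity(chi2):
    rotated = rotate(chi2, 0.0)
    return all(np.allclose(rotated[key], chi2[key]) for key in rotated)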
def wvec(eps, theta):
'''
Wave vector, where w = sqrt[epsilon - sin(theta)^2].
'''
return np.sqrt(eps - (np.sin(theta)**2))
def frefs(epsi, epsj, theta):
'''
s-polarized reflection fresnel factors, see Eqs. (13) and (14) of PRB 94, 115314 (2016).
'''
return (wvec(epsi, theta) - wvec(epsj, theta))/(wvec(epsi, theta) + wvec(epsj, theta))
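# Illustrative check (not part of the module): at normal incidence wvec reduces to
# sqrt(eps), so frefs reproduces the textbook Fresnel coefficient (n_i - n_j)/(n_i + n_j).
def _example_frefs_normal_incidence(eps_i=1.0, eps_j=2.25):
    n_i, n_j = np.sqrt(eps_i), np.sqrt(eps_j)
    assert np.isclose(frefs(eps_i, eps_j, 0.0), (n_i - n_j) / (n_i + n_j))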
def frefp(epsi, epsj, theta):
'''
p-polarized reflection fresnel factors, see Eqs. (13) and (14) of PRB 94, 115314 (2016).
'''
return ((wvec(epsi, theta) * epsj) - (wvec(epsj, theta) * epsi))/\
((wvec(epsi, theta) * epsj) + (wvec(epsj, theta) * epsi))
def ftrans(epsi, epsj, theta):
'''
s-polarized transmission fresnel factors, see Eqs. (13) and (14) of PRB 94, 115314 (2016).
'''
return (2 * wvec(epsi, theta))/(wvec(epsi, theta) + wvec(epsj, theta))
def ftranp(epsi, epsj, theta):
'''
p-polarized transmission fresnel factors, see Eqs. (13) and (14) of PRB 94, 115314 (2016).
'''
return (2 * wvec(epsi, theta) * np.sqrt(epsi * epsj))/\
(wvec(epsi, theta) * epsj + wvec(epsj, theta) * epsi)
def mrc2w(energy, eps0, fres1, fres2, theta, thick, mref):
'''
2w multiple reflection coefficient, see Eq. (18) of PRB 94, 115314 (2016).
THICKNESS MUST BE IN NANOMETERS!!!
'''
if mref:
delta = 8*np.pi * ((energy * thick * 1e-9)/\
(constants.value("Planck constant in eV s") * constants.c)) * wvec(eps0, theta)
return (fres1 * np.exp(1j * (delta/2)))/\
(1 + (fres2 * fres1 * np.exp(1j * delta))) * np.sinc(delta/2)
elif not mref:
return fres1
def mrc1w(energy, eps0, fres1, fres2, theta, thick, mref):
'''
1w multiple reflection coefficient, see Eq. (21) of PRB 94, 115314 (2016).
'''
if mref:
varphi = 4*np.pi * ((energy * thick * 1e-9)/\
(constants.value("Planck constant in eV s") * constants.c)) * wvec(eps0, theta)
return (fres1 * np.exp(1j * varphi))/\
(1 + (fres2 * fres1 * np.exp(1j * varphi)))
elif not mref:
return fres1
def rad_pp(energy, eps1w, eps2w, chi2, theta, phi, thick, mref):
'''
rpP, see Eq. (50) of PRB 94, 115314 (2016).
'''
fres2p = mrc2w(energy,
eps2w['M2'],
frefp(eps2w['M2'], eps2w['M3'], theta),
frefp(eps2w['M1'], eps2w['M2'], theta),
theta, thick, mref)
fres1p = mrc1w(energy,
eps1w['M2'],
frefp(eps1w['M2'], eps1w['M3'], theta),
frefp(eps1w['M1'], eps1w['M2'], theta),
theta, thick, mref)
pre = (ftranp(eps2w['M1'], eps2w['M2'], theta)/np.sqrt(eps2w['M2'])) * \
(ftranp(eps1w['M1'], eps1w['M2'], theta)/np.sqrt(eps1w['M2']))**2
### r_{pP}
rpp = - ((1 - fres2p) * (1 - fres1p)**2 \
* wvec(eps1w['M2'], theta)**2 * wvec(eps2w['M2'], theta) \
* np.cos(phi)**3 * chi2['xxx']) \
- (2 * (1 - fres2p) * (1 - fres1p)**2 \
* wvec(eps1w['M2'], theta)**2 * wvec(eps2w['M2'], theta) \
* np.sin(phi) * np.cos(phi)**2 * chi2['xxy']) \
- (2 * (1 - fres2p) * (1 + fres1p) * (1 - fres1p) \
* wvec(eps1w['M2'], theta) * wvec(eps2w['M2'], theta) \
* np.sin(theta) * np.cos(phi)**2 * chi2['xxz']) \
- ((1 - fres2p) * (1 - fres1p)**2 \
* wvec(eps1w['M2'], theta)**2 * wvec(eps2w['M2'], theta) \
* np.sin(phi)**2 * np.cos(phi) * chi2['xyy']) \
- (2 * (1 - fres2p) * (1 + fres1p) * (1 - fres1p) \
* wvec(eps1w['M2'], theta) * wvec(eps2w['M2'], theta) \
* np.sin(theta) * np.sin(phi) * np.cos(phi) * chi2['xyz']) \
- ((1 - fres2p) * (1 + fres1p)**2 \
* wvec(eps2w['M2'], theta) \
* np.sin(theta)**2 * np.cos(phi) * chi2['xzz']) \
- ((1 - fres2p) * (1 - fres1p)**2 \
* wvec(eps1w['M2'], theta)**2 * wvec(eps2w['M2'], theta) \
* np.sin(phi) * np.cos(phi)**2 * chi2['yxx']) \
- (2 * (1 - fres2p) * (1 - fres1p)**2 \
* wvec(eps1w['M2'], theta)**2 * wvec(eps2w['M2'], theta) \
* np.sin(phi)**2 * np.cos(phi) * chi2['yxy']) \
- (2 * (1 - fres2p) * (1 + fres1p) * (1 - fres1p) \
* wvec(eps1w['M2'], theta) * wvec(eps2w['M2'], theta) \
* np.sin(theta) * np.sin(phi) * np.cos(phi) * chi2['yxz']) \
- ((1 - fres2p) * (1 - fres1p)**2 \
* wvec(eps1w['M2'], theta)**2 * wvec(eps2w['M2'], theta) \
* np.sin(phi)**3 * chi2['yyy']) \
- (2 * (1 - fres2p) * (1 + fres1p) * (1 - fres1p) \
* wvec(eps1w['M2'], theta) * wvec(eps2w['M2'], theta) \
* np.sin(theta) * np.sin(phi)**2 * chi2['yyz']) \
- ((1 - fres2p) * (1 + fres1p)**2 \
* wvec(eps2w['M2'], theta) \
* np.sin(theta)**2 * np.sin(phi) * chi2['yzz']) \
+ ((1 + fres2p) * (1 - fres1p)**2 \
* wvec(eps1w['M2'], theta)**2 \
* np.sin(theta) * np.cos(phi)**2 * chi2['zxx']) \
+ (2 * (1 + fres2p) * (1 + fres1p) * (1 - fres1p) \
* wvec(eps1w['M2'], theta) \
* np.sin(theta)**2 * np.cos(phi) * chi2['zxz']) \
+ (2 * (1 + fres2p) * (1 - fres1p)**2 \
* wvec(eps1w['M2'], theta)**2 \
* np.sin(theta) * np.sin(phi) * np.cos(phi) * chi2['zxy']) \
+ ((1 + fres2p) * (1 - fres1p)**2 \
* wvec(eps1w['M2'], theta)**2 \
* np.sin(theta) * np.sin(phi)**2 * chi2['zyy']) \
+ (2 * (1 + fres2p) * (1 + fres1p) * (1 - fres1p) \
* wvec(eps1w['M2'], theta) \
* np.sin(theta)**2 * np.sin(phi) * chi2['zyz']) \
+ ((1 + fres2p) * (1 + fres1p)**2 * np.sin(theta)**3 * chi2['zzz'])
return pre*rpp
def rad_ps(energy, eps1w, eps2w, chi2, theta, phi, thick, mref):
'''
rpS, see Eq. (55) of PRB 94, 115314 (2016).
'''
fres2s = mrc2w(energy,
eps2w['M2'],
frefs(eps2w['M2'], eps2w['M3'], theta),
frefs(eps2w['M1'], eps2w['M2'], theta),
theta, thick, mref)
fres1p = mrc1w(energy,
eps1w['M2'],
frefp(eps1w['M2'], eps1w['M3'], theta),
frefp(eps1w['M1'], eps1w['M2'], theta),
theta, thick, mref)
pre = (ftrans(eps2w['M1'], eps2w['M2'], theta) * (1 + fres2s)) * \
(ftranp(eps1w['M1'], eps1w['M2'], theta)/np.sqrt(eps1w['M2']))**2
### r_{pS}
rps = - ((1 - fres1p)**2 * wvec(eps1w['M2'], theta)**2 \
* np.sin(phi) * np.cos(phi)**2 * chi2['xxx']) \
- (2 * (1 - fres1p)**2 * wvec(eps1w['M2'], theta)**2 \
* np.sin(phi)**2 * np.cos(phi) * chi2['xxy']) \
- (2 * (1 + fres1p) * (1 - fres1p) * wvec(eps1w['M2'], theta) \
* np.sin(theta) * np.sin(phi) * np.cos(phi) * chi2['xxz']) \
- ((1 - fres1p)**2 * wvec(eps1w['M2'], theta)**2 \
* np.sin(phi)**3 * chi2['xyy']) \
- (2 * (1 + fres1p) * (1 - fres1p) * wvec(eps1w['M2'], theta) \
* np.sin(theta) * np.sin(phi)**2 * chi2['xyz']) \
- ((1 + fres1p)**2 \
* np.sin(theta)**2 * np.sin(phi) * chi2['xzz']) \
+ ((1 - fres1p)**2 * wvec(eps1w['M2'], theta)**2 \
* np.cos(phi)**3 * chi2['yxx']) \
+ (2 * (1 - fres1p)**2 * wvec(eps1w['M2'], theta)**2 \
* np.sin(phi) * np.cos(phi)**2 * chi2['yxy']) \
+ (2 * (1 + fres1p) * (1 - fres1p) * wvec(eps1w['M2'], theta) \
* np.sin(theta) * np.cos(phi)**2 * chi2['yxz']) \
+ ((1 - fres1p)**2 * wvec(eps1w['M2'], theta)**2 \
* np.sin(phi)**2 * np.cos(phi) * chi2['yyy']) \
+ (2 * (1 + fres1p) * (1 - fres1p) * wvec(eps1w['M2'], theta) \
* np.sin(theta) * np.sin(phi) * np.cos(phi) * chi2['yyz']) \
+ ((1 + fres1p)**2 \
* np.sin(theta)**2 * np.cos(phi) * chi2['yzz'])
return pre*rps
def rad_sp(energy, eps1w, eps2w, chi2, theta, phi, thick, mref):
'''
rsP, see Eq. (60) of PRB 94, 115314 (2016).
'''
fres2p = mrc2w(energy,
eps2w['M2'],
frefp(eps2w['M2'], eps2w['M3'], theta),
frefp(eps2w['M1'], eps2w['M2'], theta),
theta, thick, mref)
fres1s = mrc1w(energy,
eps1w['M2'],
frefs(eps1w['M2'], eps1w['M3'], theta),
frefs(eps1w['M1'], eps1w['M2'], theta),
theta, thick, mref)
pre = (ftranp(eps2w['M1'], eps2w['M2'], theta)/np.sqrt(eps2w['M2'])) * \
(ftrans(eps1w['M1'], eps1w['M2'], theta) * (1 + fres1s))**2
### r_{sP}
rsp = - ((1 - fres2p) * wvec(eps2w['M2'], theta) \
* np.sin(phi)**2 * np.cos(phi) * chi2['xxx']) \
+ ((1 - fres2p) * wvec(eps2w['M2'], theta) \
* 2 * np.sin(phi) * np.cos(phi)**2 * chi2['xxy']) \
- ((1 - fres2p) * wvec(eps2w['M2'], theta) \
* np.cos(phi)**3 * chi2['xyy']) \
- ((1 - fres2p) * wvec(eps2w['M2'], theta) \
* np.sin(phi)**3 * chi2['yxx']) \
+ ((1 - fres2p) * wvec(eps2w['M2'], theta) \
* 2 * np.sin(phi)**2 * np.cos(phi) * chi2['yxy']) \
- ((1 - fres2p) * wvec(eps2w['M2'], theta) \
* np.sin(phi) * np.cos(phi)**2 * chi2['yyy']) \
+ ((1 + fres2p) \
* np.sin(theta) * np.sin(phi)**2 * chi2['zxx']) \
- ((1 + fres2p) \
* np.sin(theta) * 2 * np.sin(phi) * np.cos(phi) * chi2['zxy']) \
+ ((1 + fres2p) \
* np.sin(theta) * np.cos(phi)**2 * chi2['zyy'])
return pre*rsp
def rad_ss(energy, eps1w, eps2w, chi2, theta, phi, thick, mref):
'''
rsS, see Eq. (65) of PRB 94, 115314 (2016).
'''
fres2s = mrc2w(energy,
eps2w['M2'],
frefs(eps2w['M2'], eps2w['M3'], theta),
frefs(eps2w['M1'], eps2w['M2'], theta),
theta, thick, mref)
fres1s = mrc1w(energy,
eps1w['M2'],
frefs(eps1w['M2'], eps1w['M3'], theta),
frefs(eps1w['M1'], eps1w['M2'], theta),
theta, thick, mref)
pre = (ftrans(eps2w['M1'], eps2w['M2'], theta) * (1 + fres2s)) * \
(ftrans(eps1w['M1'], eps1w['M2'], theta) * (1 + fres1s))**2
### r_{sS}
rss = - (np.sin(phi)**3 * chi2['xxx']) \
+ (2 * np.sin(phi)**2 * np.cos(phi) * chi2['xxy']) \
- (np.sin(phi) * np.cos(phi)**2 * chi2['xyy']) \
+ (np.sin(phi)**2 * np.cos(phi) * chi2['yxx']) \
+ (np.cos(phi)**3 * chi2['yyy']) \
- (2 * np.sin(phi) * np.cos(phi)**2 * chi2['yxy'])
return pre*rss
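# Note that rss above involves only the purely in-plane components (xxx, xxy, xyy,
# yxx, yxy, yyy): with s-polarized input and output there is no field component
# along z, so no chi2 element carrying a z index can contribute.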
def shgyield(energy, eps_m1, eps_m2, eps_m3, chi2, theta, phi, thick, gamma=90, sigma_eps=0.0, sigma_chi=0.0, sigma_out=5.0, mode='3-layer', mref=True):
'''
Calculates the final SHG yield, see Eq. (38) of PRB 94, 115314 (2016).
THICKNESS MUST BE IN NANOMETERS!!!
'''
eps_m1_num = {key: val for key, val in eps_m1.items() if not isinstance(val, dict)}
eps_m1_brd = {key: {'energy': val['energy'], 'data': broadC(val['data'], sigma_eps)} for key, val in eps_m1.items() if isinstance(val, dict)}
eps_m1_spl = splineEPS(eps_m1_brd, energy)
eps_m1_spl[0].update(eps_m1_num)
eps_m1_spl[1].update(eps_m1_num)
eps_m2_num = {key: val for key, val in eps_m2.items() if not isinstance(val, dict)}
eps_m2_brd = {key: {'energy': val['energy'], 'data': broadC(val['data'], sigma_eps)} for key, val in eps_m2.items() if isinstance(val, dict)}
eps_m2_spl = splineEPS(eps_m2_brd, energy)
eps_m2_spl[0].update(eps_m2_num)
eps_m2_spl[1].update(eps_m2_num)
eps_m3_num = {key: val for key, val in eps_m3.items() if not isinstance(val, dict)}
eps_m3_brd = {key: {'energy': val['energy'], 'data': broadC(val['data'], sigma_eps)} for key, val in eps_m3.items() if isinstance(val, dict)}
eps_m3_spl = splineEPS(eps_m3_brd, energy)
eps_m3_spl[0].update(eps_m3_num)
eps_m3_spl[1].update(eps_m3_num)
if mode == '3-layer':
eps1w = {'M1': avgEPS(eps_m1_spl[0]),
'M2': avgEPS(eps_m2_spl[0]),
'M3': avgEPS(eps_m3_spl[0])}
eps2w = {'M1': avgEPS(eps_m1_spl[1]),
'M2': avgEPS(eps_m2_spl[1]),
'M3': avgEPS(eps_m3_spl[1])}
elif mode == 'fresnel' or mode == '2-layer':
## Fresnel reflection model, see PRB 93, 235304 (2016) and references therein.
thick = 0
eps1w = {'M1' : avgEPS(eps_m1_spl[0]),
'M2' : avgEPS(eps_m3_spl[0]),
'M3' : avgEPS(eps_m3_spl[0])}
eps2w = {'M1' : avgEPS(eps_m1_spl[1]),
'M2' : avgEPS(eps_m1_spl[1]),
'M3' : avgEPS(eps_m3_spl[1])}
chi2_num = {key: val for key, val in chi2.items() if not isinstance(val, dict)}
chi2_spl = {key: splineC(broadC(val['data'], sigma_chi), val['energy']) for key, val in chi2.items() if isinstance(val, dict)}
chi2_new = {key: val[0](energy) + 1j*val[1](energy) for key, val in chi2_spl.items()}
chi2_new.update(chi2_num)
chi2_rot = rotate(chi2_new, np.radians(gamma))
m2tocm2 = 1e4 # Convert from m^2 to cm^2
prefactor = m2tocm2 * ((energy/constants.value("Planck constant over 2 pi in eV s"))**2)/\
(2 * constants.epsilon_0 * constants.c**3 * np.cos(np.radians(theta))**2)
dakkar = {'energy': energy,
'phi': phi,
'pp': broad(prefactor * np.absolute(1/np.sqrt(eps1w['M2']) * rad_pp(energy, eps1w, eps2w, chi2_rot, np.radians(theta), np.radians(phi), thick, mref))**2, sigma_out),
'ps': broad(prefactor * np.absolute(1/np.sqrt(eps1w['M2']) * rad_ps(energy, eps1w, eps2w, chi2_rot, np.radians(theta), np.radians(phi), thick, mref))**2, sigma_out),
'sp': broad(prefactor * np.absolute(1/np.sqrt(eps1w['M2']) * rad_sp(energy, eps1w, eps2w, chi2_rot, np.radians(theta), np.radians(phi), thick, mref))**2, sigma_out),
'ss': broad(prefactor * np.absolute(1/np.sqrt(eps1w['M2']) * rad_ss(energy, eps1w, eps2w, chi2_rot, np.radians(theta), np.radians(phi), thick, mref))**2, sigma_out)}
return dakkar
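# Illustrative call (not part of the module): hypothetical constant-epsilon media and a
# single nonzero chi2 component, just to show the expected input shapes. Real inputs use
# dicts whose spectral entries look like {'energy': array, 'data': complex array}.
def _example_shgyield():
    energy = np.linspace(1.0, 2.5, 50)                      # photon energies in eV
    vacuum = {'avg': 1.0}                                   # constant epsilon, no spectrum
    bulk = {'avg': 12.0 + 0.1j}
    components = ('xxx', 'xyy', 'xzz', 'xyz', 'xxz', 'xxy',
                  'yxx', 'yyy', 'yzz', 'yyz', 'yxz', 'yxy',
                  'zxx', 'zyy', 'zzz', 'zyz', 'zxz', 'zxy')
    chi2 = {key: 0.0 for key in components}
    chi2['zzz'] = 1e-19                                     # hypothetical magnitude
    return shgyield(energy, vacuum, bulk, bulk, chi2,
                    theta=65, phi=30, thick=10)             # angles in degrees, thickness in nm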
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, KeyVaultPreparer, record_only
from azure.cli.command_modules.acr.custom import DEF_DIAG_SETTINGS_NAME_TEMPLATE
class AcrCommandsTests(ScenarioTest):
def _core_registry_scenario(self, registry_name, resource_group, location):
self.cmd('acr check-name -n {}'.format(registry_name),
checks=[self.check('nameAvailable', False),
self.check('reason', 'AlreadyExists')])
self.cmd('acr list -g {}'.format(resource_group),
checks=[self.check('[0].name', registry_name),
self.check('[0].location', location),
self.check('[0].adminUserEnabled', False)])
registry = self.cmd('acr show -n {} -g {}'.format(registry_name, resource_group),
checks=[self.check('name', registry_name),
self.check('location', location),
self.check('adminUserEnabled', False)]).get_output_in_json()
if registry['sku']['name'] == 'Premium':
self.cmd('acr show-usage -n {} -g {}'.format(registry_name, resource_group))
# enable admin user
self.cmd('acr update -n {} -g {} --tags foo=bar cat --admin-enabled true'.format(registry_name, resource_group),
checks=[self.check('name', registry_name),
self.check('location', location),
self.check('tags', {'cat': '', 'foo': 'bar'}),
self.check('adminUserEnabled', True),
self.check('provisioningState', 'Succeeded')])
# test retention
self.cmd('acr config retention update -r {} --status enabled --days 30 --type UntaggedManifests'.format(registry_name),
checks=[self.check('status', "enabled"),
self.check('days', 30)])
self.cmd('acr config retention show -r {}'.format(registry_name),
checks=[self.check('status', "enabled"),
self.check('days', 30)])
# test content-trust
self.cmd('acr config content-trust update -n {} --status enabled'.format(registry_name),
checks=[self.check('status', "enabled")])
self.cmd('acr config content-trust show -n {}'.format(registry_name),
checks=[self.check('status', "enabled")])
# test credential module
credential = self.cmd(
'acr credential show -n {} -g {}'.format(registry_name, resource_group)).get_output_in_json()
username = credential['username']
password = credential['passwords'][0]['value']
password2 = credential['passwords'][1]['value']
assert username and password and password2
# renew password
credential = self.cmd('acr credential renew -n {} -g {} --password-name {}'.format(
registry_name, resource_group, 'password')).get_output_in_json()
renewed_username = credential['username']
renewed_password = credential['passwords'][0]['value']
renewed_password2 = credential['passwords'][1]['value']
assert renewed_username and renewed_password and renewed_password2
assert username == renewed_username
assert password != renewed_password
assert password2 == renewed_password2
# renew password2
credential = self.cmd('acr credential renew -n {} -g {} --password-name {}'.format(
registry_name, resource_group, 'password2')).get_output_in_json()
renewed_username = credential['username']
renewed_password = credential['passwords'][0]['value']
renewed_password2 = credential['passwords'][1]['value']
assert renewed_username and renewed_password and renewed_password2
assert username == renewed_username
assert password != renewed_password
assert password2 != renewed_password2
# test acr delete
self.cmd('acr delete -n {} -g {} -y'.format(registry_name, resource_group))
def test_check_name_availability(self):
# the chance of this randomly generated name has a duplication is rare
name = self.create_random_name('clireg', 20)
self.kwargs.update({
'name': name
})
self.cmd('acr check-name -n {name}', checks=[
self.check('nameAvailable', True)
])
@ResourceGroupPreparer()
def test_acr_create_with_managed_registry(self, resource_group, resource_group_location):
registry_name = self.create_random_name('clireg', 20)
self.kwargs.update({
'registry_name': registry_name,
'rg_loc': resource_group_location,
'sku': 'Premium'
})
self.cmd('acr create -n {registry_name} -g {rg} -l {rg_loc} --sku {sku}',
checks=[self.check('name', '{registry_name}'),
self.check('location', '{rg_loc}'),
self.check('adminUserEnabled', False),
self.check('sku.name', 'Premium'),
self.check('sku.tier', 'Premium'),
self.check('provisioningState', 'Succeeded')])
self._core_registry_scenario(registry_name, resource_group, resource_group_location)
@ResourceGroupPreparer()
def test_acr_create_webhook(self, resource_group, resource_group_location):
registry_name = self.create_random_name('clireg', 20)
webhook_name = 'cliregwebhook'
self.kwargs.update({
'registry_name': registry_name,
'webhook_name': webhook_name,
'rg_loc': resource_group_location,
'headers': 'key=value',
'webhook_scope': 'hello-world',
'uri': 'http://www.microsoft.com',
'actions': 'push',
'sku': 'Standard'
})
self.cmd('acr create -n {registry_name} -g {rg} -l {rg_loc} --sku {sku}',
checks=[self.check('name', '{registry_name}'),
self.check('location', '{rg_loc}'),
self.check('adminUserEnabled', False),
self.check('sku.name', 'Standard'),
self.check('sku.tier', 'Standard'),
self.check('provisioningState', 'Succeeded')])
self.cmd('acr webhook create -n {webhook_name} -r {registry_name} --uri {uri} --actions {actions}',
checks=[self.check('name', '{webhook_name}'),
self.check('location', '{rg_loc}'),
self.check('status', 'enabled'),
self.check('provisioningState', 'Succeeded')])
self.cmd('acr webhook list -r {registry_name}',
checks=[self.check('[0].name', '{webhook_name}'),
self.check('[0].status', 'enabled'),
self.check('[0].provisioningState', 'Succeeded')])
self.cmd('acr webhook show -n {webhook_name} -r {registry_name}',
checks=[self.check('name', '{webhook_name}'),
self.check('status', 'enabled'),
self.check('provisioningState', 'Succeeded')])
# update webhook
self.cmd('acr webhook update -n {webhook_name} -r {registry_name} --headers {headers} --scope {webhook_scope}',
checks=[self.check('name', '{webhook_name}'),
self.check('status', 'enabled'),
self.check('provisioningState', 'Succeeded'),
self.check('scope', '{webhook_scope}')])
# get webhook config
self.cmd('acr webhook get-config -n {webhook_name} -r {registry_name}',
checks=[self.check('serviceUri', '{uri}'),
self.check('customHeaders', {'key': 'value'})])
# ping
self.cmd('acr webhook ping -n {webhook_name} -r {registry_name}', checks=[self.exists('id')])
# list webhook events
self.cmd('acr webhook list-events -n {webhook_name} -r {registry_name}')
# get registry usage
self.cmd('acr show-usage -n {registry_name} -g {rg}',
checks=[self.check('value[?name==`Size`]|[0].currentValue', 0),
self.greater_than('value[?name==`Size`]|[0].limit', 0),
self.check('value[?name==`Webhooks`]|[0].currentValue', 1),
self.greater_than('value[?name==`Webhooks`]|[0].limit', 0)])
# test webhook delete
self.cmd('acr webhook delete -n {webhook_name} -r {registry_name}')
# test acr delete
self.cmd('acr delete -n {registry_name} -g {rg} -y')
|
'''
Factory object
==============
The factory can be used to automatically import any class from a module,
by specifying the module to import instead of the class instance.
The class list and available modules are automatically generated by setup.py.
Example for registering a class/module::
>>> from kivy.factory import Factory
>>> Factory.register('Widget', module='kivy.uix.widget')
>>> Factory.register('Vector', module='kivy.vector')
Example of using the Factory::
>>> from kivy.factory import Factory
>>> widget = Factory.Widget(pos=(456,456))
>>> vector = Factory.Vector(9, 2)
Example using a class name::
>>> from kivy.factory import Factory
>>> Factory.register('MyWidget', cls=MyWidget)
By default, the first classname you register via the factory is permanent.
If you wish to change the registered class, you need to unregister the classname
before you re-assign it::
>>> from kivy.factory import Factory
>>> Factory.register('MyWidget', cls=MyWidget)
>>> widget = Factory.MyWidget()
>>> Factory.unregister('MyWidget')
>>> Factory.register('MyWidget', cls=CustomWidget)
>>> customWidget = Factory.MyWidget()
'''
__all__ = ('Factory', 'FactoryException')
from kivy.logger import Logger
class FactoryException(Exception):
pass
class FactoryBase(object):
def __init__(self):
super(FactoryBase, self).__init__()
self.classes = {}
def is_template(self, classname):
'''Return True if the classname is a template from
:class:`~kivy.lang.Builder`.
.. versionadded:: 1.0.5
'''
if classname in self.classes:
return self.classes[classname]['is_template']
else:
return False
def register(self, classname, cls=None, module=None, is_template=False,
baseclasses=None, filename=None):
'''Register a new classname referring to a real class or
class definition in a module.
.. versionchanged:: 1.7.0
:data:`baseclasses` and :data:`filename` added
.. versionchanged:: 1.0.5
:data:`is_template` has been added in 1.0.5.
'''
if cls is None and module is None and baseclasses is None:
raise ValueError(
'You must specify either cls=, module= or baseclasses=')
if classname in self.classes:
return
self.classes[classname] = {
'module': module,
'cls': cls,
'is_template': is_template,
'baseclasses': baseclasses,
'filename': filename}
def unregister(self, *classnames):
'''Unregisters the classnames previously registered via the
register method. This allows the same classnames to be re-used in
different contexts.
.. versionadded:: 1.7.1
'''
for classname in classnames:
if classname in self.classes:
self.classes.pop(classname)
def unregister_from_filename(self, filename):
'''Unregister all the factory object related to the filename passed in
the parameter.
.. versionadded:: 1.7.0
'''
to_remove = [x for x in self.classes
if self.classes[x]['filename'] == filename]
for name in to_remove:
del self.classes[name]
def __getattr__(self, name):
classes = self.classes
if name not in classes:
raise FactoryException('Unknown class <%s>' % name)
item = classes[name]
cls = item['cls']
# No class to return, import the module
if cls is None:
if item['module']:
module = __import__(name=item['module'], fromlist='.')
if not hasattr(module, name):
raise FactoryException(
'No class named <%s> in module <%s>' % (
name, item['module']))
cls = item['cls'] = getattr(module, name)
elif item['baseclasses']:
rootwidgets = []
for basecls in item['baseclasses'].split('+'):
rootwidgets.append(Factory.get(basecls))
cls = item['cls'] = type(name, tuple(rootwidgets), {})
else:
raise FactoryException('No information to create the class')
return cls
get = __getattr__
#: Factory instance to use for getting new classes
Factory = FactoryBase()
# Now import the file with all registers
# automatically generated by build_factory
import kivy.factory_registers
Logger.info('Factory: %d symbols loaded' % len(Factory.classes))
if __name__ == '__main__':
Factory.register('Vector', module='kivy.vector')
Factory.register('Widget', module='kivy.uix.widget')
|
# Generated by Django 2.0.3 on 2018-03-25 18:23
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app_ifoundadog', '0002_auto_20180325_1645'),
]
operations = [
migrations.CreateModel(
name='UserDogProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dog_name', models.CharField(max_length=32)),
('is_lost', models.BooleanField(default=False)),
('first_name', models.CharField(default='First Name', max_length=64)),
('last_name', models.CharField(default='Last Name', max_length=64)),
('profile_pic', models.ImageField(blank=True, null=True, upload_to='profile/%Y/%m/%d')),
('phone_number', models.CharField(default='XXXXXXXXXX', max_length=12)),
('license_id', models.CharField(default='XXXXXXX', max_length=7)),
('owner_address', models.CharField(default='Address', max_length=256)),
('payment_date', models.DateTimeField(default=datetime.datetime.now)),
('years_issued', models.IntegerField(default=1)),
('dog_sex_choices', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], default='Male', max_length=6)),
('neutered', models.BooleanField(default=False)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='dog_profile', to=settings.AUTH_USER_MODEL)),
],
),
migrations.RemoveField(
model_name='userprofile',
name='user',
),
migrations.DeleteModel(
name='UserProfile',
),
]
|
import os
import sys
from pathlib import Path
# this has to be before imports from kf_lib
lib_path = Path('..').resolve()
os.chdir(lib_path)
if lib_path not in sys.path:
sys.path.append(str(lib_path))
import pandas as pd
TIER_MAX = 10
QI_COST_PER_TIER = 5
def listr(s):
"""'x,x,x'string to list"""
return [ss for ss in s.split(',') if ss]
def save_moves(moves, keys, file_name, sort_alph=False):
"""Save moves (a list) to file. Pipe-separated and formatted to be very human-readable."""
if sort_alph:
moves = sorted(moves, key=lambda x: x['name'])
with open(file_name, 'w', encoding='utf-8') as f:
i = 0
end = len(moves)
batch_size = 25
while i < end:
batch = moves[i:i + batch_size]
col_lens = []
batch_vals = []
for m in batch:
batch_vals.append([repr(m[k]) for k in keys])
legend = keys[:]
legend[0] = '# ' + legend[0]
batch_vals = [legend] + batch_vals
for j, k in enumerate(keys):
col_len = max([len(vals[j]) for vals in batch_vals]) + 1
col_lens.append(col_len)
for vals in batch_vals:
for j, v in enumerate(vals):
vals[j] = v + ' ' * (col_lens[j] - len(v))
f.write('|'.join(vals) + '\n')
f.write('\n')
i += batch_size
f.write(f'# {len(moves)} moves')
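# Illustrative output sketch (hypothetical move fields, not the real schema): each
# batch is written as pipe-separated repr() values padded to a common column width,
# preceded by a '#'-prefixed legend row, e.g.
#   # name   |tier |power
#   'Punch'  |1    |10
#   'Kick'   |2    |15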
def add(m, k, diff, mx=None, mn=None):
m[k] += diff
if mx is not None and m[k] > mx:
m[k] = mx
if mn is not None and m[k] < mn:
m[k] = mn
def up_tier(m, n=1):
add(m, 'tier', n, mx=TIER_MAX)
add(m, 'qi_cost', n * QI_COST_PER_TIER)
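# Illustrative check (hypothetical move dict): up_tier caps 'tier' at TIER_MAX while
# 'qi_cost' keeps growing by QI_COST_PER_TIER per requested tier.
def _example_up_tier():
    m = {'tier': 9, 'qi_cost': 45}
    up_tier(m, 3)
    return m  # {'tier': 10, 'qi_cost': 60}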
def mult(m, k, co):
m[k] *= co
m[k] = round(m[k])
def prefix(m, p):
m['name'] = p + ' ' + m['name']
add_feat(m, p.lower())
def add_feat(m, n):
new_features = m['features'].copy()
new_features.add(n)
m['features'] = new_features
def add_fun(m, n):
m['functions'] = m['functions'][:] + [n]
def light(m):
if 'takedown' in m['features']:
return None
m = m.copy()
mult(m, 'power', 0.8)
mult(m, 'accuracy', 1.1)
mult(m, 'stam_cost', 0.75)
up_tier(m)
prefix(m, 'Light')
return m
def heavy(m):
if 'takedown' in m['features']:
return None
m = m.copy()
mult(m, 'power', 1.2)
mult(m, 'accuracy', 0.9)
mult(m, 'stam_cost', 1.25)
up_tier(m)
prefix(m, 'Heavy')
return m
def long(m):
if m['distance'] >= 4:
return None
m = m.copy()
add(m, 'distance', 1)
add(m, 'freq', -1, mn=1)
if m['distance'] == 4:
up_tier(m, 2)
else:
up_tier(m)
prefix(m, 'Long')
return m
def short(m):
if m['distance'] <= 1:
return None
m = m.copy()
add(m, 'distance', -1)
up_tier(m)
add(m, 'freq', -1, mn=1)
prefix(m, 'Short')
return m
def charging(m):
m = m.copy()
if m['distance'] <= 1:
add(m, 'distance', 1)
up_tier(m, 2)
add(m, 'freq', -1, mn=1)
mult(m, 'stam_cost', 1.1)
mult(m, 'time_cost', 1.2)
add(m, 'dist_change', -1)
prefix(m, 'Charging')
return m
def retreating(m):
if m['distance'] >= 4:
return None
m = m.copy()
up_tier(m, 2)
add(m, 'freq', -1, mn=1)
mult(m, 'stam_cost', 1.15)
mult(m, 'time_cost', 1.25)
add(m, 'dist_change', 1)
prefix(m, 'Retreating')
return m
def onslaught(m):
m = m.copy()
if m['distance'] <= 2:
add(m, 'distance', 2)
up_tier(m, 3)
add(m, 'freq', -1, mn=1)
mult(m, 'stam_cost', 1.2)
mult(m, 'time_cost', 1.4)
add(m, 'complexity', 1)
add(m, 'dist_change', -2)
prefix(m, 'Onslaught')
return m
def vanishing(m):
if m['distance'] >= 3:
return None
m = m.copy()
up_tier(m, 3)
add(m, 'freq', -1, mn=1)
mult(m, 'stam_cost', 1.25)
mult(m, 'time_cost', 1.45)
add(m, 'complexity', 1)
add(m, 'dist_change', 2)
prefix(m, 'Vanishing')
return m
def backflip(m):
if 'kick' not in m['features'] or 'do_agility_based_dam' in m['functions']:
return None
m = m.copy()
up_tier(m, 4)
add(m, 'distance', -2)
add(m, 'freq', -1, mn=1)
mult(m, 'stam_cost', 1.25)
mult(m, 'time_cost', 1.25)
add(m, 'complexity', 1)
add(m, 'dist_change', 2)
add_fun(m, 'do_agility_based_dam')
add_fun(m, 'try_shock_move')
prefix(m, 'Backflip')
add_feat(m, 'acrobatic')
return m
def pushing(m):
if m['distance'] >= 4 or 'takedown' in m['features']:
return None
m = m.copy()
up_tier(m)
add_fun(m, 'do_knockback')
prefix(m, 'Pushing')
return m
def surprise(m):
if any(feat in m['features'] for feat in ('shocking', 'surprise', 'debilitating')):
return None
m = m.copy()
up_tier(m, 2)
add_fun(m, 'try_shock_move')
prefix(m, 'Surprise')
return m
def fast(m):
m = m.copy()
mult(m, 'time_cost', 0.8)
mult(m, 'stam_cost', 1.1)
up_tier(m)
prefix(m, 'Fast')
return m
def strong(m):
if 'takedown' in m['features']:
return None
m = m.copy()
mult(m, 'power', 1.2)
mult(m, 'stam_cost', 1.1)
up_tier(m, 2)
prefix(m, 'Strong')
return m
def precise(m):
if 'takedown' in m['features']:
return None
m = m.copy()
mult(m, 'time_cost', 1.1)
mult(m, 'accuracy', 1.2)
up_tier(m, 2)
prefix(m, 'Precise')
return m
def flying(m):
if m['distance'] >= 4 or 'takedown' in m['features']:
return None
m = m.copy()
mult(m, 'power', 0.9)
mult(m, 'stam_cost', 1.2)
add(m, 'distance', 1)
add(m, 'dist_change', -1)
add(m, 'complexity', 1)
up_tier(m, 3)
add(m, 'freq', -1, mn=1)
prefix(m, 'Flying')
return m
def acrobatic(m):
"""More power and accuracy at the cost of increased complexity; can stun"""
if 'do_agility_based_dam' in m['functions'] or 'do_strength_based_dam' in m['functions']:
return None
if 'takedown' in m['features']:
return None
m = m.copy()
mult(m, 'stam_cost', 1.25)
add(m, 'complexity', 2)
up_tier(m, 3)
add_fun(m, 'do_agility_based_dam')
add_fun(m, 'do_strength_based_dam')
add(m, 'freq', -2, mn=1)
prefix(m, 'Acrobatic')
return m
def power(m):
if 'do_strength_based_dam' in m['functions']:
return None
m = m.copy()
up_tier(m, 2)
add_fun(m, 'do_strength_based_dam')
prefix(m, 'Power')
return m
def trick(m):
if 'do_agility_based_dam' in m['functions']:
return None
m = m.copy()
up_tier(m, 2)
add_fun(m, 'do_agility_based_dam')
prefix(m, 'Trick')
return m
def lightning(m):
if 'do_speed_based_dam' in m['functions']:
return None
m = m.copy()
up_tier(m, 2)
add_fun(m, 'do_speed_based_dam')
prefix(m, 'Lightning')
return m
def energy(m):
if 'do_qi_based_dam' in m['functions']:
return None
m = m.copy()
up_tier(m, 2)
add_fun(m, 'do_qi_based_dam')
prefix(m, 'Energy')
return m
def ferocious(m):
if 'do_speed_based_dam' in m['functions'] or 'do_strength_based_dam' in m['functions']:
return None
m = m.copy()
mult(m, 'stam_cost', 1.2)
up_tier(m, 4)
add_fun(m, 'do_speed_based_dam')
add_fun(m, 'do_strength_based_dam')
prefix(m, 'Ferocious')
return m
def piercing(m):
if 'takedown' in m['features']:
return None
if 'do_agility_based_dam' in m['functions'] or 'do_speed_based_dam' in m['functions']:
return None
m = m.copy()
mult(m, 'stam_cost', 1.2)
up_tier(m, 4)
add_fun(m, 'do_agility_based_dam')
add_fun(m, 'do_speed_based_dam')
prefix(m, 'Piercing')
return m
def shocking(m):
if any(feat in m['features'] for feat in ('shocking', 'surprise', 'debilitating')):
return None
m = m.copy()
up_tier(m, 3)
add_fun(m, 'do_shock_move')
prefix(m, 'Shocking')
return m
def solar(m):
if 'takedown' in m['features']:
return None
m = m.copy()
up_tier(m, 3)
add_fun(m, 'do_stam_dam')
prefix(m, 'Solar')
return m
def nerve(m):
if 'takedown' in m['features']:
return None
m = m.copy()
up_tier(m, 3)
add_fun(m, 'do_mob_dam')
prefix(m, 'Nerve')
return m
def debilitating(m):
if any(feat in m['features'] for feat in ('shocking', 'surprise', 'debilitating')):
return None
m = m.copy()
up_tier(m, 5)
add_fun(m, 'do_shock_move')
add_fun(m, 'do_stam_dam')
add_fun(m, 'do_mob_dam')
add(m, 'freq', -1, mn=1)
prefix(m, 'Debilitating')
return m
def lethal(m):
if 'takedown' in m['features']:
return None
m = m.copy()
up_tier(m, 6)
add_fun(m, 'try_insta_ko')
add(m, 'freq', -2, mn=1)
prefix(m, 'Lethal')
return m
def slashing(m):
if 'cause_bleeding' in m['functions']:
return None
m = m.copy()
up_tier(m, 4)
add_fun(m, 'cause_bleeding')
add(m, 'freq', -2, mn=1)
prefix(m, 'Slashing')
return m
def skillful(m):
m = m.copy()
up_tier(m, 2)
mult(m, 'power', 1.1)
mult(m, 'accuracy', 1.1)
mult(m, 'time_cost', 0.9)
mult(m, 'stam_cost', 1.2)
prefix(m, 'Skillful')
return m
def superior(m):
m = m.copy()
up_tier(m, 3)
mult(m, 'power', 1.2)
mult(m, 'accuracy', 1.2)
mult(m, 'time_cost', 0.8)
mult(m, 'stam_cost', 1.4)
prefix(m, 'Superior')
return m
def advanced(m):
m = m.copy()
up_tier(m, 4)
mult(m, 'power', 1.3)
mult(m, 'accuracy', 1.3)
mult(m, 'time_cost', 0.7)
mult(m, 'stam_cost', 1.6)
prefix(m, 'Advanced')
return m
def expert(m):
m = m.copy()
up_tier(m, 5)
mult(m, 'power', 1.4)
mult(m, 'accuracy', 1.4)
mult(m, 'time_cost', 0.6)
mult(m, 'stam_cost', 1.8)
prefix(m, 'Expert')
return m
def ultimate(m):
m = m.copy()
up_tier(m, 6)
mult(m, 'power', 1.5)
mult(m, 'accuracy', 1.5)
mult(m, 'time_cost', 0.5)
mult(m, 'stam_cost', 2.0)
prefix(m, 'Ultimate')
return m
# todo ultra short kick
def gen_moves(moves):
new_moves = [] # move dicts
move_names = set() # strings
gen_dict = {
light: (shocking, solar, nerve, slashing),
heavy: (shocking, solar, nerve, slashing),
long: (
light,
heavy,
charging,
onslaught,
backflip,
pushing,
surprise,
shocking,
solar,
nerve,
debilitating,
lethal,
fast,
strong,
precise,
acrobatic,
power,
trick,
lightning,
energy,
ferocious,
piercing,
slashing,
),
short: (
light,
heavy,
retreating,
pushing,
surprise,
shocking,
solar,
nerve,
debilitating,
lethal,
fast,
strong,
precise,
acrobatic,
power,
trick,
lightning,
energy,
ferocious,
piercing,
slashing,
),
charging: (
light,
heavy,
surprise,
shocking,
solar,
nerve,
debilitating,
lethal,
fast,
strong,
precise,
power,
trick,
lightning,
energy,
ferocious,
piercing,
slashing,
),
retreating: (
light,
heavy,
surprise,
shocking,
solar,
nerve,
debilitating,
lethal,
fast,
strong,
precise,
power,
trick,
lightning,
energy,
piercing,
slashing,
),
onslaught: (
light,
heavy,
surprise,
shocking,
solar,
nerve,
debilitating,
lethal,
fast,
strong,
precise,
power,
trick,
lightning,
energy,
ferocious,
piercing,
slashing,
),
vanishing: (
light,
heavy,
surprise,
shocking,
solar,
nerve,
debilitating,
lethal,
fast,
strong,
precise,
power,
trick,
lightning,
energy,
piercing,
slashing,
),
backflip: (solar, nerve, slashing),
pushing: (heavy, surprise, shocking, solar, nerve, fast, strong, precise),
surprise: (light, backflip, fast, trick, lightning, piercing, slashing),
fast: (
light,
backflip,
flying,
acrobatic,
power,
trick,
lightning,
energy,
piercing,
shocking,
solar,
nerve,
debilitating,
lethal,
slashing,
),
strong: (
backflip,
flying,
acrobatic,
trick,
lightning,
energy,
piercing,
shocking,
solar,
nerve,
debilitating,
lethal,
slashing,
),
precise: (
light,
backflip,
flying,
acrobatic,
trick,
lightning,
energy,
piercing,
shocking,
solar,
nerve,
debilitating,
lethal,
slashing,
),
flying: (
light,
heavy,
shocking,
solar,
nerve,
power,
trick,
lightning,
energy,
ferocious,
piercing,
debilitating,
lethal,
slashing,
),
acrobatic: (
charging,
retreating,
onslaught,
vanishing,
flying,
shocking,
solar,
nerve,
lightning,
energy,
piercing,
debilitating,
lethal,
slashing,
),
power: (solar, nerve, debilitating, lethal, slashing),
trick: (solar, nerve, debilitating, lethal, slashing),
lightning: (solar, nerve, debilitating, lethal, slashing),
energy: (solar, nerve, debilitating, lethal, slashing),
ferocious: (solar, nerve, debilitating, lethal, slashing),
piercing: (solar, nerve, debilitating, lethal, slashing),
shocking: (solar, nerve, power, trick, lightning, energy, ferocious, piercing, slashing),
solar: (),
nerve: (),
debilitating: (),
lethal: (),
slashing: (),
skillful: (
light,
heavy,
long,
short,
charging,
retreating,
onslaught,
vanishing,
backflip,
pushing,
surprise,
fast,
strong,
precise,
flying,
acrobatic,
power,
trick,
lightning,
energy,
ferocious,
piercing,
shocking,
solar,
nerve,
debilitating,
lethal,
slashing,
),
superior: (
light,
heavy,
long,
short,
charging,
retreating,
onslaught,
vanishing,
backflip,
pushing,
surprise,
fast,
strong,
precise,
flying,
acrobatic,
power,
trick,
lightning,
energy,
ferocious,
piercing,
shocking,
solar,
nerve,
debilitating,
lethal,
slashing,
),
advanced: (
light,
heavy,
long,
short,
charging,
retreating,
onslaught,
vanishing,
backflip,
pushing,
surprise,
fast,
strong,
precise,
flying,
acrobatic,
power,
trick,
lightning,
energy,
ferocious,
piercing,
shocking,
solar,
nerve,
debilitating,
lethal,
slashing,
),
expert: (
light,
heavy,
long,
short,
charging,
retreating,
onslaught,
vanishing,
backflip,
pushing,
surprise,
fast,
strong,
precise,
flying,
acrobatic,
power,
trick,
lightning,
energy,
ferocious,
piercing,
shocking,
solar,
nerve,
debilitating,
lethal,
slashing,
),
ultimate: (
light,
heavy,
long,
short,
charging,
retreating,
onslaught,
vanishing,
backflip,
pushing,
surprise,
fast,
strong,
precise,
flying,
acrobatic,
power,
trick,
lightning,
energy,
ferocious,
piercing,
shocking,
solar,
nerve,
debilitating,
lethal,
slashing,
),
}
for base_m in moves:
chains = [] # variable-length tuples of functions
# get all 'chains' of up to 2 functions for the base move (3-function chains are disabled below)
for fun1, funs2 in gen_dict.items():
chains.append((fun1,))
for fun2 in funs2:
chains.append((fun1, fun2))
# for fun3 in gen_dict[fun2]:
# chains.append((fun1, fun2, fun3))
# apply the chains whenever possible
for chain in chains:
chain = reversed(chain)
temp = base_m
for fun in chain:
new_move = fun(temp)
if new_move is not None:
if new_move['name'] not in move_names:
new_moves.append(new_move)
move_names.add(new_move['name'])
temp = new_move
else:
break
return moves + new_moves
def main():
from kf_lib.kung_fu.moves import read_moves
from kf_lib.utils import MOVES_FOLDER
base_moves, keys = read_moves(Path(MOVES_FOLDER, 'base_moves.txt'))
save_moves(base_moves, keys, Path(MOVES_FOLDER, 'base_moves.txt'))
extra_moves, keys = read_moves(Path(MOVES_FOLDER, 'extra_moves.txt'))
save_moves(extra_moves, keys, Path(MOVES_FOLDER, 'extra_moves.txt'))
style_moves, keys = read_moves(Path(MOVES_FOLDER, 'style_moves.txt'))
save_moves(style_moves, keys, Path(MOVES_FOLDER, 'style_moves.txt'))
takedown_moves = [m for m in extra_moves if 'takedown' in m['features']]
moves = gen_moves(base_moves + takedown_moves) # generated moves also include base_moves
moves += extra_moves + style_moves
save_moves(moves, keys, Path(MOVES_FOLDER, 'all_moves.txt'), sort_alph=True)
df = pd.DataFrame(moves, columns=keys)
df.to_csv(Path(MOVES_FOLDER, 'all_moves.csv'), sep=';')
print(f'generated {len(moves)} moves')
input('Press Enter to exit')
if __name__ == '__main__':
main()
|
import os
import argparse
def create_val_img_folder(args):
'''
Separate validation images into per-class subfolders based on val_annotations.txt.
'''
dataset_dir = os.path.join(args.data_dir)
val_dir = os.path.join(dataset_dir, 'val')
img_dir = os.path.join(val_dir, 'images')
fp = open(os.path.join(val_dir, 'val_annotations.txt'), 'r')
data = fp.readlines()
val_img_dict = {}
for line in data:
words = line.split('\t')
val_img_dict[words[0]] = words[1]
fp.close()
# Create folder if not present and move images into proper folders
for img, folder in val_img_dict.items():
newpath = (os.path.join(img_dir, folder))
if not os.path.exists(newpath):
os.makedirs(newpath)
if os.path.exists(os.path.join(img_dir, img)):
os.rename(os.path.join(img_dir, img), os.path.join(newpath, img))
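# Illustrative val_annotations.txt layout (Tiny-ImageNet style; only the first two
# tab-separated columns, image name and class folder, are used above):
#   val_0.JPEG	n03444034	0	32	44	62
#   val_1.JPEG	n04067472	52	55	57	59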
if __name__=='__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument("data_dir")
args=parser.parse_args()
create_val_img_folder(args)
|
"""Root Manager object."""
import json
from collections import deque
from typing import TYPE_CHECKING, Dict, Optional, Type, Union
from .base import ItemCollection, ZWaveBase
from .const import EMPTY_PAYLOAD
from .models.instance import OZWInstance
from .options import OZWOptions
if TYPE_CHECKING:
from .base import DiscardMessages, EventMessages # noqa: F401
class OZWManager(ZWaveBase):
"""Manager that holds the OZW instances connected to MQTT."""
DIRECT_COLLECTION = "instance"
DEFAULT_VALUE: Optional[dict] = None
EVENT_CHANGED = "manager_placeholder_event"
def __init__(self, options: OZWOptions):
"""Initialize class."""
super().__init__(options, None, options.topic_prefix, None)
def create_collections(
self,
) -> Dict[
str,
Union[ItemCollection, Type["ZWaveBase"], "DiscardMessages", "EventMessages"],
]:
"""Create collections that the manager supports."""
return {"instance": ItemCollection(OZWInstance)}
def receive_message(self, topic: str, message: str) -> None:
"""Receive an MQTT message."""
assert topic.startswith(self.options.topic_prefix)
topic_parts_raw = topic[len(self.options.topic_prefix) :].split("/")
instance_id = self.options.instance_id
if instance_id is not None and topic_parts_raw[0] != str(instance_id):
return
if topic_parts_raw[-1] == "":
topic_parts_raw.pop()
topic_parts = deque(topic_parts_raw)
if message == "":
payload = EMPTY_PAYLOAD
else:
payload = json.loads(message)
self.process_message(topic_parts, payload)
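# Illustrative message flow (topic prefix and payload are placeholders): with
# options.topic_prefix == "OpenZWave/", a message on "OpenZWave/1/status/" carrying
# '{"Status": "driverAllNodesQueried"}' is routed with
# topic_parts == deque(['1', 'status']) and the decoded JSON as payload,
# while an empty message string becomes EMPTY_PAYLOAD.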
|
"""TensorFlow workspace initialization. Consult the WORKSPACE on how to use it."""
# Import third party config rules.
load("//tensorflow:version_check.bzl", "check_bazel_version_at_least")
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/gpus:rocm_configure.bzl", "rocm_configure")
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")
load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("@tf_toolchains//toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("@tf_toolchains//toolchains/embedded/arm-linux:arm_linux_toolchain_configure.bzl", "arm_linux_toolchain_configure")
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl", "def_file_filter_configure")
load("//third_party/llvm:setup.bzl", "llvm_setup")
# Import third party repository rules. See go/tfbr-thirdparty.
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
load("//third_party/absl:workspace.bzl", absl = "repo")
load("//third_party/benchmark:workspace.bzl", benchmark = "repo")
load("//third_party/clog:workspace.bzl", clog = "repo")
load("//third_party/cpuinfo:workspace.bzl", cpuinfo = "repo")
load("//third_party/dlpack:workspace.bzl", dlpack = "repo")
load("//third_party/eigen3:workspace.bzl", eigen3 = "repo")
load("//third_party/farmhash:workspace.bzl", farmhash = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/gemmlowp:workspace.bzl", gemmlowp = "repo")
load("//third_party/hexagon:workspace.bzl", hexagon_nn = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/libprotobuf_mutator:workspace.bzl", libprotobuf_mutator = "repo")
load("//third_party/nasm:workspace.bzl", nasm = "repo")
load("//third_party/pybind11_abseil:workspace.bzl", pybind11_abseil = "repo")
load("//third_party/opencl_headers:workspace.bzl", opencl_headers = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/pasta:workspace.bzl", pasta = "repo")
load("//third_party/psimd:workspace.bzl", psimd = "repo")
load("//third_party/ruy:workspace.bzl", ruy = "repo")
load("//third_party/sobol_data:workspace.bzl", sobol_data = "repo")
load("//third_party/vulkan_headers:workspace.bzl", vulkan_headers = "repo")
load("//third_party/tensorrt:workspace.bzl", tensorrt = "repo")
# Import external repository rules.
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load("@tf_runtime//:dependencies.bzl", "tfrt_dependencies")
load("@tf_toolchains//toolchains/remote_config:configs.bzl", "initialize_rbe_configs")
load("@tf_toolchains//toolchains/remote:configure.bzl", "remote_execution_configure")
load("@tf_toolchains//toolchains/clang6:repo.bzl", "clang6_configure")
load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
def _initialize_third_party():
""" Load third party repositories. See above load() statements. """
FP16()
absl()
benchmark()
clog()
cpuinfo()
dlpack()
eigen3()
farmhash()
flatbuffers()
gemmlowp()
hexagon_nn()
highwayhash()
hwloc()
icu()
jpeg()
kissfft()
libprotobuf_mutator()
nasm()
opencl_headers()
pasta()
psimd()
pybind11_abseil()
ruy()
sobol_data()
vulkan_headers()
tensorrt()
# Toolchains & platforms required to build TensorFlow.
def _tf_toolchains():
native.register_execution_platforms("@local_execution_config_platform//:platform")
native.register_toolchains("@local_execution_config_python//:py_toolchain")
# Loads all external repos to configure RBE builds.
initialize_rbe_configs()
# Note that we check the minimum bazel version in WORKSPACE.
clang6_configure(name = "local_config_clang6")
cc_download_clang_toolchain(name = "local_config_download_clang")
cuda_configure(name = "local_config_cuda")
tensorrt_configure(name = "local_config_tensorrt")
nccl_configure(name = "local_config_nccl")
git_configure(name = "local_config_git")
syslibs_configure(name = "local_config_syslibs")
python_configure(name = "local_config_python")
rocm_configure(name = "local_config_rocm")
remote_execution_configure(name = "local_config_remote_execution")
# For windows bazel build
# TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
def_file_filter_configure(name = "local_config_def_file_filter")
# Point //external/local_config_arm_compiler to //external/arm_compiler
arm_compiler_configure(
name = "local_config_arm_compiler",
build_file = "@tf_toolchains//toolchains/cpus/arm:BUILD",
remote_config_repo_arm = "../arm_compiler",
remote_config_repo_aarch64 = "../aarch64_compiler",
)
    # TFLite crossbuild toolchain for embedded Linux
arm_linux_toolchain_configure(
name = "local_config_embedded_arm",
build_file = "@tf_toolchains//toolchains/embedded/arm-linux:BUILD",
aarch64_repo = "../aarch64_linux_toolchain",
armhf_repo = "../armhf_linux_toolchain",
)
# Define all external repositories required by TensorFlow
def _tf_repositories():
"""All external dependencies for TF builds."""
    # To update any of the dependencies below:
# a) update URL and strip_prefix to the new git commit hash
# b) get the sha256 hash of the commit by running:
# curl -L <url> | sha256sum
# and update the sha256 with the result.
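    # For example, for the XNNPACK archive below (the commit hash is a placeholder):
    #   curl -L https://github.com/google/XNNPACK/archive/<commit>.zip | sha256sum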
# LINT.IfChange
tf_http_archive(
name = "XNNPACK",
sha256 = "18fff212c03771eb1813e9303c057e43a07c34283811385d16120862bb7c3b3a",
strip_prefix = "XNNPACK-1425eb5d88d626313cec6acd1a4c2363cb82f9c2",
urls = tf_mirror_urls("https://github.com/google/XNNPACK/archive/1425eb5d88d626313cec6acd1a4c2363cb82f9c2.zip"),
)
# LINT.ThenChange(//tensorflow/lite/tools/cmake/modules/xnnpack.cmake)
tf_http_archive(
name = "FXdiv",
sha256 = "3d7b0e9c4c658a84376a1086126be02f9b7f753caa95e009d9ac38d11da444db",
strip_prefix = "FXdiv-63058eff77e11aa15bf531df5dd34395ec3017c8",
urls = tf_mirror_urls("https://github.com/Maratyszcza/FXdiv/archive/63058eff77e11aa15bf531df5dd34395ec3017c8.zip"),
)
tf_http_archive(
name = "pthreadpool",
sha256 = "b96413b10dd8edaa4f6c0a60c6cf5ef55eebeef78164d5d69294c8173457f0ec",
strip_prefix = "pthreadpool-b8374f80e42010941bda6c85b0e3f1a1bd77a1e0",
urls = tf_mirror_urls("https://github.com/Maratyszcza/pthreadpool/archive/b8374f80e42010941bda6c85b0e3f1a1bd77a1e0.zip"),
)
tf_http_archive(
name = "cudnn_frontend_archive",
build_file = "//third_party:cudnn_frontend.BUILD",
patch_file = ["//third_party:cudnn_frontend_header_fix.patch"],
sha256 = "fdf4234e9c9c481b3b3a80ad404bc278e6ecb761c5574beb4d3a2cde4a9002ad",
strip_prefix = "cudnn-frontend-73210a930333eaf66b42b01693bce7b70719c354",
urls = tf_mirror_urls("https://github.com/NVIDIA/cudnn-frontend/archive/73210a930333eaf66b42b01693bce7b70719c354.zip"),
)
tf_http_archive(
name = "mkl_dnn",
build_file = "//third_party/mkl_dnn:mkldnn.BUILD",
sha256 = "a0211aeb5e7dad50b97fa5dffc1a2fe2fe732572d4164e1ee8750a2ede43fbec",
strip_prefix = "oneDNN-0.21.3",
urls = tf_mirror_urls("https://github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz"),
)
tf_http_archive(
name = "mkl_dnn_v1",
build_file = "//third_party/mkl_dnn:mkldnn_v1.BUILD",
sha256 = "f1c5a35c2c091e02417d7aa6ede83f863d35cf0ad91a132185952f5cca7b4887",
strip_prefix = "oneDNN-2.5.1",
urls = tf_mirror_urls("https://github.com/oneapi-src/oneDNN/archive/refs/tags/v2.5.1.tar.gz"),
)
tf_http_archive(
name = "mkl_dnn_acl_compatible",
build_file = "//third_party/mkl_dnn:mkldnn_acl.BUILD",
sha256 = "d7a47caeb28d2c67dc8fa0d0f338b11fbf25b473a608f04cfed913aea88815a9",
strip_prefix = "oneDNN-2.5",
urls = tf_mirror_urls("https://github.com/oneapi-src/oneDNN/archive/v2.5.tar.gz"),
)
tf_http_archive(
name = "compute_library",
sha256 = "8322ed2e135999569082a95e7fbb2fa87786ffb1c67935b3ef71e00b53f2c887",
strip_prefix = "ComputeLibrary-21.11",
build_file = "//third_party/compute_library:BUILD",
patch_file = ["//third_party/compute_library:compute_library.patch"],
urls = tf_mirror_urls("https://github.com/ARM-software/ComputeLibrary/archive/v21.11.tar.gz"),
)
tf_http_archive(
name = "arm_compiler",
build_file = "//:arm_compiler.BUILD",
sha256 = "b9e7d50ffd9996ed18900d041d362c99473b382c0ae049b2fce3290632d2656f",
strip_prefix = "rpi-newer-crosstools-eb68350c5c8ec1663b7fe52c742ac4271e3217c5/x64-gcc-6.5.0/arm-rpi-linux-gnueabihf/",
urls = tf_mirror_urls("https://github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz"),
)
tf_http_archive(
# This is the latest `aarch64-none-linux-gnu` compiler provided by ARM
# See https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads
# The archive contains GCC version 9.2.1
name = "aarch64_compiler",
build_file = "//:arm_compiler.BUILD",
sha256 = "8dfe681531f0bd04fb9c53cf3c0a3368c616aa85d48938eebe2b516376e06a66",
strip_prefix = "gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu",
urls = tf_mirror_urls("https://developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz"),
)
tf_http_archive(
name = "aarch64_linux_toolchain",
build_file = "@tf_toolchains//toolchains/embedded/arm-linux:aarch64-linux-toolchain.BUILD",
sha256 = "8ce3e7688a47d8cd2d8e8323f147104ae1c8139520eca50ccf8a7fa933002731",
strip_prefix = "gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu",
urls = tf_mirror_urls("https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz"),
)
tf_http_archive(
name = "armhf_linux_toolchain",
build_file = "@tf_toolchains//toolchains/embedded/arm-linux:armhf-linux-toolchain.BUILD",
sha256 = "d4f6480ecaa99e977e3833cc8a8e1263f9eecd1ce2d022bb548a24c4f32670f5",
strip_prefix = "gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf",
urls = tf_mirror_urls("https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz"),
)
tf_http_archive(
name = "libxsmm_archive",
build_file = "//third_party:libxsmm.BUILD",
sha256 = "e491ccadebc5cdcd1fc08b5b4509a0aba4e2c096f53d7880062a66b82a0baf84",
strip_prefix = "libxsmm-1.16.3",
urls = tf_mirror_urls("https://github.com/hfp/libxsmm/archive/1.16.3.tar.gz"),
)
tf_http_archive(
name = "com_googlesource_code_re2",
sha256 = "d070e2ffc5476c496a6a872a6f246bfddce8e7797d6ba605a7c8d72866743bf9",
strip_prefix = "re2-506cfa4bffd060c06ec338ce50ea3468daa6c814",
system_build_file = "//third_party/systemlibs:re2.BUILD",
urls = tf_mirror_urls("https://github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz"),
)
tf_http_archive(
name = "com_github_google_crc32c",
sha256 = "6b3b1d861bb8307658b2407bc7a4c59e566855ef5368a60b35c893551e4788e9",
build_file = "@com_github_googlecloudplatform_google_cloud_cpp//bazel:crc32c.BUILD",
strip_prefix = "crc32c-1.0.6",
urls = tf_mirror_urls("https://github.com/google/crc32c/archive/1.0.6.tar.gz"),
)
tf_http_archive(
name = "com_github_googlecloudplatform_google_cloud_cpp",
sha256 = "ff82045b9491f0d880fc8e5c83fd9542eafb156dcac9ff8c6209ced66ed2a7f0",
strip_prefix = "google-cloud-cpp-1.17.1",
repo_mapping = {
"@com_github_curl_curl": "@curl",
"@com_github_nlohmann_json": "@nlohmann_json_lib",
},
system_build_file = "//third_party/systemlibs:google_cloud_cpp.BUILD",
system_link_files = {
"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD",
},
urls = tf_mirror_urls("https://github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz"),
)
tf_http_archive(
name = "com_github_googlecloudplatform_tensorflow_gcp_tools",
sha256 = "5e9ebe17eaa2895eb7f77fefbf52deeda7c4b63f5a616916b823eb74f3a0c542",
strip_prefix = "tensorflow-gcp-tools-2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5",
urls = tf_mirror_urls("https://github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz"),
)
tf_http_archive(
name = "com_google_googleapis",
build_file = "//third_party/googleapis:googleapis.BUILD",
sha256 = "7ebab01b06c555f4b6514453dc3e1667f810ef91d1d4d2d3aa29bb9fcb40a900",
strip_prefix = "googleapis-541b1ded4abadcc38e8178680b0677f65594ea6f",
urls = tf_mirror_urls("https://github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip"),
)
tf_http_archive(
name = "png",
build_file = "//third_party:png.BUILD",
patch_file = ["//third_party:png_fix_rpi.patch"],
sha256 = "ca74a0dace179a8422187671aee97dd3892b53e168627145271cad5b5ac81307",
strip_prefix = "libpng-1.6.37",
system_build_file = "//third_party/systemlibs:png.BUILD",
urls = tf_mirror_urls("https://github.com/glennrp/libpng/archive/v1.6.37.tar.gz"),
)
tf_http_archive(
name = "org_sqlite",
build_file = "//third_party:sqlite.BUILD",
sha256 = "999826fe4c871f18919fdb8ed7ec9dd8217180854dd1fe21eea96aed36186729",
strip_prefix = "sqlite-amalgamation-3360000",
system_build_file = "//third_party/systemlibs:sqlite.BUILD",
urls = tf_mirror_urls("https://www.sqlite.org/2021/sqlite-amalgamation-3360000.zip"),
)
tf_http_archive(
name = "gif",
build_file = "//third_party:gif.BUILD",
patch_file = ["//third_party:gif_fix_strtok_r.patch"],
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
strip_prefix = "giflib-5.2.1",
system_build_file = "//third_party/systemlibs:gif.BUILD",
urls = tf_mirror_urls("https://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz"),
)
tf_http_archive(
name = "six_archive",
build_file = "//third_party:six.BUILD",
sha256 = "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
strip_prefix = "six-1.15.0",
system_build_file = "//third_party/systemlibs:six.BUILD",
urls = tf_mirror_urls("https://pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz"),
)
tf_http_archive(
name = "astor_archive",
build_file = "//third_party:astor.BUILD",
sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d",
strip_prefix = "astor-0.7.1",
system_build_file = "//third_party/systemlibs:astor.BUILD",
urls = tf_mirror_urls("https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz"),
)
tf_http_archive(
name = "astunparse_archive",
build_file = "//third_party:astunparse.BUILD",
sha256 = "5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872",
strip_prefix = "astunparse-1.6.3/lib",
system_build_file = "//third_party/systemlibs:astunparse.BUILD",
urls = tf_mirror_urls("https://files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz"),
)
filegroup_external(
name = "astunparse_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"92fc0e4f4fa9460558eedf3412b988d433a2dcbb3a9c45402a145a4fab8a6ac6": tf_mirror_urls("https://raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE"),
},
)
tf_http_archive(
name = "functools32_archive",
build_file = "//third_party:functools32.BUILD",
sha256 = "f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d",
strip_prefix = "functools32-3.2.3-2",
system_build_file = "//third_party/systemlibs:functools32.BUILD",
urls = tf_mirror_urls("https://pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz"),
)
tf_http_archive(
name = "gast_archive",
build_file = "//third_party:gast.BUILD",
sha256 = "40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1",
strip_prefix = "gast-0.4.0",
system_build_file = "//third_party/systemlibs:gast.BUILD",
urls = tf_mirror_urls("https://files.pythonhosted.org/packages/83/4a/07c7e59cef23fb147454663c3271c21da68ba2ab141427c20548ae5a8a4d/gast-0.4.0.tar.gz"),
)
tf_http_archive(
name = "termcolor_archive",
build_file = "//third_party:termcolor.BUILD",
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
system_build_file = "//third_party/systemlibs:termcolor.BUILD",
urls = tf_mirror_urls("https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz"),
)
tf_http_archive(
name = "typing_extensions_archive",
build_file = "//third_party:typing_extensions.BUILD",
sha256 = "79ee589a3caca649a9bfd2a8de4709837400dfa00b6cc81962a1e6a1815969ae",
strip_prefix = "typing_extensions-3.7.4.2/src_py3",
system_build_file = "//third_party/systemlibs:typing_extensions.BUILD",
urls = tf_mirror_urls("https://files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz"),
)
filegroup_external(
name = "typing_extensions_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"ff17ce94e102024deb68773eb1cc74ca76da4e658f373531f0ac22d68a6bb1ad": tf_mirror_urls("https://raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE"),
},
)
tf_http_archive(
name = "opt_einsum_archive",
build_file = "//third_party:opt_einsum.BUILD",
sha256 = "d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b",
strip_prefix = "opt_einsum-2.3.2",
system_build_file = "//third_party/systemlibs:opt_einsum.BUILD",
urls = tf_mirror_urls("https://pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz"),
)
tf_http_archive(
name = "absl_py",
sha256 = "a7c51b2a0aa6357a9cbb2d9437e8cd787200531867dc02565218930b6a32166e",
strip_prefix = "abseil-py-1.0.0",
system_build_file = "//third_party/systemlibs:absl_py.BUILD",
system_link_files = {
"//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD",
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
"//third_party/systemlibs:absl_py.absl.logging.BUILD": "absl/logging/BUILD",
},
urls = tf_mirror_urls("https://github.com/abseil/abseil-py/archive/refs/tags/v1.0.0.tar.gz"),
)
tf_http_archive(
name = "dill_archive",
build_file = "//third_party:dill.BUILD",
system_build_file = "//third_party/systemlibs:dill.BUILD",
urls = tf_mirror_urls("https://files.pythonhosted.org/packages/e2/96/518a8ea959a734b70d2e95fef98bcbfdc7adad1c1e5f5dd9148c835205a5/dill-0.3.2.zip"),
sha256 = "6e12da0d8e49c220e8d6e97ee8882002e624f1160289ce85ec2cc0a5246b3a2e",
strip_prefix = "dill-0.3.2",
)
tf_http_archive(
name = "tblib_archive",
build_file = "//third_party:tblib.BUILD",
system_build_file = "//third_party/systemlibs:tblib.BUILD",
urls = tf_mirror_urls("https://files.pythonhosted.org/packages/d3/41/901ef2e81d7b1e834b9870d416cb09479e175a2be1c4aa1a9dcd0a555293/tblib-1.7.0.tar.gz"),
sha256 = "059bd77306ea7b419d4f76016aef6d7027cc8a0785579b5aad198803435f882c",
strip_prefix = "tblib-1.7.0",
)
filegroup_external(
name = "org_python_license",
licenses = ["notice"], # Python 2.0
sha256_urls = {
"e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": tf_mirror_urls("https://docs.python.org/2.7/_sources/license.rst.txt"),
},
)
tf_http_archive(
name = "com_google_protobuf",
patch_file = ["//third_party/protobuf:protobuf.patch"],
sha256 = "cfcba2df10feec52a84208693937c17a4b5df7775e1635c1e3baffc487b24c9b",
strip_prefix = "protobuf-3.9.2",
system_build_file = "//third_party/systemlibs:protobuf.BUILD",
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
"//third_party/systemlibs:protobuf_deps.bzl": "protobuf_deps.bzl",
},
urls = tf_mirror_urls("https://github.com/protocolbuffers/protobuf/archive/v3.9.2.zip"),
)
tf_http_archive(
name = "nsync",
patch_file = ["//third_party:nsync.patch"],
sha256 = "caf32e6b3d478b78cff6c2ba009c3400f8251f646804bcb65465666a9cea93c4",
strip_prefix = "nsync-1.22.0",
system_build_file = "//third_party/systemlibs:nsync.BUILD",
urls = tf_mirror_urls("https://github.com/google/nsync/archive/1.22.0.tar.gz"),
)
tf_http_archive(
name = "com_google_googletest",
sha256 = "bc1cc26d1120f5a7e9eb450751c0b24160734e46a02823a573f3c6b6c0a574a7",
strip_prefix = "googletest-e2c06aa2497e330bab1c1a03d02f7c5096eb5b0b",
urls = tf_mirror_urls("https://github.com/google/googletest/archive/e2c06aa2497e330bab1c1a03d02f7c5096eb5b0b.zip"),
)
tf_http_archive(
name = "com_github_gflags_gflags",
sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
strip_prefix = "gflags-2.2.1",
urls = tf_mirror_urls("https://github.com/gflags/gflags/archive/v2.2.1.tar.gz"),
)
tf_http_archive(
name = "curl",
build_file = "//third_party:curl.BUILD",
sha256 = "ac8e1087711084548d788ef18b9b732c8de887457b81f616fc681d1044b32f98",
strip_prefix = "curl-7.81.0",
system_build_file = "//third_party/systemlibs:curl.BUILD",
urls = tf_mirror_urls("https://curl.haxx.se/download/curl-7.81.0.tar.gz"),
)
# WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule
tf_http_archive(
name = "com_github_grpc_grpc",
sha256 = "b956598d8cbe168b5ee717b5dafa56563eb5201a947856a6688bbeac9cac4e1f",
strip_prefix = "grpc-b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd",
system_build_file = "//third_party/systemlibs:grpc.BUILD",
patch_file = ["//third_party/grpc:generate_cc_env_fix.patch"],
system_link_files = {
"//third_party/systemlibs:BUILD": "bazel/BUILD",
"//third_party/systemlibs:grpc.BUILD": "src/compiler/BUILD",
"//third_party/systemlibs:grpc.bazel.grpc_deps.bzl": "bazel/grpc_deps.bzl",
"//third_party/systemlibs:grpc.bazel.grpc_extra_deps.bzl": "bazel/grpc_extra_deps.bzl",
"//third_party/systemlibs:grpc.bazel.cc_grpc_library.bzl": "bazel/cc_grpc_library.bzl",
"//third_party/systemlibs:grpc.bazel.generate_cc.bzl": "bazel/generate_cc.bzl",
"//third_party/systemlibs:grpc.bazel.protobuf.bzl": "bazel/protobuf.bzl",
},
urls = tf_mirror_urls("https://github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz"),
)
tf_http_archive(
name = "linenoise",
build_file = "//third_party:linenoise.BUILD",
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
urls = tf_mirror_urls("https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz"),
)
llvm_setup(name = "llvm-project")
# Intel openMP that is part of LLVM sources.
tf_http_archive(
name = "llvm_openmp",
build_file = "//third_party/llvm_openmp:BUILD",
sha256 = "d19f728c8e04fb1e94566c8d76aef50ec926cd2f95ef3bf1e0a5de4909b28b44",
strip_prefix = "openmp-10.0.1.src",
urls = tf_mirror_urls("https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.1/openmp-10.0.1.src.tar.xz"),
)
tf_http_archive(
name = "lmdb",
build_file = "//third_party:lmdb.BUILD",
sha256 = "22054926b426c66d8f2bc22071365df6e35f3aacf19ad943bc6167d4cae3bebb",
strip_prefix = "lmdb-LMDB_0.9.29/libraries/liblmdb",
system_build_file = "//third_party/systemlibs:lmdb.BUILD",
urls = tf_mirror_urls("https://github.com/LMDB/lmdb/archive/refs/tags/LMDB_0.9.29.tar.gz"),
)
tf_http_archive(
name = "jsoncpp_git",
sha256 = "f409856e5920c18d0c2fb85276e24ee607d2a09b5e7d5f0a371368903c275da2",
strip_prefix = "jsoncpp-1.9.5",
system_build_file = "//third_party/systemlibs:jsoncpp.BUILD",
urls = tf_mirror_urls("https://github.com/open-source-parsers/jsoncpp/archive/1.9.5.tar.gz"),
)
tf_http_archive(
name = "boringssl",
sha256 = "a9c3b03657d507975a32732f04563132b4553c20747cec6dc04de475c8bdf29f",
strip_prefix = "boringssl-80ca9f9f6ece29ab132cce4cf807a9465a18cfac",
system_build_file = "//third_party/systemlibs:boringssl.BUILD",
urls = tf_mirror_urls("https://github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz"),
)
tf_http_archive(
name = "zlib",
build_file = "//third_party:zlib.BUILD",
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
system_build_file = "//third_party/systemlibs:zlib.BUILD",
urls = tf_mirror_urls("https://zlib.net/zlib-1.2.11.tar.gz"),
)
# LINT.IfChange
tf_http_archive(
name = "fft2d",
build_file = "//third_party/fft2d:fft2d.BUILD",
sha256 = "5f4dabc2ae21e1f537425d58a49cdca1c49ea11db0d6271e2a4b27e9697548eb",
strip_prefix = "OouraFFT-1.0",
urls = tf_mirror_urls("https://github.com/petewarden/OouraFFT/archive/v1.0.tar.gz"),
)
# LINT.ThenChange(//tensorflow/lite/tools/cmake/modules/fft2d.cmake)
tf_http_archive(
name = "snappy",
build_file = "//third_party:snappy.BUILD",
sha256 = "16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f",
strip_prefix = "snappy-1.1.8",
system_build_file = "//third_party/systemlibs:snappy.BUILD",
urls = tf_mirror_urls("https://github.com/google/snappy/archive/1.1.8.tar.gz"),
)
tf_http_archive(
name = "nccl_archive",
build_file = "//third_party:nccl/archive.BUILD",
patch_file = ["//third_party/nccl:archive.patch"],
sha256 = "3ae89ddb2956fff081e406a94ff54ae5e52359f5d645ce977c7eba09b3b782e6",
strip_prefix = "nccl-2.8.3-1",
urls = tf_mirror_urls("https://github.com/nvidia/nccl/archive/v2.8.3-1.tar.gz"),
)
java_import_external(
name = "junit",
jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"https://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"https://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
],
licenses = ["reciprocal"], # Common Public License Version 1.0
testonly_ = True,
deps = ["@org_hamcrest_core"],
)
java_import_external(
name = "org_hamcrest_core",
jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"https://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"https://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
)
java_import_external(
name = "com_google_testing_compile",
jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
"https://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
deps = ["@com_google_guava", "@com_google_truth"],
)
java_import_external(
name = "com_google_truth",
jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
"https://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
],
licenses = ["notice"], # Apache 2.0
testonly_ = True,
deps = ["@com_google_guava"],
)
java_import_external(
name = "org_checkerframework_qual",
jar_sha256 = "d261fde25d590f6b69db7721d469ac1b0a19a17ccaaaa751c31f0d8b8260b894",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
"https://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
java_import_external(
name = "com_squareup_javapoet",
jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
"https://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
tf_http_archive(
name = "com_google_pprof",
build_file = "//third_party:pprof.BUILD",
sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
urls = tf_mirror_urls("https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz"),
)
# The CUDA 11 toolkit ships with CUB. We should be able to delete this rule
# once TF drops support for CUDA 10.
tf_http_archive(
name = "cub_archive",
build_file = "//third_party:cub.BUILD",
sha256 = "162514b3cc264ac89d91898b58450190b8192e2af1142cf8ccac2d59aa160dda",
strip_prefix = "cub-1.9.9",
urls = tf_mirror_urls("https://github.com/NVlabs/cub/archive/1.9.9.zip"),
)
tf_http_archive(
name = "cython",
build_file = "//third_party:cython.BUILD",
sha256 = "e2e38e1f0572ca54d6085df3dec8b607d20e81515fb80215aed19c81e8fe2079",
strip_prefix = "cython-0.29.21",
system_build_file = "//third_party/systemlibs:cython.BUILD",
urls = tf_mirror_urls("https://github.com/cython/cython/archive/0.29.21.tar.gz"),
)
# LINT.IfChange
tf_http_archive(
name = "arm_neon_2_x86_sse",
build_file = "//third_party:arm_neon_2_x86_sse.BUILD",
sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc",
strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f",
urls = tf_mirror_urls("https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz"),
)
# LINT.ThenChange(//tensorflow/lite/tools/cmake/modules/neon2sse.cmake)
tf_http_archive(
name = "double_conversion",
sha256 = "a0204d6ab48223f2c8f53a932014e7f245125e7a5267764b1fbeebe4fa0ee8b9",
strip_prefix = "double-conversion-3.1.7",
system_build_file = "//third_party/systemlibs:double_conversion.BUILD",
urls = tf_mirror_urls("https://github.com/google/double-conversion/archive/refs/tags/v3.1.7.tar.gz"),
)
tf_http_archive(
name = "tflite_mobilenet_float",
build_file = "//third_party:tflite_mobilenet_float.BUILD",
sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_quant",
build_file = "//third_party:tflite_mobilenet_quant.BUILD",
sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant_protobuf",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79",
strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
],
)
tf_http_archive(
name = "tflite_conv_actions_frozen",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
],
)
tf_http_archive(
name = "tflite_ovic_testdata",
build_file = "//third_party:tflite_ovic_testdata.BUILD",
sha256 = "033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2",
strip_prefix = "ovic",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
"https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
],
)
tf_http_archive(
name = "rules_python",
sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161",
urls = tf_mirror_urls("https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz"),
)
tf_http_archive(
name = "build_bazel_rules_android",
sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
strip_prefix = "rules_android-0.1.1",
urls = tf_mirror_urls("https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip"),
)
# Apple and Swift rules.
# https://github.com/bazelbuild/rules_apple/releases
tf_http_archive(
name = "build_bazel_rules_apple",
sha256 = "a5f00fd89eff67291f6cd3efdc8fad30f4727e6ebb90718f3f05bbf3c3dd5ed7",
urls = tf_mirror_urls("https://github.com/bazelbuild/rules_apple/releases/download/0.33.0/rules_apple.0.33.0.tar.gz"),
)
# https://github.com/bazelbuild/rules_swift/releases
tf_http_archive(
name = "build_bazel_rules_swift",
sha256 = "8a49da750560b185804a4bc95c82d3f9cc4c2caf788960b0e21945759155fdd9",
urls = tf_mirror_urls("https://github.com/bazelbuild/rules_swift/releases/download/0.25.0/rules_swift.0.25.0.tar.gz"),
)
# https://github.com/bazelbuild/apple_support/releases
tf_http_archive(
name = "build_bazel_apple_support",
sha256 = "c604b75865c07f45828c7fffd5fdbff81415a9e84a68f71a9c1d8e3b2694e547",
urls = tf_mirror_urls("https://github.com/bazelbuild/apple_support/releases/download/0.12.1/apple_support.0.12.1.tar.gz"),
)
# https://github.com/apple/swift-protobuf/releases
tf_http_archive(
name = "com_github_apple_swift_swift_protobuf",
strip_prefix = "swift-protobuf-1.18.0/",
sha256 = "6b96d07bfbfa1334909eeb1430c69a93af71c961695b0a5f3536d087a58d8e41",
urls = tf_mirror_urls("https://github.com/apple/swift-protobuf/archive/1.18.0.zip"),
)
# https://github.com/google/xctestrunner/releases
tf_http_archive(
name = "xctestrunner",
strip_prefix = "xctestrunner-0.2.15",
sha256 = "b789cf18037c8c28d17365f14925f83b93b1f7dabcabb80333ae4331cf0bcb2f",
urls = tf_mirror_urls("https://github.com/google/xctestrunner/archive/refs/tags/0.2.15.tar.gz"),
)
tf_http_archive(
name = "nlohmann_json_lib",
build_file = "//third_party:nlohmann_json.BUILD",
sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732",
strip_prefix = "json-3.4.0",
urls = tf_mirror_urls("https://github.com/nlohmann/json/archive/v3.4.0.tar.gz"),
)
tf_http_archive(
name = "pybind11",
urls = tf_mirror_urls("https://github.com/pybind/pybind11/archive/v2.9.0.tar.gz"),
sha256 = "057fb68dafd972bc13afb855f3b0d8cf0fa1a78ef053e815d9af79be7ff567cb",
strip_prefix = "pybind11-2.9.0",
build_file = "//third_party:pybind11.BUILD",
system_build_file = "//third_party/systemlibs:pybind11.BUILD",
)
tf_http_archive(
name = "wrapt",
build_file = "//third_party:wrapt.BUILD",
sha256 = "8a6fb40e8f8b6a66b4ba81a4044c68e6a7b1782f21cfabc06fb765332b4c3e51",
strip_prefix = "wrapt-1.11.1/src/wrapt",
system_build_file = "//third_party/systemlibs:wrapt.BUILD",
urls = tf_mirror_urls("https://github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz"),
)
tf_http_archive(
name = "coremltools",
sha256 = "0d594a714e8a5fd5bd740ad112ef59155c0482e25fdc8f8efa5758f90abdcf1e",
strip_prefix = "coremltools-3.3",
build_file = "//third_party:coremltools.BUILD",
urls = tf_mirror_urls("https://github.com/apple/coremltools/archive/3.3.zip"),
)
def workspace():
# Check the bazel version before executing any repository rules, in case
# those rules rely on the version we require here.
check_bazel_version_at_least("1.0.0")
# Initialize toolchains and platforms.
_tf_toolchains()
# Import third party repositories according to go/tfbr-thirdparty.
_initialize_third_party()
# Import all other repositories. This should happen before initializing
# any external repositories, because those come with their own
# dependencies. Those recursive dependencies will only be imported if they
# don't already exist (at least if the external repository macros were
# written according to common practice to query native.existing_rule()).
_tf_repositories()
tfrt_dependencies()
# TODO(rostam): Delete after the release of Bazel built-in cc_shared_library.
# Initializes Bazel package rules' external dependencies.
rules_pkg_dependencies()
# Alias so it can be loaded without assigning to a different symbol, to prevent
# shadowing previous loads and triggering a buildifier warning.
tf_workspace2 = workspace
|
from typing import Dict, Tuple
import pickle
import telegram
import redis
from lib.telegram.state import State
from lib.telegram.handlers import (
BaseUpdateHandler,
UpdateHandlerDefault,
UpdateHandlerShowGame,
UpdateHandlerDeleteGameConfirmation,
UpdateHandlerCreateGameSubmitSize,
UpdateHandlerCreateGameSubmitPlayersLimit,
UpdateHandlerCreateGameSubmitWalls,
)
from lib.telegram.command import Command
class TelegramBotException(Exception):
pass
class TelegramBot:
"""Telegram updates handler
"""
# Hold chat states and variables for 1h
EXPIRE_SECONDS = 3600
_bot: telegram.Bot
_redis_client: redis.Redis
_handlers: Dict[State, BaseUpdateHandler]
def __init__(self, bot: telegram.Bot, redis_client: redis.Redis):
self._bot = bot
self._redis_client = redis_client
self._handlers = {
State.DEFAULT:
UpdateHandlerDefault(bot),
State.SHOW_GAME:
UpdateHandlerShowGame(bot),
State.DELETE_GAME_CONFIRMATION:
UpdateHandlerDeleteGameConfirmation(bot),
State.CREATE_GAME_SUBMIT_SIZE:
UpdateHandlerCreateGameSubmitSize(bot),
State.CREATE_GAME_SUBMIT_PLAYERS_LIMIT:
UpdateHandlerCreateGameSubmitPlayersLimit(bot),
State.CREATE_GAME_SUBMIT_WALLS:
UpdateHandlerCreateGameSubmitWalls(bot),
}
def _get_chat_state_data(self, chat_id: int) -> Tuple[State, list]:
chat_stored_data = self._redis_client.get(chat_id)
if chat_stored_data is None:
return State.DEFAULT, []
chat_data: list
try:
chat_data = list(pickle.loads(chat_stored_data))
except pickle.UnpicklingError:
# Clear whatever is stored for the given chat id
self._redis_client.delete(chat_id)
return State.DEFAULT, []
if not chat_data:
return State.DEFAULT, []
chat_state, *state_data = chat_data
return chat_state, state_data
def _set_chat_state_data(self,
chat_id: int,
chat_state: State,
state_data: list):
chat_data = [chat_state, *state_data]
try:
encoded_chat_data = pickle.dumps(chat_data)
self._redis_client.setex(chat_id,
self.EXPIRE_SECONDS,
encoded_chat_data)
except pickle.PicklingError as e:
raise TelegramBotException('Cannot save chat data') from e
def _get_handler(self, chat_state: State) -> BaseUpdateHandler:
return self._handlers.get(chat_state, self._handlers[State.DEFAULT])
def handle(self, chat_id: int, cmd: Command, args: tuple):
chat_state, state_data = self._get_chat_state_data(chat_id)
handler = self._get_handler(chat_state)
next_chat_state, next_state_data = handler.handle(chat_id,
cmd,
args,
state_data)
self._set_chat_state_data(chat_id, next_chat_state, next_state_data)
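# A minimal wiring sketch (illustrative only, not part of this module). The bot token,
# Redis settings, and the way ``cmd``/``args`` are parsed from an incoming update are
# assumptions for the example; ``Command`` members are project-specific.
#
#     bot = telegram.Bot(token="<TELEGRAM_TOKEN>")
#     redis_client = redis.Redis(host="localhost", port=6379, db=0)
#     tg_bot = TelegramBot(bot, redis_client)
#
#     # For each incoming update, parse a Command and its args elsewhere, then:
#     tg_bot.handle(chat_id=update_chat_id, cmd=parsed_command, args=parsed_args)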
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe module for Skia Swarming perf.
import calendar
import os
DEPS = [
'core',
'env',
'flavor',
'recipe_engine/file',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
'recipe_engine/time',
'run',
'vars',
]
def nanobench_flags(api, bot):
args = ['--pre_log']
if 'GPU' in bot:
args.append('--images')
args.extend(['--gpuStatsDump', 'true'])
if 'Android' in bot and 'GPU' in bot:
args.extend(['--useThermalManager', '1,1,10,1000'])
args.extend(['--scales', '1.0', '1.1'])
if 'iOS' in bot:
args.extend(['--skps', 'ignore_skps'])
configs = []
if api.vars.builder_cfg.get('cpu_or_gpu') == 'CPU':
args.append('--nogpu')
configs.extend(['8888', 'nonrendering'])
if '-arm-' not in bot:
# For Android CPU tests, these take too long and cause the task to time
# out.
configs += [ 'f16', 'srgb' ]
if '-GCE-' in bot:
configs += [ '565' ]
elif api.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
args.append('--nocpu')
gl_prefix = 'gl'
sample_count = '8'
if 'Android' in bot or 'iOS' in bot:
sample_count = '4'
# The NVIDIA_Shield has a regular OpenGL implementation. We bench that
# instead of ES.
if 'NVIDIA_Shield' not in bot:
gl_prefix = 'gles'
# The NP produces a long error stream when we run with MSAA.
# iOS crashes (skia:6399)
# Nexus7 (Tegra3) does not support MSAA.
if ('NexusPlayer' in bot or
'iOS' in bot or
'Nexus7' in bot):
sample_count = ''
elif 'Intel' in bot:
sample_count = ''
elif 'ChromeOS' in bot:
gl_prefix = 'gles'
configs.extend([gl_prefix, gl_prefix + 'srgb'])
    if sample_count != '':
configs.append(gl_prefix + 'msaa' + sample_count)
if ('TegraX1' in bot or
'Quadro' in bot or
'GTX' in bot or
('GT610' in bot and 'Ubuntu17' not in bot)):
configs.extend([gl_prefix + 'nvpr' + sample_count,
gl_prefix + 'nvprdit' + sample_count])
# We want to test both the OpenGL config and the GLES config on Linux Intel:
# GL is used by Chrome, GLES is used by ChromeOS.
if 'Intel' in bot and api.vars.is_linux:
configs.extend(['gles', 'glessrgb'])
# The following devices do not support glessrgb.
if 'glessrgb' in configs:
if ('IntelHD405' in bot or
'IntelIris540' in bot or
'IntelIris640' in bot or
'IntelBayTrail' in bot or
'IntelHD2000' in bot or
'AndroidOne' in bot or
'Nexus7' in bot or
'NexusPlayer' in bot):
configs.remove('glessrgb')
# Bench instanced rendering on a limited number of platforms
inst_config = gl_prefix + 'inst'
if 'PixelC' in bot or 'NVIDIA_Shield' in bot or 'MacMini7.1' in bot:
configs.append(inst_config)
if sample_count:
configs.append(inst_config + sample_count)
if 'CommandBuffer' in bot:
configs = ['commandbuffer']
if 'Vulkan' in bot:
configs = ['vk']
if 'ANGLE' in bot:
# Test only ANGLE configs.
configs = ['angle_d3d11_es2']
    if sample_count != '':
configs.append('angle_d3d11_es2_msaa' + sample_count)
if 'ChromeOS' in bot:
# Just run GLES for now - maybe add gles_msaa4 in the future
configs = ['gles']
args.append('--config')
args.extend(configs)
# By default, we test with GPU threading enabled. Leave PixelC devices
# running without threads, just to get some coverage of that code path.
if 'PixelC' in bot:
args.extend(['--gpuThreads', '0'])
if 'Valgrind' in bot:
# Don't care about Valgrind performance.
args.extend(['--loops', '1'])
args.extend(['--samples', '1'])
# Ensure that the bot framework does not think we have timed out.
args.extend(['--keepAlive', 'true'])
# Some people don't like verbose output.
verbose = False
match = []
if 'Android' in bot:
# Segfaults when run as GPU bench. Very large texture?
match.append('~blurroundrect')
match.append('~patch_grid') # skia:2847
match.append('~desk_carsvg')
if 'NexusPlayer' in bot:
match.append('~desk_unicodetable')
if 'Nexus5' in bot:
match.append('~keymobi_shop_mobileweb_ebay_com.skp') # skia:5178
if 'iOS' in bot:
match.append('~blurroundrect')
match.append('~patch_grid') # skia:2847
match.append('~desk_carsvg')
match.append('~keymobi')
match.append('~path_hairline')
match.append('~GLInstancedArraysBench') # skia:4714
if 'IntelIris540' in bot and 'ANGLE' in bot:
match.append('~tile_image_filter_tiled_64') # skia:6082
if ('Vulkan' in bot and ('IntelIris540' in bot or 'IntelIris640' in bot) and
'Win' in bot):
# skia:6398
match.append('~GM_varied_text_clipped_lcd')
match.append('~GM_varied_text_ignorable_clip_lcd')
match.append('~blendmode_mask_DstATop')
match.append('~blendmode_mask_SrcIn')
match.append('~blendmode_mask_SrcOut')
match.append('~blendmode_mask_Src')
match.append('~fontscaler_lcd')
match.append('~rotated_rects_aa_alternating_transparent_and_opaque_src')
match.append('~rotated_rects_aa_changing_transparent_src')
match.append('~rotated_rects_aa_same_transparent_src')
match.append('~shadermask_LCD_FF')
match.append('~srcmode_rects_1')
match.append('~text_16_LCD_88')
match.append('~text_16_LCD_BK')
match.append('~text_16_LCD_FF')
match.append('~text_16_LCD_WT')
# skia:6863
match.append('~desk_skbug6850overlay2')
match.append('~desk_googlespreadsheet')
  if ('Intel' in bot and api.vars.is_linux and 'Vulkan' not in bot):
# TODO(dogben): Track down what's causing bots to die.
verbose = True
if 'Vulkan' in bot and 'NexusPlayer' in bot:
match.append('~blendmode_') # skia:6691
if 'float_cast_overflow' in bot and 'CPU' in bot:
# skia:4632
match.append('~^floor2int_undef$')
# We do not need or want to benchmark the decodes of incomplete images.
# In fact, in nanobench we assert that the full image decode succeeds.
match.append('~inc0.gif')
match.append('~inc1.gif')
match.append('~incInterlaced.gif')
match.append('~inc0.jpg')
match.append('~incGray.jpg')
match.append('~inc0.wbmp')
match.append('~inc1.wbmp')
match.append('~inc0.webp')
match.append('~inc1.webp')
match.append('~inc0.ico')
match.append('~inc1.ico')
match.append('~inc0.png')
match.append('~inc1.png')
match.append('~inc2.png')
match.append('~inc12.png')
match.append('~inc13.png')
match.append('~inc14.png')
match.append('~inc0.webp')
match.append('~inc1.webp')
if match:
args.append('--match')
args.extend(match)
if verbose:
args.append('--verbose')
return args
def perf_steps(api):
"""Run Skia benchmarks."""
if api.vars.upload_perf_results:
api.flavor.create_clean_device_dir(
api.flavor.device_dirs.perf_data_dir)
# Run nanobench.
properties = [
'--properties',
'gitHash', api.vars.got_revision,
]
if api.vars.is_trybot:
properties.extend([
'issue', api.vars.issue,
'patchset', api.vars.patchset,
'patch_storage', api.vars.patch_storage,
])
properties.extend(['swarming_bot_id', api.vars.swarming_bot_id])
properties.extend(['swarming_task_id', api.vars.swarming_task_id])
target = 'nanobench'
args = [
target,
'-i', api.flavor.device_dirs.resource_dir,
'--skps', api.flavor.device_dirs.skp_dir,
'--images', api.flavor.device_path_join(
api.flavor.device_dirs.images_dir, 'nanobench'),
]
# Do not run svgs on Valgrind.
if 'Valgrind' not in api.vars.builder_name:
if ('Vulkan' not in api.vars.builder_name or
'NexusPlayer' not in api.vars.builder_name):
args.extend(['--svgs', api.flavor.device_dirs.svg_dir])
args.extend(nanobench_flags(api, api.vars.builder_name))
if 'Chromecast' in api.vars.builder_cfg.get('os', ''):
# Due to limited disk space, run a watered down perf run on Chromecast.
args = [target]
if api.vars.builder_cfg.get('cpu_or_gpu') == 'CPU':
args.extend(['--nogpu', '--config', '8888'])
elif api.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
args.extend(['--nocpu', '--config', 'gles'])
args.extend([
'-i', api.flavor.device_dirs.resource_dir,
'--images', api.flavor.device_path_join(
api.flavor.device_dirs.resource_dir, 'color_wheel.jpg'),
'--skps', api.flavor.device_dirs.skp_dir,
'--pre_log',
'--match', # skia:6581
'~matrixconvolution',
'~blur_image_filter',
'~blur_0.01',
'~GM_animated-image-blurs',
'~blendmode_mask_',
])
if api.vars.upload_perf_results:
now = api.time.utcnow()
ts = int(calendar.timegm(now.utctimetuple()))
json_path = api.flavor.device_path_join(
api.flavor.device_dirs.perf_data_dir,
'nanobench_%s_%d.json' % (api.vars.got_revision, ts))
args.extend(['--outResultsFile', json_path])
args.extend(properties)
keys_blacklist = ['configuration', 'role', 'test_filter']
args.append('--key')
for k in sorted(api.vars.builder_cfg.keys()):
      if k not in keys_blacklist:
args.extend([k, api.vars.builder_cfg[k]])
# See skia:2789.
extra_config_parts = api.vars.builder_cfg.get('extra_config', '').split('_')
if 'AbandonGpuContext' in extra_config_parts:
args.extend(['--abandonGpuContext'])
api.run(api.flavor.step, target, cmd=args,
abort_on_failure=False)
# Copy results to swarming out dir.
if api.vars.upload_perf_results:
api.file.ensure_directory('makedirs perf_dir',
api.path.dirname(api.vars.perf_data_dir))
api.flavor.copy_directory_contents_to_host(
api.flavor.device_dirs.perf_data_dir,
api.vars.perf_data_dir)
def RunSteps(api):
api.core.setup()
env = {}
if 'iOS' in api.vars.builder_name:
env['IOS_BUNDLE_ID'] = 'com.google.nanobench'
env['IOS_MOUNT_POINT'] = api.vars.slave_dir.join('mnt_iosdevice')
with api.env(env):
try:
if 'Chromecast' in api.vars.builder_name:
api.flavor.install(resources=True, skps=True)
else:
api.flavor.install_everything()
perf_steps(api)
finally:
api.flavor.cleanup_steps()
api.run.check_failure()
TEST_BUILDERS = [
('Perf-Android-Clang-NVIDIA_Shield-GPU-TegraX1-arm64-Debug-All-'
'Android_Vulkan'),
'Perf-Android-Clang-Nexus10-CPU-Exynos5250-arm-Release-All-Android',
'Perf-Android-Clang-Nexus5-GPU-Adreno330-arm-Debug-All-Android',
'Perf-Android-Clang-Nexus7-GPU-Tegra3-arm-Release-All-Android',
'Perf-Android-Clang-NexusPlayer-GPU-PowerVR-x86-Release-All-Android',
'Perf-Android-Clang-NexusPlayer-GPU-PowerVR-x86-Release-All-Android_Vulkan',
'Perf-Android-Clang-PixelC-GPU-TegraX1-arm64-Release-All-Android_Skpbench',
'Perf-ChromeOS-Clang-ASUSChromebookFlipC100-GPU-MaliT764-arm-Release-All',
'Perf-Chromecast-GCC-Chorizo-CPU-Cortex_A7-arm-Debug-All',
'Perf-Chromecast-GCC-Chorizo-GPU-Cortex_A7-arm-Release-All',
'Perf-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-UBSAN_float_cast_overflow',
'Perf-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All',
'Perf-Mac-Clang-MacMini7.1-CPU-AVX-x86_64-Release-All',
'Perf-Mac-Clang-MacMini7.1-GPU-IntelIris5100-x86_64-Release-All',
('Perf-Mac-Clang-MacMini7.1-GPU-IntelIris5100-x86_64-Release-All-'
'CommandBuffer'),
'Perf-Ubuntu16-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Debug-All-Vulkan',
'Perf-Ubuntu16-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Release-All',
('Perf-Ubuntu17-GCC-Golo-GPU-QuadroP400-x86_64-Release-All-'
'Valgrind_AbandonGpuContext_SK_CPU_LIMIT_SSE41'),
('Perf-Ubuntu17-GCC-Golo-GPU-QuadroP400-x86_64-Release-All-'
'Valgrind_SK_CPU_LIMIT_SSE41'),
'Perf-Win10-Clang-AlphaR2-GPU-RadeonR9M470X-x86_64-Release-All-ANGLE',
'Perf-Win10-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Release-All-ANGLE',
'Perf-Win10-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Release-All-Vulkan',
'Perf-Win10-Clang-ShuttleC-GPU-GTX960-x86_64-Release-All-ANGLE',
'Perf-Win2k8-MSVC-GCE-CPU-AVX2-x86_64-Debug-All',
'Perf-Win2k8-MSVC-GCE-CPU-AVX2-x86_64-Release-All',
'Perf-iOS-Clang-iPadPro-GPU-GT7800-arm64-Release-All',
]
def GenTests(api):
for builder in TEST_BUILDERS:
test = (
api.test(builder) +
api.properties(buildername=builder,
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('get swarming bot id',
stdout=api.raw_io.output('skia-bot-123')) +
api.step_data('get swarming task id',
stdout=api.raw_io.output('123456'))
)
if 'Win' in builder:
test += api.platform('win', 64)
if 'Chromecast' in builder:
test += api.step_data(
'read chromecast ip',
stdout=api.raw_io.output('192.168.1.2:5555'))
if 'ChromeOS' in builder:
test += api.step_data(
'read chromeos ip',
stdout=api.raw_io.output('{"user_ip":"foo@127.0.0.1"}'))
yield test
builder = 'Perf-Win10-Clang-NUCD34010WYKH-GPU-IntelHD4400-x86_64-Release-All'
yield (
api.test('trybot') +
api.properties(buildername=builder,
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.properties(patch_storage='gerrit') +
api.properties.tryserver(
buildername=builder,
gerrit_project='skia',
gerrit_url='https://skia-review.googlesource.com/',
)+
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
)
)
builder = ('Perf-Android-Clang-NexusPlayer-CPU-Moorefield-x86-Debug-All-' +
'Android')
yield (
api.test('failed_push') +
api.properties(buildername=builder,
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('push [START_DIR]/skia/resources/* '+
'/sdcard/revenge_of_the_skiabot/resources', retcode=1)
)
|
import json
# django imports
from django.contrib.auth.decorators import permission_required
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
# lfs imports
import lfs.core.utils
from lfs.core.utils import LazyEncoder
from lfs.caching.utils import lfs_get_object_or_404
from lfs.catalog.models import StaticBlock
from lfs.catalog.models import File
from lfs.manage.static_blocks.forms import StaticBlockForm
# views
@permission_required("core.manage_shop")
def manage_static_blocks(request):
"""Dispatches to the first static block or to the add static block form.
"""
try:
sb = StaticBlock.objects.all()[0]
url = reverse("lfs_manage_static_block", kwargs={"id": sb.id})
except IndexError:
url = reverse("lfs_manage_no_static_blocks")
return HttpResponseRedirect(url)
@permission_required("core.manage_shop")
def manage_static_block(request, id, template_name="manage/static_block/static_block.html"):
"""Displays the main form to manage static blocks.
"""
sb = get_object_or_404(StaticBlock, pk=id)
if request.method == "POST":
form = StaticBlockForm(instance=sb, data=request.POST)
if form.is_valid():
form.save()
return lfs.core.utils.set_message_cookie(
url=reverse("lfs_manage_static_block", kwargs={"id": sb.id}),
msg=_(u"Static block has been saved."),
)
else:
form = StaticBlockForm(instance=sb)
return render(request, template_name, {
"static_block": sb,
"static_blocks": StaticBlock.objects.all(),
"files": files(request, sb),
"form": form,
"current_id": int(id),
})
@permission_required("core.manage_shop")
def no_static_blocks(request, template_name="manage/static_block/no_static_blocks.html"):
"""Displays that no static blocks exist.
"""
return render(request, template_name, {})
# parts
@permission_required("core.manage_shop")
def files(request, sb, template_name="manage/static_block/files.html"):
"""Displays the files tab of the passed static block.
"""
return render_to_string(template_name, request=request, context={
"static_block": sb,
})
@permission_required("core.manage_shop")
def list_files(request, sb, template_name="manage/static_block/files-list.html"):
"""Displays the files tab of the passed static block.
"""
return files(request, sb, template_name=template_name)
# actions
@permission_required("core.manage_shop")
def update_files(request, id):
"""
"""
static_block = lfs_get_object_or_404(StaticBlock, pk=id)
action = request.POST.get("action")
if action == "delete":
message = _(u"Files has been deleted.")
for key in request.POST.keys():
if key.startswith("delete-"):
try:
id = key.split("-")[1]
                    File.objects.get(pk=id).delete()
except (IndexError, ObjectDoesNotExist):
pass
elif action == "update":
message = _(u"Files has been updated.")
for key, value in request.POST.items():
if key.startswith("title-"):
id = key.split("-")[1]
try:
file = File.objects.get(pk=id)
                except File.DoesNotExist:
pass
else:
file.title = value
file.save()
elif key.startswith("position-"):
try:
id = key.split("-")[1]
file = File.objects.get(pk=id)
except (IndexError, ObjectDoesNotExist):
pass
else:
file.position = value
file.save()
for i, file in enumerate(static_block.files.all()):
file.position = (i + 1) * 10
file.save()
html = (
("#files-list", list_files(request, static_block)),
)
result = json.dumps({
"html": html,
"message": message,
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def reload_files(request, id):
"""
"""
static_block = lfs_get_object_or_404(StaticBlock, pk=id)
result = list_files(request, static_block)
result = json.dumps({
"html": result,
"message": _(u"Files has been added."),
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def add_files(request, id):
"""Adds files to static block with passed id.
"""
static_block = lfs_get_object_or_404(StaticBlock, pk=id)
if request.method == "POST":
for file_content in request.FILES.getlist("files[]"):
file = File(content=static_block, title=file_content.name)
file.file.save(file_content.name, file_content, save=True)
ctype = ContentType.objects.get_for_model(static_block)
# Refresh positions
for i, file in enumerate(File.objects.filter(content_type=ctype, content_id=static_block.id)):
file.position = (i + 1) * 10
file.save()
result = json.dumps({"name": file_content.name, "type": "image/jpeg", "size": "123456789"})
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def add_static_block(request, template_name="manage/static_block/add_static_block.html"):
"""Provides a form to add a new static block.
"""
if request.method == "POST":
form = StaticBlockForm(data=request.POST)
if form.is_valid():
new_sb = form.save()
return lfs.core.utils.set_message_cookie(
url=reverse("lfs_manage_static_block", kwargs={"id": new_sb.id}),
msg=_(u"Static block has been added."),
)
else:
form = StaticBlockForm()
return render(request, template_name, {
"form": form,
"static_blocks": StaticBlock.objects.all(),
"came_from": (request.POST if request.method == 'POST' else request.GET).get("came_from",
reverse("lfs_manage_static_blocks")),
})
@permission_required("core.manage_shop")
def preview_static_block(request, id, template_name="manage/static_block/preview.html"):
"""Displays a preview of an static block
"""
sb = get_object_or_404(StaticBlock, pk=id)
return render(request, template_name, {
"static_block": sb,
})
@permission_required("core.manage_shop")
@require_POST
def sort_static_blocks(request):
"""Sorts static blocks after drag 'n drop.
"""
static_blocks = request.POST.get("objs", "").split('&')
assert (isinstance(static_blocks, list))
if len(static_blocks) > 0:
position = 10
for sb_str in static_blocks:
sb_id = sb_str.split('=')[1]
sb_obj = StaticBlock.objects.get(pk=sb_id)
sb_obj.position = position
sb_obj.save()
position = position + 10
result = json.dumps({
"message": _(u"The static blocks have been sorted."),
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
@require_POST
def delete_static_block(request, id):
"""Deletes static block with passed id.
"""
sb = get_object_or_404(StaticBlock, pk=id)
sb.delete()
return lfs.core.utils.set_message_cookie(
url=reverse("lfs_manage_static_blocks"),
msg=_(u"Static block has been deleted."),
)
|
#!/usr/bin/env python
import os
from argparse import ArgumentParser
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Boolean, or_
from sqlalchemy.orm import relationship, sessionmaker
from tabulate import tabulate
from update_genbank_assembly_stats import Species, Assembly
Base = declarative_base()
def parse_args():
parser = ArgumentParser()
parser.add_argument('db', help='Path to database')
parser.add_argument('query')
return parser.parse_args()
def main():
opts = parse_args()
query = opts.query
engine = create_engine('sqlite:///%s' % os.path.abspath(opts.db))
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
    speciess = session.query(Species).filter(or_(
        Species.common_name == query,
        Species.name == query,
        Species.genus == query,
        Species.family == query,
        Species.order == query,
        Species.class_ == query,
    )).all()
table = []
for species in speciess:
subtable = []
for assembly in species.assemblies:
            subtable.append([
                '%s (%s)' % (species.name, species.common_name),
                assembly.name,
                assembly.accession,
                float(assembly.scaffold_n50) / 1000000,
                float(assembly.contig_n50) / 1000000,
                float(assembly.num_ns) / assembly.size,
                float(assembly.num_masked) / assembly.size,
                assembly.size - assembly.num_ns,
            ])
subtable.sort(key=lambda x: x[3], reverse=True)
table += subtable
session.close()
headers = ["Species", "Name", "Accession", "Scaffold N50 (Mb)", "Contig N50 (Mb)", "%N", "%msk", "Effective size"]
    print(tabulate(table, headers, tablefmt="fancy_grid"))
if __name__ == '__main__':
main()
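# Example invocation (script name and query are hypothetical; the SQLite
# database must already have been populated by update_genbank_assembly_stats):
#
#   python query_assemblies.py assemblies.db "Homo sapiens"
#
# which prints one fancy_grid table row per assembly, sorted by scaffold N50.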
|
import os
import ssl
import math
import time
import codecs
import typing
import asyncio
import logging
import itertools
import collections
from asyncio import Event, sleep
from collections import defaultdict
from functools import partial
from elasticsearch import ConnectionTimeout
from prometheus_client import Counter, Info, Histogram, Gauge
import lbry
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
from lbry.build_info import BUILD, COMMIT_HASH, DOCKER_TAG
from lbry.schema.result import Outputs
from lbry.wallet.server.db.db import HubDB
from lbry.wallet.server.websocket import AdminWebSocket
from lbry.wallet.rpc.framing import NewlineFramer
import lbry.wallet.server.version as VERSION
from lbry.wallet.rpc import (
RPCSession, JSONRPCAutoDetect, JSONRPCConnection,
handler_invocation, RPCError, Request, JSONRPC, Notification, Batch
)
from lbry.wallet.server import util
from lbry.wallet.server.hash import sha256, hash_to_hex_str, hex_str_to_hash, HASHX_LEN, Base58Error
from lbry.wallet.server.daemon import DaemonError
from lbry.wallet.server.db.elasticsearch import SearchIndex
if typing.TYPE_CHECKING:
from lbry.wallet.server.env import Env
from lbry.wallet.server.daemon import Daemon
from lbry.wallet.server.mempool import MemPool
BAD_REQUEST = 1
DAEMON_ERROR = 2
log = logging.getLogger(__name__)
def scripthash_to_hashX(scripthash: str) -> bytes:
try:
bin_hash = hex_str_to_hash(scripthash)
if len(bin_hash) == 32:
return bin_hash[:HASHX_LEN]
except Exception:
pass
raise RPCError(BAD_REQUEST, f'{scripthash} is not a valid script hash')
def non_negative_integer(value) -> int:
"""Return param value it is or can be converted to a non-negative
integer, otherwise raise an RPCError."""
try:
value = int(value)
if value >= 0:
return value
except ValueError:
pass
raise RPCError(BAD_REQUEST,
f'{value} should be a non-negative integer')
def assert_boolean(value) -> bool:
"""Return param value it is boolean otherwise raise an RPCError."""
if value in (False, True):
return value
raise RPCError(BAD_REQUEST, f'{value} should be a boolean value')
def assert_tx_hash(value: str) -> None:
"""Raise an RPCError if the value is not a valid transaction
hash."""
try:
if len(util.hex_to_bytes(value)) == 32:
return
except Exception:
pass
raise RPCError(BAD_REQUEST, f'{value} should be a transaction hash')
class Semaphores:
"""For aiorpcX's semaphore handling."""
def __init__(self, semaphores):
self.semaphores = semaphores
self.acquired = []
async def __aenter__(self):
for semaphore in self.semaphores:
await semaphore.acquire()
self.acquired.append(semaphore)
async def __aexit__(self, exc_type, exc_value, traceback):
for semaphore in self.acquired:
semaphore.release()
class SessionGroup:
def __init__(self, gid: int):
self.gid = gid
# Concurrency per group
self.semaphore = asyncio.Semaphore(20)
NAMESPACE = "wallet_server"
HISTOGRAM_BUCKETS = (
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
)
class SessionManager:
"""Holds global state about all sessions."""
version_info_metric = Info(
'build', 'Wallet server build info (e.g. version, commit hash)', namespace=NAMESPACE
)
version_info_metric.info({
'build': BUILD,
"commit": COMMIT_HASH,
"docker_tag": DOCKER_TAG,
'version': lbry.__version__,
"min_version": util.version_string(VERSION.PROTOCOL_MIN),
"cpu_count": str(os.cpu_count())
})
session_count_metric = Gauge("session_count", "Number of connected client sessions", namespace=NAMESPACE,
labelnames=("version",))
request_count_metric = Counter("requests_count", "Number of requests received", namespace=NAMESPACE,
labelnames=("method", "version"))
tx_request_count_metric = Counter("requested_transaction", "Number of transactions requested", namespace=NAMESPACE)
tx_replied_count_metric = Counter("replied_transaction", "Number of transactions responded", namespace=NAMESPACE)
urls_to_resolve_count_metric = Counter("urls_to_resolve", "Number of urls to resolve", namespace=NAMESPACE)
resolved_url_count_metric = Counter("resolved_url", "Number of resolved urls", namespace=NAMESPACE)
interrupt_count_metric = Counter("interrupt", "Number of interrupted queries", namespace=NAMESPACE)
db_operational_error_metric = Counter(
"operational_error", "Number of queries that raised operational errors", namespace=NAMESPACE
)
db_error_metric = Counter(
"internal_error", "Number of queries raising unexpected errors", namespace=NAMESPACE
)
executor_time_metric = Histogram(
"executor_time", "SQLite executor times", namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
)
pending_query_metric = Gauge(
"pending_queries_count", "Number of pending and running sqlite queries", namespace=NAMESPACE
)
client_version_metric = Counter(
"clients", "Number of connections received per client version",
namespace=NAMESPACE, labelnames=("version",)
)
address_history_metric = Histogram(
"address_history", "Time to fetch an address history",
namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
)
notifications_in_flight_metric = Gauge(
"notifications_in_flight", "Count of notifications in flight",
namespace=NAMESPACE
)
notifications_sent_metric = Histogram(
"notifications_sent", "Time to send an address notification",
namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
)
def __init__(self, env: 'Env', db: HubDB, mempool: 'MemPool', history_cache, resolve_cache, resolve_outputs_cache,
daemon: 'Daemon', shutdown_event: asyncio.Event,
on_available_callback: typing.Callable[[], None], on_unavailable_callback: typing.Callable[[], None]):
env.max_send = max(350000, env.max_send)
self.env = env
self.db = db
self.on_available_callback = on_available_callback
self.on_unavailable_callback = on_unavailable_callback
self.daemon = daemon
self.mempool = mempool
self.shutdown_event = shutdown_event
self.logger = util.class_logger(__name__, self.__class__.__name__)
self.servers: typing.Dict[str, asyncio.AbstractServer] = {}
self.sessions: typing.Dict[int, 'LBRYElectrumX'] = {}
self.hashx_subscriptions_by_session: typing.DefaultDict[str, typing.Set[int]] = defaultdict(set)
self.mempool_statuses = {}
self.cur_group = SessionGroup(0)
self.txs_sent = 0
self.start_time = time.time()
self.history_cache = history_cache
self.resolve_cache = resolve_cache
self.resolve_outputs_cache = resolve_outputs_cache
self.notified_height: typing.Optional[int] = None
# Cache some idea of room to avoid recounting on each subscription
self.subs_room = 0
self.session_event = Event()
# Search index
self.search_index = SearchIndex(
self.env.es_index_prefix, self.env.database_query_timeout,
elastic_host=env.elastic_host, elastic_port=env.elastic_port
)
async def _start_server(self, kind, *args, **kw_args):
loop = asyncio.get_event_loop()
if kind == 'RPC':
protocol_class = LocalRPC
else:
protocol_class = self.env.coin.SESSIONCLS
protocol_factory = partial(protocol_class, self, self.db,
self.mempool, kind)
host, port = args[:2]
try:
self.servers[kind] = await loop.create_server(protocol_factory, *args, **kw_args)
except OSError as e: # don't suppress CancelledError
self.logger.error(f'{kind} server failed to listen on {host}:'
f'{port:d} :{e!r}')
else:
self.logger.info(f'{kind} server listening on {host}:{port:d}')
async def _start_external_servers(self):
"""Start listening on TCP and SSL ports, but only if the respective
port was given in the environment.
"""
env = self.env
host = env.cs_host(for_rpc=False)
if env.tcp_port is not None:
await self._start_server('TCP', host, env.tcp_port)
if env.ssl_port is not None:
sslc = ssl.SSLContext(ssl.PROTOCOL_TLS)
sslc.load_cert_chain(env.ssl_certfile, keyfile=env.ssl_keyfile)
await self._start_server('SSL', host, env.ssl_port, ssl=sslc)
async def _close_servers(self, kinds):
"""Close the servers of the given kinds (TCP etc.)."""
if kinds:
self.logger.info('closing down {} listening servers'
.format(', '.join(kinds)))
for kind in kinds:
server = self.servers.pop(kind, None)
if server:
server.close()
await server.wait_closed()
async def _manage_servers(self):
paused = False
max_sessions = self.env.max_sessions
low_watermark = int(max_sessions * 0.95)
while True:
await self.session_event.wait()
self.session_event.clear()
if not paused and len(self.sessions) >= max_sessions:
self.on_unavailable_callback()
self.logger.info(f'maximum sessions {max_sessions:,d} '
f'reached, stopping new connections until '
f'count drops to {low_watermark:,d}')
await self._close_servers(['TCP', 'SSL'])
paused = True
# Start listening for incoming connections if paused and
# session count has fallen
if paused and len(self.sessions) <= low_watermark:
self.on_available_callback()
self.logger.info('resuming listening for incoming connections')
await self._start_external_servers()
paused = False
def _group_map(self):
group_map = defaultdict(list)
for session in self.sessions.values():
group_map[session.group].append(session)
return group_map
def _sub_count(self) -> int:
return sum(s.sub_count() for s in self.sessions.values())
def _lookup_session(self, session_id):
try:
session_id = int(session_id)
except Exception:
pass
else:
for session in self.sessions.values():
if session.session_id == session_id:
return session
return None
async def _for_each_session(self, session_ids, operation):
if not isinstance(session_ids, list):
raise RPCError(BAD_REQUEST, 'expected a list of session IDs')
result = []
for session_id in session_ids:
session = self._lookup_session(session_id)
if session:
result.append(await operation(session))
else:
result.append(f'unknown session: {session_id}')
return result
async def _clear_stale_sessions(self):
"""Cut off sessions that haven't done anything for 10 minutes."""
session_timeout = self.env.session_timeout
while True:
await sleep(session_timeout // 10)
stale_cutoff = time.perf_counter() - session_timeout
stale_sessions = [session for session in self.sessions.values()
if session.last_recv < stale_cutoff]
if stale_sessions:
text = ', '.join(str(session.session_id)
for session in stale_sessions)
self.logger.info(f'closing stale connections {text}')
# Give the sockets some time to close gracefully
if stale_sessions:
await asyncio.wait([
session.close(force_after=session_timeout // 10) for session in stale_sessions
])
# Consolidate small groups
group_map = self._group_map()
groups = [group for group, sessions in group_map.items()
if len(sessions) <= 5] # fixme: apply session cost here
if len(groups) > 1:
new_group = groups[-1]
for group in groups:
for session in group_map[group]:
session.group = new_group
def _get_info(self):
"""A summary of server state."""
group_map = self._group_map()
method_counts = collections.defaultdict(int)
error_count = 0
logged = 0
paused = 0
pending_requests = 0
closing = 0
for s in self.sessions.values():
error_count += s.errors
if s.log_me:
logged += 1
if not s._can_send.is_set():
paused += 1
pending_requests += s.count_pending_items()
if s.is_closing():
closing += 1
for request, _ in s.connection._requests.values():
method_counts[request.method] += 1
return {
'closing': closing,
'daemon': self.daemon.logged_url(),
'daemon_height': self.daemon.cached_height(),
'db_height': self.db.db_height,
'errors': error_count,
'groups': len(group_map),
'logged': logged,
'paused': paused,
'pid': os.getpid(),
'peers': [],
'requests': pending_requests,
'method_counts': method_counts,
'sessions': self.session_count(),
'subs': self._sub_count(),
'txs_sent': self.txs_sent,
'uptime': util.formatted_time(time.time() - self.start_time),
'version': lbry.__version__,
}
def _group_data(self):
"""Returned to the RPC 'groups' call."""
result = []
group_map = self._group_map()
for group, sessions in group_map.items():
result.append([group.gid,
len(sessions),
sum(s.bw_charge for s in sessions),
sum(s.count_pending_items() for s in sessions),
sum(s.txs_sent for s in sessions),
sum(s.sub_count() for s in sessions),
sum(s.recv_count for s in sessions),
sum(s.recv_size for s in sessions),
sum(s.send_count for s in sessions),
sum(s.send_size for s in sessions),
])
return result
async def _electrum_and_raw_headers(self, height):
raw_header = await self.raw_header(height)
electrum_header = self.env.coin.electrum_header(raw_header, height)
return electrum_header, raw_header
async def _refresh_hsub_results(self, height):
"""Refresh the cached header subscription responses to be for height,
and record that as notified_height.
"""
# Paranoia: a reorg could race and leave db_height lower
height = min(height, self.db.db_height)
electrum, raw = await self._electrum_and_raw_headers(height)
self.hsub_results = (electrum, {'hex': raw.hex(), 'height': height})
self.notified_height = height
# --- LocalRPC command handlers
async def rpc_add_peer(self, real_name):
"""Add a peer.
real_name: "bch.electrumx.cash t50001 s50002" for example
"""
await self._notify_peer(real_name)
return f"peer '{real_name}' added"
async def rpc_disconnect(self, session_ids):
"""Disconnect sessions.
session_ids: array of session IDs
"""
async def close(session):
"""Close the session's transport."""
await session.close(force_after=2)
return f'disconnected {session.session_id}'
return await self._for_each_session(session_ids, close)
async def rpc_log(self, session_ids):
"""Toggle logging of sessions.
session_ids: array of session IDs
"""
async def toggle_logging(session):
"""Toggle logging of the session."""
session.toggle_logging()
return f'log {session.session_id}: {session.log_me}'
return await self._for_each_session(session_ids, toggle_logging)
async def rpc_daemon_url(self, daemon_url):
"""Replace the daemon URL."""
daemon_url = daemon_url or self.env.daemon_url
try:
self.daemon.set_url(daemon_url)
except Exception as e:
raise RPCError(BAD_REQUEST, f'an error occurred: {e!r}')
return f'now using daemon at {self.daemon.logged_url()}'
async def rpc_stop(self):
"""Shut down the server cleanly."""
self.shutdown_event.set()
return 'stopping'
async def rpc_getinfo(self):
"""Return summary information about the server process."""
return self._get_info()
async def rpc_groups(self):
"""Return statistics about the session groups."""
return self._group_data()
async def rpc_peers(self):
"""Return a list of data about server peers."""
return self.env.peer_hubs
async def rpc_query(self, items, limit):
"""Return a list of data about server peers."""
coin = self.env.coin
db = self.db
lines = []
def arg_to_hashX(arg):
try:
script = bytes.fromhex(arg)
lines.append(f'Script: {arg}')
return coin.hashX_from_script(script)
except ValueError:
pass
try:
hashX = coin.address_to_hashX(arg)
except Base58Error as e:
lines.append(e.args[0])
return None
lines.append(f'Address: {arg}')
return hashX
for arg in items:
hashX = arg_to_hashX(arg)
if not hashX:
continue
n = None
history = await db.limited_history(hashX, limit=limit)
for n, (tx_hash, height) in enumerate(history):
lines.append(f'History #{n:,d}: height {height:,d} '
f'tx_hash {hash_to_hex_str(tx_hash)}')
if n is None:
lines.append('No history found')
n = None
utxos = await db.all_utxos(hashX)
for n, utxo in enumerate(utxos, start=1):
lines.append(f'UTXO #{n:,d}: tx_hash '
f'{hash_to_hex_str(utxo.tx_hash)} '
f'tx_pos {utxo.tx_pos:,d} height '
f'{utxo.height:,d} value {utxo.value:,d}')
if n == limit:
break
if n is None:
lines.append('No UTXOs found')
balance = sum(utxo.value for utxo in utxos)
lines.append(f'Balance: {coin.decimal_value(balance):,f} '
f'{coin.SHORTNAME}')
return lines
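    # Illustrative call (hypothetical address and script hex): rpc_query with
    # items=["bEx...addr", "76a914...88ac"] and limit=5 appends up to 5 history
    # lines plus all UTXOs and the summed balance for each item that resolves
    # to a hashX; items that parse as neither a script nor an address are
    # reported and skipped.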
# async def rpc_reorg(self, count):
# """Force a reorg of the given number of blocks.
#
# count: number of blocks to reorg
# """
# count = non_negative_integer(count)
# if not self.bp.force_chain_reorg(count):
# raise RPCError(BAD_REQUEST, 'still catching up with daemon')
# return f'scheduled a reorg of {count:,d} blocks'
# --- External Interface
async def serve(self, mempool, server_listening_event):
"""Start the RPC server if enabled. When the event is triggered,
start TCP and SSL servers."""
try:
if self.env.rpc_port is not None:
await self._start_server('RPC', self.env.cs_host(for_rpc=True),
self.env.rpc_port)
self.logger.info(f'max session count: {self.env.max_sessions:,d}')
self.logger.info(f'session timeout: '
f'{self.env.session_timeout:,d} seconds')
self.logger.info(f'max response size {self.env.max_send:,d} bytes')
if self.env.drop_client is not None:
self.logger.info(f'drop clients matching: {self.env.drop_client.pattern}')
# Start notifications; initialize hsub_results
await mempool.start(self.db.db_height, self)
await self.start_other()
await self._start_external_servers()
server_listening_event.set()
self.on_available_callback()
# Peer discovery should start after the external servers
# because we connect to ourself
await asyncio.wait([
self._clear_stale_sessions(),
self._manage_servers()
])
except Exception as err:
if not isinstance(err, asyncio.CancelledError):
log.exception("hub server died")
raise err
finally:
await self._close_servers(list(self.servers.keys()))
log.warning("disconnect %i sessions", len(self.sessions))
if self.sessions:
await asyncio.wait([
session.close(force_after=1) for session in self.sessions.values()
])
await self.stop_other()
async def start_other(self):
pass
async def stop_other(self):
pass
def session_count(self) -> int:
"""The number of connections that we've sent something to."""
return len(self.sessions)
async def daemon_request(self, method, *args):
"""Catch a DaemonError and convert it to an RPCError."""
try:
return await getattr(self.daemon, method)(*args)
except DaemonError as e:
raise RPCError(DAEMON_ERROR, f'daemon error: {e!r}') from None
async def raw_header(self, height):
"""Return the binary header at the given height."""
try:
return await self.db.raw_header(height)
except IndexError:
raise RPCError(BAD_REQUEST, f'height {height:,d} '
'out of range') from None
async def electrum_header(self, height):
"""Return the deserialized header at the given height."""
electrum_header, _ = await self._electrum_and_raw_headers(height)
return electrum_header
async def broadcast_transaction(self, raw_tx):
hex_hash = await self.daemon.broadcast_transaction(raw_tx)
self.txs_sent += 1
return hex_hash
async def limited_history(self, hashX):
"""A caching layer."""
if hashX not in self.history_cache:
# History DoS limit. Each element of history is about 99
# bytes when encoded as JSON. This limits resource usage
# on bloated history requests, and uses a smaller divisor
# so large requests are logged before refusing them.
limit = self.env.max_send // 97
self.history_cache[hashX] = await self.db.limited_history(hashX, limit=limit)
return self.history_cache[hashX]
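    # Rough arithmetic behind the DoS limit above (illustrative): with the
    # enforced floor of env.max_send = 350,000 bytes, the cap is
    # 350000 // 97 == 3608 history entries, i.e. roughly 3608 * 99 bytes
    # (~357 KB) of JSON in the worst case, just over max_send, so oversized
    # requests get logged before being refused.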
def _notify_peer(self, peer):
notify_tasks = [
session.send_notification('blockchain.peers.subscribe', [peer])
for session in self.sessions.values() if session.subscribe_peers
]
if notify_tasks:
self.logger.info(f'notify {len(notify_tasks)} sessions of new peers')
asyncio.create_task(asyncio.wait(notify_tasks))
async def _notify_sessions(self, height, touched, new_touched):
"""Notify sessions about height changes and touched addresses."""
height_changed = height != self.notified_height
if height_changed:
await self._refresh_hsub_results(height)
if not self.sessions:
return
if height_changed:
header_tasks = [
session.send_notification('blockchain.headers.subscribe', (self.hsub_results[session.subscribe_headers_raw], ))
for session in self.sessions.values() if session.subscribe_headers
]
if header_tasks:
self.logger.info(f'notify {len(header_tasks)} sessions of new header')
asyncio.create_task(asyncio.wait(header_tasks))
for hashX in touched.intersection(self.mempool_statuses.keys()):
self.mempool_statuses.pop(hashX, None)
# self.bp._chain_executor
await asyncio.get_event_loop().run_in_executor(
None, touched.intersection_update, self.hashx_subscriptions_by_session.keys()
)
if touched or new_touched or (height_changed and self.mempool_statuses):
notified_hashxs = 0
session_hashxes_to_notify = defaultdict(list)
to_notify = touched if height_changed else new_touched
for hashX in to_notify:
if hashX not in self.hashx_subscriptions_by_session:
continue
for session_id in self.hashx_subscriptions_by_session[hashX]:
session_hashxes_to_notify[session_id].append(hashX)
notified_hashxs += 1
for session_id, hashXes in session_hashxes_to_notify.items():
asyncio.create_task(self.sessions[session_id].send_history_notifications(*hashXes))
if session_hashxes_to_notify:
self.logger.info(f'notified {len(session_hashxes_to_notify)} sessions/{notified_hashxs:,d} touched addresses')
def add_session(self, session):
self.sessions[id(session)] = session
self.session_event.set()
gid = int(session.start_time - self.start_time) // 900
if self.cur_group.gid != gid:
self.cur_group = SessionGroup(gid)
return self.cur_group
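    # Sketch of the grouping rule above (illustrative numbers): sessions are
    # bucketed into 15-minute windows (900 s) relative to server start, so a
    # session connecting 1000 s after startup gets gid == 1000 // 900 == 1 and
    # shares that group's Semaphore(20) with every other session from the same
    # window.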
def remove_session(self, session):
"""Remove a session from our sessions list if there."""
session_id = id(session)
for hashX in session.hashX_subs:
sessions = self.hashx_subscriptions_by_session[hashX]
sessions.remove(session_id)
if not sessions:
self.hashx_subscriptions_by_session.pop(hashX)
self.sessions.pop(session_id)
self.session_event.set()
class SessionBase(RPCSession):
"""Base class of ElectrumX JSON sessions.
Each session runs its tasks in asynchronous parallelism with other
sessions.
"""
MAX_CHUNK_SIZE = 40960
session_counter = itertools.count()
request_handlers: typing.Dict[str, typing.Callable] = {}
version = '0.5.7'
def __init__(self, session_manager: 'LBRYSessionManager', db: 'LevelDB', mempool: 'MemPool', kind: str):
connection = JSONRPCConnection(JSONRPCAutoDetect)
self.env = session_manager.env
super().__init__(connection=connection)
self.logger = util.class_logger(__name__, self.__class__.__name__)
self.session_manager = session_manager
self.db = db
self.mempool = mempool
self.kind = kind # 'RPC', 'TCP' etc.
self.coin = self.env.coin
self.anon_logs = self.env.anon_logs
self.txs_sent = 0
self.log_me = False
self.daemon_request = self.session_manager.daemon_request
# Hijack the connection so we can log messages
self._receive_message_orig = self.connection.receive_message
self.connection.receive_message = self.receive_message
def default_framer(self):
return NewlineFramer(self.env.max_receive)
def peer_address_str(self, *, for_log=True):
"""Returns the peer's IP address and port as a human-readable
string, respecting anon logs if the output is for a log."""
if for_log and self.anon_logs:
return 'xx.xx.xx.xx:xx'
return super().peer_address_str()
def receive_message(self, message):
if self.log_me:
self.logger.info(f'processing {message}')
return self._receive_message_orig(message)
def toggle_logging(self):
self.log_me = not self.log_me
def connection_made(self, transport):
"""Handle an incoming client connection."""
super().connection_made(transport)
self.session_id = next(self.session_counter)
context = {'conn_id': f'{self.session_id}'}
self.logger = util.ConnectionLogger(self.logger, context)
self.group = self.session_manager.add_session(self)
self.session_manager.session_count_metric.labels(version=self.client_version).inc()
peer_addr_str = self.peer_address_str()
self.logger.info(f'{self.kind} {peer_addr_str}, '
f'{self.session_manager.session_count():,d} total')
def connection_lost(self, exc):
"""Handle client disconnection."""
super().connection_lost(exc)
self.session_manager.remove_session(self)
self.session_manager.session_count_metric.labels(version=self.client_version).dec()
msg = ''
if not self._can_send.is_set():
msg += ' whilst paused'
if self.send_size >= 1024*1024:
msg += ('. Sent {:,d} bytes in {:,d} messages'
.format(self.send_size, self.send_count))
if msg:
msg = 'disconnected' + msg
self.logger.info(msg)
def count_pending_items(self):
return len(self.connection.pending_requests())
def semaphore(self):
return Semaphores([self.group.semaphore])
def sub_count(self):
return 0
async def handle_request(self, request):
"""Handle an incoming request. ElectrumX doesn't receive
notifications from client sessions.
"""
self.session_manager.request_count_metric.labels(method=request.method, version=self.client_version).inc()
if isinstance(request, Request):
handler = self.request_handlers.get(request.method)
handler = partial(handler, self)
else:
handler = None
coro = handler_invocation(handler, request)()
return await coro
class LBRYSessionManager(SessionManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.websocket = None
# self.metrics = ServerLoadData()
# self.metrics_loop = None
self.running = False
if self.env.websocket_host is not None and self.env.websocket_port is not None:
self.websocket = AdminWebSocket(self)
# async def process_metrics(self):
# while self.running:
# data = self.metrics.to_json_and_reset({
# 'sessions': self.session_count(),
# 'height': self.db.db_height,
# })
# if self.websocket is not None:
# self.websocket.send_message(data)
# await asyncio.sleep(1)
async def start_other(self):
self.running = True
if self.websocket is not None:
await self.websocket.start()
async def stop_other(self):
self.running = False
if self.websocket is not None:
await self.websocket.stop()
class LBRYElectrumX(SessionBase):
"""A TCP server that handles incoming Electrum connections."""
PROTOCOL_MIN = VERSION.PROTOCOL_MIN
PROTOCOL_MAX = VERSION.PROTOCOL_MAX
max_errors = math.inf # don't disconnect people for errors! let them happen...
session_manager: LBRYSessionManager
version = lbry.__version__
cached_server_features = {}
@classmethod
def initialize_request_handlers(cls):
cls.request_handlers.update({
'blockchain.block.get_chunk': cls.block_get_chunk,
'blockchain.block.get_header': cls.block_get_header,
'blockchain.estimatefee': cls.estimatefee,
'blockchain.relayfee': cls.relayfee,
# 'blockchain.scripthash.get_balance': cls.scripthash_get_balance,
'blockchain.scripthash.get_history': cls.scripthash_get_history,
'blockchain.scripthash.get_mempool': cls.scripthash_get_mempool,
# 'blockchain.scripthash.listunspent': cls.scripthash_listunspent,
'blockchain.scripthash.subscribe': cls.scripthash_subscribe,
'blockchain.transaction.broadcast': cls.transaction_broadcast,
'blockchain.transaction.get': cls.transaction_get,
'blockchain.transaction.get_batch': cls.transaction_get_batch,
'blockchain.transaction.info': cls.transaction_info,
'blockchain.transaction.get_merkle': cls.transaction_merkle,
# 'server.add_peer': cls.add_peer,
'server.banner': cls.banner,
'server.payment_address': cls.payment_address,
'server.donation_address': cls.donation_address,
'server.features': cls.server_features_async,
'server.peers.subscribe': cls.peers_subscribe,
'server.version': cls.server_version,
'blockchain.transaction.get_height': cls.transaction_get_height,
'blockchain.claimtrie.search': cls.claimtrie_search,
'blockchain.claimtrie.resolve': cls.claimtrie_resolve,
'blockchain.claimtrie.getclaimbyid': cls.claimtrie_getclaimbyid,
# 'blockchain.claimtrie.getclaimsbyids': cls.claimtrie_getclaimsbyids,
'blockchain.block.get_server_height': cls.get_server_height,
'mempool.get_fee_histogram': cls.mempool_compact_histogram,
'blockchain.block.headers': cls.block_headers,
'server.ping': cls.ping,
'blockchain.headers.subscribe': cls.headers_subscribe_False,
# 'blockchain.address.get_balance': cls.address_get_balance,
'blockchain.address.get_history': cls.address_get_history,
'blockchain.address.get_mempool': cls.address_get_mempool,
# 'blockchain.address.listunspent': cls.address_listunspent,
'blockchain.address.subscribe': cls.address_subscribe,
'blockchain.address.unsubscribe': cls.address_unsubscribe,
})
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not LBRYElectrumX.request_handlers:
LBRYElectrumX.initialize_request_handlers()
if not LBRYElectrumX.cached_server_features:
LBRYElectrumX.set_server_features(self.env)
self.subscribe_headers = False
self.subscribe_headers_raw = False
self.subscribe_peers = False
self.connection.max_response_size = self.env.max_send
self.hashX_subs = {}
self.sv_seen = False
self.protocol_tuple = self.PROTOCOL_MIN
self.protocol_string = None
self.daemon = self.session_manager.daemon
self.db: LevelDB = self.session_manager.db
@classmethod
def protocol_min_max_strings(cls):
return [util.version_string(ver)
for ver in (cls.PROTOCOL_MIN, cls.PROTOCOL_MAX)]
@classmethod
def set_server_features(cls, env):
"""Return the server features dictionary."""
min_str, max_str = cls.protocol_min_max_strings()
cls.cached_server_features.update({
'hosts': env.hosts_dict(),
'pruning': None,
'server_version': cls.version,
'protocol_min': min_str,
'protocol_max': max_str,
'genesis_hash': env.coin.GENESIS_HASH,
'description': env.description,
'payment_address': env.payment_address,
'donation_address': env.donation_address,
'daily_fee': env.daily_fee,
'hash_function': 'sha256',
'trending_algorithm': 'fast_ar'
})
async def server_features_async(self):
return self.cached_server_features
@classmethod
def server_version_args(cls):
"""The arguments to a server.version RPC call to a peer."""
return [cls.version, cls.protocol_min_max_strings()]
def protocol_version_string(self):
return util.version_string(self.protocol_tuple)
def sub_count(self):
return len(self.hashX_subs)
async def send_history_notifications(self, *hashXes: typing.Iterable[bytes]):
notifications = []
for hashX in hashXes:
alias = self.hashX_subs[hashX]
if len(alias) == 64:
method = 'blockchain.scripthash.subscribe'
else:
method = 'blockchain.address.subscribe'
start = time.perf_counter()
db_history = await self.session_manager.limited_history(hashX)
mempool = self.mempool.transaction_summaries(hashX)
status = ''.join(f'{hash_to_hex_str(tx_hash)}:'
f'{height:d}:'
for tx_hash, height in db_history)
status += ''.join(f'{hash_to_hex_str(tx.hash)}:'
f'{-tx.has_unconfirmed_inputs:d}:'
for tx in mempool)
if status:
status = sha256(status.encode()).hex()
else:
status = None
if mempool:
self.session_manager.mempool_statuses[hashX] = status
else:
self.session_manager.mempool_statuses.pop(hashX, None)
self.session_manager.address_history_metric.observe(time.perf_counter() - start)
notifications.append((method, (alias, status)))
print(f"notify {alias} {method}")
start = time.perf_counter()
self.session_manager.notifications_in_flight_metric.inc()
for method, args in notifications:
self.NOTIFICATION_COUNT.labels(method=method, version=self.client_version).inc()
try:
await self.send_notifications(
Batch([Notification(method, (alias, status)) for (method, (alias, status)) in notifications])
)
self.session_manager.notifications_sent_metric.observe(time.perf_counter() - start)
finally:
self.session_manager.notifications_in_flight_metric.dec()
# def get_metrics_or_placeholder_for_api(self, query_name):
# """ Do not hold on to a reference to the metrics
# returned by this method past an `await` or
# you may be working with a stale metrics object.
# """
# if self.env.track_metrics:
# # return self.session_manager.metrics.for_api(query_name)
# else:
# return APICallMetrics(query_name)
# async def run_and_cache_query(self, query_name, kwargs):
# start = time.perf_counter()
# if isinstance(kwargs, dict):
# kwargs['release_time'] = format_release_time(kwargs.get('release_time'))
# try:
# self.session_manager.pending_query_metric.inc()
# return await self.db.search_index.session_query(query_name, kwargs)
# except ConnectionTimeout:
# self.session_manager.interrupt_count_metric.inc()
# raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
# finally:
# self.session_manager.pending_query_metric.dec()
# self.session_manager.executor_time_metric.observe(time.perf_counter() - start)
async def mempool_compact_histogram(self):
return [] #self.mempool.compact_fee_histogram()
async def claimtrie_search(self, **kwargs):
start = time.perf_counter()
if 'release_time' in kwargs:
release_time = kwargs.pop('release_time')
release_times = release_time if isinstance(release_time, list) else [release_time]
try:
kwargs['release_time'] = [format_release_time(release_time) for release_time in release_times]
except ValueError:
pass
try:
self.session_manager.pending_query_metric.inc()
if 'channel' in kwargs:
channel_url = kwargs.pop('channel')
_, channel_claim, _, _ = await self.db.resolve(channel_url)
if not channel_claim or isinstance(channel_claim, (ResolveCensoredError, LookupError, ValueError)):
return Outputs.to_base64([], [], 0, None, None)
kwargs['channel_id'] = channel_claim.claim_hash.hex()
return await self.session_manager.search_index.cached_search(kwargs)
except ConnectionTimeout:
self.session_manager.interrupt_count_metric.inc()
raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
except TooManyClaimSearchParametersError as err:
await asyncio.sleep(2)
self.logger.warning("Got an invalid query from %s, for %s with more than %d elements.",
self.peer_address()[0], err.key, err.limit)
return RPCError(1, str(err))
finally:
self.session_manager.pending_query_metric.dec()
self.session_manager.executor_time_metric.observe(time.perf_counter() - start)
async def _cached_resolve_url(self, url):
if url not in self.session_manager.resolve_cache:
self.session_manager.resolve_cache[url] = await self.loop.run_in_executor(None, self.db._resolve, url)
return self.session_manager.resolve_cache[url]
async def claimtrie_resolve(self, *urls) -> str:
sorted_urls = tuple(sorted(urls))
self.session_manager.urls_to_resolve_count_metric.inc(len(sorted_urls))
try:
if sorted_urls in self.session_manager.resolve_outputs_cache:
return self.session_manager.resolve_outputs_cache[sorted_urls]
rows, extra = [], []
for url in urls:
if url not in self.session_manager.resolve_cache:
self.session_manager.resolve_cache[url] = await self._cached_resolve_url(url)
stream, channel, repost, reposted_channel = self.session_manager.resolve_cache[url]
if isinstance(channel, ResolveCensoredError):
rows.append(channel)
extra.append(channel.censor_row)
elif isinstance(stream, ResolveCensoredError):
rows.append(stream)
extra.append(stream.censor_row)
elif channel and not stream:
rows.append(channel)
# print("resolved channel", channel.name.decode())
if repost:
extra.append(repost)
if reposted_channel:
extra.append(reposted_channel)
elif stream:
# print("resolved stream", stream.name.decode())
rows.append(stream)
if channel:
# print("and channel", channel.name.decode())
extra.append(channel)
if repost:
extra.append(repost)
if reposted_channel:
extra.append(reposted_channel)
await asyncio.sleep(0)
self.session_manager.resolve_outputs_cache[sorted_urls] = result = await self.loop.run_in_executor(
None, Outputs.to_base64, rows, extra, 0, None, None
)
return result
finally:
self.session_manager.resolved_url_count_metric.inc(len(sorted_urls))
async def get_server_height(self):
return self.db.db_height
async def transaction_get_height(self, tx_hash):
self.assert_tx_hash(tx_hash)
transaction_info = await self.daemon.getrawtransaction(tx_hash, True)
if transaction_info and 'hex' in transaction_info and 'confirmations' in transaction_info:
# an unconfirmed transaction from lbrycrdd will not have a 'confirmations' field
return (self.db.db_height - transaction_info['confirmations']) + 1
elif transaction_info and 'hex' in transaction_info:
return -1
return None
async def claimtrie_getclaimbyid(self, claim_id):
rows = []
extra = []
stream = await self.db.fs_getclaimbyid(claim_id)
if not stream:
stream = LookupError(f"Could not find claim at {claim_id}")
rows.append(stream)
return Outputs.to_base64(rows, extra, 0, None, None)
def assert_tx_hash(self, value):
'''Raise an RPCError if the value is not a valid transaction
hash.'''
try:
if len(util.hex_to_bytes(value)) == 32:
return
except Exception:
pass
raise RPCError(1, f'{value} should be a transaction hash')
async def subscribe_headers_result(self):
"""The result of a header subscription or notification."""
return self.session_manager.hsub_results[self.subscribe_headers_raw]
async def _headers_subscribe(self, raw):
"""Subscribe to get headers of new blocks."""
self.subscribe_headers_raw = assert_boolean(raw)
self.subscribe_headers = True
return await self.subscribe_headers_result()
async def headers_subscribe(self):
"""Subscribe to get raw headers of new blocks."""
return await self._headers_subscribe(True)
async def headers_subscribe_True(self, raw=True):
"""Subscribe to get headers of new blocks."""
return await self._headers_subscribe(raw)
async def headers_subscribe_False(self, raw=False):
"""Subscribe to get headers of new blocks."""
return await self._headers_subscribe(raw)
async def add_peer(self, features):
"""Add a peer (but only if the peer resolves to the source)."""
return await self.peer_mgr.on_add_peer(features, self.peer_address())
async def peers_subscribe(self):
"""Return the server peers as a list of (ip, host, details) tuples."""
self.subscribe_peers = True
return self.env.peer_hubs
async def address_status(self, hashX):
"""Returns an address status.
Status is a hex string, but must be None if there is no history.
"""
# Note history is ordered and mempool unordered in electrum-server
# For mempool, height is -1 if it has unconfirmed inputs, otherwise 0
db_history = await self.session_manager.limited_history(hashX)
mempool = self.mempool.transaction_summaries(hashX)
status = ''.join(f'{hash_to_hex_str(tx_hash)}:'
f'{height:d}:'
for tx_hash, height in db_history)
status += ''.join(f'{hash_to_hex_str(tx.hash)}:'
f'{-tx.has_unconfirmed_inputs:d}:'
for tx in mempool)
if status:
status = sha256(status.encode()).hex()
else:
status = None
if mempool:
self.session_manager.mempool_statuses[hashX] = status
else:
self.session_manager.mempool_statuses.pop(hashX, None)
return status
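    # Illustrative status computation (hypothetical hashes): with one confirmed
    # tx at height 5000 and one mempool tx that has unconfirmed inputs, the
    # string hashed above is "<txid1>:5000:" + "<txid2>:-1:" and the reported
    # status is sha256(status.encode()).hex(); an empty history yields None
    # rather than the hash of the empty string.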
# async def hashX_listunspent(self, hashX):
# """Return the list of UTXOs of a script hash, including mempool
# effects."""
# utxos = await self.db.all_utxos(hashX)
# utxos = sorted(utxos)
# utxos.extend(await self.mempool.unordered_UTXOs(hashX))
# spends = await self.mempool.potential_spends(hashX)
#
# return [{'tx_hash': hash_to_hex_str(utxo.tx_hash),
# 'tx_pos': utxo.tx_pos,
# 'height': utxo.height, 'value': utxo.value}
# for utxo in utxos
# if (utxo.tx_hash, utxo.tx_pos) not in spends]
async def hashX_subscribe(self, hashX, alias):
self.hashX_subs[hashX] = alias
self.session_manager.hashx_subscriptions_by_session[hashX].add(id(self))
return await self.address_status(hashX)
async def hashX_unsubscribe(self, hashX, alias):
sessions = self.session_manager.hashx_subscriptions_by_session[hashX]
sessions.remove(id(self))
if not sessions:
self.hashX_subs.pop(hashX, None)
def address_to_hashX(self, address):
try:
return self.coin.address_to_hashX(address)
except Exception:
pass
raise RPCError(BAD_REQUEST, f'{address} is not a valid address')
# async def address_get_balance(self, address):
# """Return the confirmed and unconfirmed balance of an address."""
# hashX = self.address_to_hashX(address)
# return await self.get_balance(hashX)
async def address_get_history(self, address):
"""Return the confirmed and unconfirmed history of an address."""
hashX = self.address_to_hashX(address)
return await self.confirmed_and_unconfirmed_history(hashX)
async def address_get_mempool(self, address):
"""Return the mempool transactions touching an address."""
hashX = self.address_to_hashX(address)
return self.unconfirmed_history(hashX)
# async def address_listunspent(self, address):
# """Return the list of UTXOs of an address."""
# hashX = self.address_to_hashX(address)
# return await self.hashX_listunspent(hashX)
async def address_subscribe(self, *addresses):
"""Subscribe to an address.
address: the address to subscribe to"""
if len(addresses) > 1000:
raise RPCError(BAD_REQUEST, f'too many addresses in subscription request: {len(addresses)}')
results = []
for address in addresses:
results.append(await self.hashX_subscribe(self.address_to_hashX(address), address))
await asyncio.sleep(0)
return results
async def address_unsubscribe(self, address):
"""Unsubscribe an address.
address: the address to unsubscribe"""
hashX = self.address_to_hashX(address)
return await self.hashX_unsubscribe(hashX, address)
# async def get_balance(self, hashX):
# utxos = await self.db.all_utxos(hashX)
# confirmed = sum(utxo.value for utxo in utxos)
# unconfirmed = await self.mempool.balance_delta(hashX)
# return {'confirmed': confirmed, 'unconfirmed': unconfirmed}
# async def scripthash_get_balance(self, scripthash):
# """Return the confirmed and unconfirmed balance of a scripthash."""
# hashX = scripthash_to_hashX(scripthash)
# return await self.get_balance(hashX)
def unconfirmed_history(self, hashX):
# Note unconfirmed history is unordered in electrum-server
# height is -1 if it has unconfirmed inputs, otherwise 0
return [{'tx_hash': hash_to_hex_str(tx.hash),
'height': -tx.has_unconfirmed_inputs,
'fee': tx.fee}
for tx in self.mempool.transaction_summaries(hashX)]
async def confirmed_and_unconfirmed_history(self, hashX):
# Note history is ordered but unconfirmed is unordered in e-s
history = await self.session_manager.limited_history(hashX)
conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height}
for tx_hash, height in history]
return conf + self.unconfirmed_history(hashX)
async def scripthash_get_history(self, scripthash):
"""Return the confirmed and unconfirmed history of a scripthash."""
hashX = scripthash_to_hashX(scripthash)
return await self.confirmed_and_unconfirmed_history(hashX)
async def scripthash_get_mempool(self, scripthash):
"""Return the mempool transactions touching a scripthash."""
hashX = scripthash_to_hashX(scripthash)
return self.unconfirmed_history(hashX)
# async def scripthash_listunspent(self, scripthash):
# """Return the list of UTXOs of a scripthash."""
# hashX = scripthash_to_hashX(scripthash)
# return await self.hashX_listunspent(hashX)
async def scripthash_subscribe(self, scripthash):
"""Subscribe to a script hash.
scripthash: the SHA256 hash of the script to subscribe to"""
hashX = scripthash_to_hashX(scripthash)
return await self.hashX_subscribe(hashX, scripthash)
async def _merkle_proof(self, cp_height, height):
max_height = self.db.db_height
if not height <= cp_height <= max_height:
raise RPCError(BAD_REQUEST,
f'require header height {height:,d} <= '
f'cp_height {cp_height:,d} <= '
f'chain height {max_height:,d}')
branch, root = await self.db.header_branch_and_root(cp_height + 1, height)
return {
'branch': [hash_to_hex_str(elt) for elt in branch],
'root': hash_to_hex_str(root),
}
async def block_headers(self, start_height, count, cp_height=0, b64=False):
"""Return count concatenated block headers as hex for the main chain;
starting at start_height.
start_height and count must be non-negative integers. At most
MAX_CHUNK_SIZE headers will be returned.
"""
start_height = non_negative_integer(start_height)
count = non_negative_integer(count)
cp_height = non_negative_integer(cp_height)
max_size = self.MAX_CHUNK_SIZE
count = min(count, max_size)
headers, count = await self.db.read_headers(start_height, count)
if b64:
headers = self.db.encode_headers(start_height, count, headers)
else:
headers = headers.hex()
result = {
'base64' if b64 else 'hex': headers,
'count': count,
'max': max_size
}
if count and cp_height:
last_height = start_height + count - 1
result.update(await self._merkle_proof(cp_height, last_height))
return result
async def block_get_chunk(self, index):
"""Return a chunk of block headers as a hexadecimal string.
index: the chunk index"""
index = non_negative_integer(index)
size = self.coin.CHUNK_SIZE
start_height = index * size
headers, _ = await self.db.read_headers(start_height, size)
return headers.hex()
async def block_get_header(self, height):
"""The deserialized header at a given height.
height: the header's height"""
height = non_negative_integer(height)
return await self.session_manager.electrum_header(height)
def is_tor(self):
"""Try to detect if the connection is to a tor hidden service we are
running."""
peername = self.peer_mgr.proxy_peername()
if not peername:
return False
peer_address = self.peer_address()
return peer_address and peer_address[0] == peername[0]
async def replaced_banner(self, banner):
network_info = await self.daemon_request('getnetworkinfo')
ni_version = network_info['version']
major, minor = divmod(ni_version, 1000000)
minor, revision = divmod(minor, 10000)
revision //= 100
daemon_version = f'{major:d}.{minor:d}.{revision:d}'
for pair in [
('$SERVER_VERSION', self.version),
('$DAEMON_VERSION', daemon_version),
('$DAEMON_SUBVERSION', network_info['subversion']),
('$PAYMENT_ADDRESS', self.env.payment_address),
('$DONATION_ADDRESS', self.env.donation_address),
]:
banner = banner.replace(*pair)
return banner
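    # A minimal banner file sketch (hypothetical contents) that the
    # replacements above would fill in:
    #
    #     Welcome to $SERVER_VERSION, daemon $DAEMON_VERSION ($DAEMON_SUBVERSION)
    #     Donations: $DONATION_ADDRESS
    #
    # The $PLACEHOLDER tokens are replaced verbatim; anything else in the file
    # is returned unchanged.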
async def payment_address(self):
"""Return the payment address as a string, empty if there is none."""
return self.env.payment_address
async def donation_address(self):
"""Return the donation address as a string, empty if there is none."""
return self.env.donation_address
async def banner(self):
"""Return the server banner text."""
banner = f'You are connected to an {self.version} server.'
banner_file = self.env.banner_file
if banner_file:
try:
with codecs.open(banner_file, 'r', 'utf-8') as f:
banner = f.read()
except Exception as e:
self.logger.error(f'reading banner file {banner_file}: {e!r}')
else:
banner = await self.replaced_banner(banner)
return banner
async def relayfee(self):
"""The minimum fee a low-priority tx must pay in order to be accepted
to the daemon's memory pool."""
return await self.daemon_request('relayfee')
async def estimatefee(self, number):
"""The estimated transaction fee per kilobyte to be paid for a
transaction to be included within a certain number of blocks.
number: the number of blocks
"""
number = non_negative_integer(number)
return await self.daemon_request('estimatefee', number)
async def ping(self):
"""Serves as a connection keep-alive mechanism and for the client to
confirm the server is still responding.
"""
return None
async def server_version(self, client_name='', protocol_version=None):
"""Returns the server version as a string.
client_name: a string identifying the client
protocol_version: the protocol version spoken by the client
"""
if self.protocol_string is not None:
return self.version, self.protocol_string
if self.sv_seen and self.protocol_tuple >= (1, 4):
raise RPCError(BAD_REQUEST, f'server.version already sent')
self.sv_seen = True
if client_name:
client_name = str(client_name)
if self.env.drop_client is not None and \
self.env.drop_client.match(client_name):
self.close_after_send = True
raise RPCError(BAD_REQUEST, f'unsupported client: {client_name}')
if self.client_version != client_name[:17]:
self.session_manager.session_count_metric.labels(version=self.client_version).dec()
self.client_version = client_name[:17]
self.session_manager.session_count_metric.labels(version=self.client_version).inc()
self.session_manager.client_version_metric.labels(version=self.client_version).inc()
# Find the highest common protocol version. Disconnect if
# that protocol version in unsupported.
ptuple, client_min = util.protocol_version(protocol_version, self.PROTOCOL_MIN, self.PROTOCOL_MAX)
if ptuple is None:
ptuple, client_min = util.protocol_version(protocol_version, (1, 1, 0), (1, 4, 0))
if ptuple is None:
self.close_after_send = True
raise RPCError(BAD_REQUEST, f'unsupported protocol version: {protocol_version}')
self.protocol_tuple = ptuple
self.protocol_string = util.version_string(ptuple)
return self.version, self.protocol_string
async def transaction_broadcast(self, raw_tx):
"""Broadcast a raw transaction to the network.
raw_tx: the raw transaction as a hexadecimal string"""
# This returns errors as JSON RPC errors, as is natural
try:
hex_hash = await self.session_manager.broadcast_transaction(raw_tx)
self.txs_sent += 1
# self.mempool.wakeup.set()
# await asyncio.sleep(0.5)
self.logger.info(f'sent tx: {hex_hash}')
return hex_hash
except DaemonError as e:
error, = e.args
message = error['message']
self.logger.info(f'error sending transaction: {message}')
raise RPCError(BAD_REQUEST, 'the transaction was rejected by '
f'network rules.\n\n{message}\n[{raw_tx}]')
async def transaction_info(self, tx_hash: str):
return (await self.transaction_get_batch(tx_hash))[tx_hash]
async def transaction_get_batch(self, *tx_hashes):
self.session_manager.tx_request_count_metric.inc(len(tx_hashes))
if len(tx_hashes) > 100:
raise RPCError(BAD_REQUEST, f'too many tx hashes in request: {len(tx_hashes)}')
for tx_hash in tx_hashes:
assert_tx_hash(tx_hash)
batch_result = await self.db.get_transactions_and_merkles(tx_hashes)
needed_merkles = {}
for tx_hash in tx_hashes:
if tx_hash in batch_result and batch_result[tx_hash][0]:
continue
tx_hash_bytes = bytes.fromhex(tx_hash)[::-1]
mempool_tx = self.mempool.txs.get(tx_hash_bytes, None)
if mempool_tx:
raw_tx, block_hash = mempool_tx.raw_tx.hex(), None
else:
tx_info = await self.daemon_request('getrawtransaction', tx_hash, True)
raw_tx = tx_info['hex']
block_hash = tx_info.get('blockhash')
if block_hash:
block = await self.daemon.deserialised_block(block_hash)
height = block['height']
try:
pos = block['tx'].index(tx_hash)
except ValueError:
raise RPCError(BAD_REQUEST, f'tx hash {tx_hash} not in '
f'block {block_hash} at height {height:,d}')
needed_merkles[tx_hash] = raw_tx, block['tx'], pos, height
else:
batch_result[tx_hash] = [raw_tx, {'block_height': -1}]
if needed_merkles:
for tx_hash, (raw_tx, block_txs, pos, block_height) in needed_merkles.items():
batch_result[tx_hash] = raw_tx, {
'merkle': self._get_merkle_branch(block_txs, pos),
'pos': pos,
'block_height': block_height
}
await asyncio.sleep(0) # heavy call, give other tasks a chance
print("return tx batch")
for tx_hash, (_, info) in batch_result.items():
print(tx_hash, info['block_height'])
self.session_manager.tx_replied_count_metric.inc(len(tx_hashes))
return batch_result
async def transaction_get(self, tx_hash, verbose=False):
"""Return the serialized raw transaction given its hash
tx_hash: the transaction hash as a hexadecimal string
verbose: passed on to the daemon
"""
assert_tx_hash(tx_hash)
if verbose not in (True, False):
raise RPCError(BAD_REQUEST, f'"verbose" must be a boolean')
return await self.daemon_request('getrawtransaction', tx_hash, verbose)
def _get_merkle_branch(self, tx_hashes, tx_pos):
"""Return a merkle branch to a transaction.
tx_hashes: ordered list of hex strings of tx hashes in a block
tx_pos: index of transaction in tx_hashes to create branch for
"""
hashes = [hex_str_to_hash(hash) for hash in tx_hashes]
branch, root = self.db.merkle.branch_and_root(hashes, tx_pos)
branch = [hash_to_hex_str(hash) for hash in branch]
return branch
async def transaction_merkle(self, tx_hash, height):
"""Return the markle branch to a confirmed transaction given its hash
and height.
tx_hash: the transaction hash as a hexadecimal string
height: the height of the block it is in
"""
assert_tx_hash(tx_hash)
result = await self.transaction_get_batch(tx_hash)
if tx_hash not in result or result[tx_hash][1]['block_height'] <= 0:
raise RPCError(BAD_REQUEST, f'tx hash {tx_hash} not in '
f'block at height {height:,d}')
return result[tx_hash][1]
class LocalRPC(SessionBase):
"""A local TCP RPC server session."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = 'RPC'
self.connection._max_response_size = 0
def protocol_version_string(self):
return 'RPC'
def get_from_possible_keys(dictionary, *keys):
for key in keys:
if key in dictionary:
return dictionary[key]
def format_release_time(release_time):
    # round the release time up to the next multiple of `factor` (360 seconds) so it caches better
    # also set a default so we don't show claims in the future
def roundup_time(number, factor=360):
return int(1 + int(number / factor)) * factor
if isinstance(release_time, str) and len(release_time) > 0:
time_digits = ''.join(filter(str.isdigit, release_time))
time_prefix = release_time[:-len(time_digits)]
return time_prefix + str(roundup_time(int(time_digits)))
elif isinstance(release_time, int):
return roundup_time(release_time)
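# Worked examples for format_release_time (values are illustrative):
#   roundup_time(1600000123, 360) == 1600000200          # next multiple of 360
#   format_release_time(1600000123) == 1600000200
#   format_release_time(">1600000123") == ">1600000200"  # comparison prefix kept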
|
"""Utilities for processing .test files containing test case descriptions."""
import os.path
import os
import tempfile
import posixpath
import re
from os import remove, rmdir
import shutil
from abc import abstractmethod
import pytest # type: ignore # no pytest in typeshed
from typing import List, Tuple, Set, Optional, Iterator, Any, Dict, NamedTuple, Union
from mypy.test.config import test_data_prefix, test_temp_dir
root_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..'))
# File modify/create operation: copy module contents from source_path.
UpdateFile = NamedTuple('UpdateFile', [('module', str),
('source_path', str),
('target_path', str)])
# File delete operation: delete module file.
DeleteFile = NamedTuple('DeleteFile', [('module', str),
('path', str)])
FileOperation = Union[UpdateFile, DeleteFile]
def parse_test_cases(parent: 'DataSuiteCollector', suite: 'DataSuite',
path: str) -> Iterator['DataDrivenTestCase']:
"""Parse a single file from suite with test case descriptions.
NB: this function and DataDrivenTestCase were shared between the
myunit and pytest codepaths -- if something looks redundant,
that's likely the reason.
"""
base_path = suite.base_path
if suite.native_sep:
join = os.path.join
else:
join = posixpath.join # type: ignore
with open(path, encoding='utf-8') as f:
lst = f.readlines()
for i in range(len(lst)):
lst[i] = lst[i].rstrip('\n')
p = parse_test_data(lst, path)
# Process the parsed items. Each item has a header of form [id args],
# optionally followed by lines of text.
i = 0
while i < len(p):
ok = False
i0 = i
if p[i].id == 'case':
i += 1
files = [] # type: List[Tuple[str, str]] # path and contents
output_files = [] # type: List[Tuple[str, str]] # path and contents for output files
tcout = [] # type: List[str] # Regular output errors
tcout2 = {} # type: Dict[int, List[str]] # Output errors for incremental, runs 2+
            deleted_paths = {}  # type: Dict[int, Set[str]]  # from run number to set of paths
            stale_modules = {}  # type: Dict[int, Set[str]]  # from run number to module names
            rechecked_modules = {}  # type: Dict[int, Set[str]]  # from run number to module names
triggered = [] # type: List[str] # Active triggers (one line per incremental step)
while i < len(p) and p[i].id != 'case':
if p[i].id == 'file' or p[i].id == 'outfile':
# Record an extra file needed for the test case.
arg = p[i].arg
assert arg is not None
contents = '\n'.join(p[i].data)
contents = expand_variables(contents)
file_entry = (join(base_path, arg), contents)
if p[i].id == 'file':
files.append(file_entry)
elif p[i].id == 'outfile':
output_files.append(file_entry)
elif p[i].id in ('builtins', 'builtins_py2'):
# Use an alternative stub file for the builtins module.
arg = p[i].arg
assert arg is not None
mpath = join(os.path.dirname(path), arg)
if p[i].id == 'builtins':
fnam = 'builtins.pyi'
else:
# Python 2
fnam = '__builtin__.pyi'
with open(mpath) as f:
files.append((join(base_path, fnam), f.read()))
elif p[i].id == 'typing':
# Use an alternative stub file for the typing module.
arg = p[i].arg
assert arg is not None
src_path = join(os.path.dirname(path), arg)
with open(src_path) as f:
files.append((join(base_path, 'typing.pyi'), f.read()))
elif re.match(r'stale[0-9]*$', p[i].id):
if p[i].id == 'stale':
passnum = 1
else:
passnum = int(p[i].id[len('stale'):])
assert passnum > 0
arg = p[i].arg
if arg is None:
stale_modules[passnum] = set()
else:
stale_modules[passnum] = {item.strip() for item in arg.split(',')}
elif re.match(r'rechecked[0-9]*$', p[i].id):
if p[i].id == 'rechecked':
passnum = 1
else:
passnum = int(p[i].id[len('rechecked'):])
arg = p[i].arg
if arg is None:
rechecked_modules[passnum] = set()
else:
rechecked_modules[passnum] = {item.strip() for item in arg.split(',')}
elif p[i].id == 'delete':
# File to delete during a multi-step test case
arg = p[i].arg
assert arg is not None
m = re.match(r'(.*)\.([0-9]+)$', arg)
assert m, 'Invalid delete section: {}'.format(arg)
num = int(m.group(2))
assert num >= 2, "Can't delete during step {}".format(num)
full = join(base_path, m.group(1))
deleted_paths.setdefault(num, set()).add(full)
elif p[i].id == 'out' or p[i].id == 'out1':
tcout = p[i].data
tcout = [expand_variables(line) for line in tcout]
if os.path.sep == '\\':
tcout = [fix_win_path(line) for line in tcout]
ok = True
elif re.match(r'out[0-9]*$', p[i].id):
passnum = int(p[i].id[3:])
assert passnum > 1
output = p[i].data
output = [expand_variables(line) for line in output]
if suite.native_sep and os.path.sep == '\\':
output = [fix_win_path(line) for line in output]
tcout2[passnum] = output
ok = True
elif p[i].id == 'triggered' and p[i].arg is None:
triggered = p[i].data
else:
raise ValueError(
'Invalid section header {} in {} at line {}'.format(
p[i].id, path, p[i].line))
i += 1
for passnum in stale_modules.keys():
if passnum not in rechecked_modules:
# If the set of rechecked modules isn't specified, make it the same as the set
# of modules with a stale public interface.
rechecked_modules[passnum] = stale_modules[passnum]
if (passnum in stale_modules
and passnum in rechecked_modules
and not stale_modules[passnum].issubset(rechecked_modules[passnum])):
raise ValueError(
('Stale modules after pass {} must be a subset of rechecked '
'modules ({}:{})').format(passnum, path, p[i0].line))
if not suite.required_out_section:
ok = True
if ok:
input = p[i0].data
expand_errors(input, tcout, 'main')
for file_path, contents in files:
expand_errors(contents.split('\n'), tcout, file_path)
lastline = p[i].line if i < len(p) else p[i - 1].line + 9999
arg0 = p[i0].arg
assert arg0 is not None
case_name = add_test_name_suffix(arg0, suite.test_name_suffix)
skip = arg0.endswith('-skip')
if skip:
case_name = case_name[:-len('-skip')]
yield DataDrivenTestCase(case_name, parent, skip, input, tcout, tcout2, path,
p[i0].line, lastline,
files, output_files, stale_modules,
rechecked_modules, deleted_paths, suite.native_sep,
triggered)
if not ok:
raise ValueError(
'{}, line {}: Error in test case description'.format(
path, p[i0].line))
class DataDrivenTestCase(pytest.Item): # type: ignore # inheriting from Any
"""Holds parsed data-driven test cases, and handles directory setup and teardown."""
# TODO: only create files on setup, not during parsing
input = None # type: List[str]
output = None # type: List[str] # Output for the first pass
output2 = None # type: Dict[int, List[str]] # Output for runs 2+, indexed by run number
file = ''
line = 0
# (file path, file content) tuples
files = None # type: List[Tuple[str, str]]
expected_stale_modules = None # type: Dict[int, Set[str]]
expected_rechecked_modules = None # type: Dict[int, Set[str]]
# Files/directories to clean up after test case; (is directory, path) tuples
clean_up = None # type: List[Tuple[bool, str]]
def __init__(self,
name: str,
parent: 'DataSuiteCollector',
skip: bool,
input: List[str],
output: List[str],
output2: Dict[int, List[str]],
file: str,
line: int,
lastline: int,
files: List[Tuple[str, str]],
output_files: List[Tuple[str, str]],
expected_stale_modules: Dict[int, Set[str]],
expected_rechecked_modules: Dict[int, Set[str]],
deleted_paths: Dict[int, Set[str]],
native_sep: bool = False,
triggered: Optional[List[str]] = None,
) -> None:
super().__init__(name, parent)
self.skip = skip
self.old_cwd = None # type: Optional[str]
self.tmpdir = None # type: Optional[tempfile.TemporaryDirectory[str]]
self.input = input
self.output = output
self.output2 = output2
self.lastline = lastline
self.file = file
self.line = line
self.files = files
self.output_files = output_files
self.expected_stale_modules = expected_stale_modules
self.expected_rechecked_modules = expected_rechecked_modules
self.deleted_paths = deleted_paths
self.native_sep = native_sep
self.triggered = triggered or []
def runtest(self) -> None:
if self.skip:
pytest.skip()
suite = self.parent.obj()
suite.setup()
suite.run_case(self)
def setup(self) -> None:
self.old_cwd = os.getcwd()
self.tmpdir = tempfile.TemporaryDirectory(prefix='mypy-test-')
os.chdir(self.tmpdir.name)
os.mkdir('tmp')
encountered_files = set()
self.clean_up = []
for paths in self.deleted_paths.values():
for path in paths:
self.clean_up.append((False, path))
encountered_files.add(path)
for path, content in self.files:
dir = os.path.dirname(path)
for d in self.add_dirs(dir):
self.clean_up.append((True, d))
with open(path, 'w') as f:
f.write(content)
if path not in encountered_files:
self.clean_up.append((False, path))
encountered_files.add(path)
if re.search(r'\.[2-9]$', path):
# Make sure new files introduced in the second and later runs are accounted for
renamed_path = path[:-2]
if renamed_path not in encountered_files:
encountered_files.add(renamed_path)
self.clean_up.append((False, renamed_path))
for path, _ in self.output_files:
# Create directories for expected output and mark them to be cleaned up at the end
# of the test case.
dir = os.path.dirname(path)
for d in self.add_dirs(dir):
self.clean_up.append((True, d))
self.clean_up.append((False, path))
def add_dirs(self, dir: str) -> List[str]:
"""Add all subdirectories required to create dir.
Return an array of the created directories in the order of creation.
"""
if dir == '' or os.path.isdir(dir):
return []
else:
dirs = self.add_dirs(os.path.dirname(dir)) + [dir]
os.mkdir(dir)
return dirs
def teardown(self) -> None:
# First remove files.
for is_dir, path in reversed(self.clean_up):
if not is_dir:
try:
remove(path)
except FileNotFoundError:
# Breaking early using Ctrl+C may happen before file creation. Also, some
# files may be deleted by a test case.
pass
# Then remove directories.
for is_dir, path in reversed(self.clean_up):
if is_dir:
pycache = os.path.join(path, '__pycache__')
if os.path.isdir(pycache):
shutil.rmtree(pycache)
# As a somewhat nasty hack, ignore any dirs with .mypy_cache in the path,
# to allow test cases to intentionally corrupt the cache without provoking
# the test suite when there are still files left over.
# (Looking at / should be fine on windows because these are paths specified
# in the test cases.)
if '/.mypy_cache' in path:
continue
try:
rmdir(path)
except OSError as error:
print(' ** Error removing directory %s -- contents:' % path)
for item in os.listdir(path):
print(' ', item)
# Most likely, there are some files in the
# directory. Use rmtree to nuke the directory, but
# fail the test case anyway, since this seems like
# a bug in a test case -- we shouldn't leave
# garbage lying around. By nuking the directory,
# the next test run hopefully passes.
path = error.filename
# Be defensive -- only call rmtree if we're sure we aren't removing anything
# valuable.
if path.startswith(test_temp_dir + '/') and os.path.isdir(path):
shutil.rmtree(path)
raise
assert self.old_cwd is not None and self.tmpdir is not None, \
"test was not properly set up"
os.chdir(self.old_cwd)
try:
self.tmpdir.cleanup()
except OSError:
pass
self.old_cwd = None
self.tmpdir = None
def reportinfo(self) -> Tuple[str, int, str]:
return self.file, self.line, self.name
def repr_failure(self, excinfo: Any) -> str:
if excinfo.errisinstance(SystemExit):
# We assume that before doing exit() (which raises SystemExit) we've printed
# enough context about what happened so that a stack trace is not useful.
# In particular, uncaught exceptions during semantic analysis or type checking
# call exit() and they already print out a stack trace.
excrepr = excinfo.exconly()
else:
self.parent._prunetraceback(excinfo)
excrepr = excinfo.getrepr(style='short')
return "data: {}:{}:\n{}".format(self.file, self.line, excrepr)
def find_steps(self) -> List[List[FileOperation]]:
"""Return a list of descriptions of file operations for each incremental step.
The first list item corresponds to the first incremental step, the second for the
second step, etc. Each operation can either be a file modification/creation (UpdateFile)
or deletion (DeleteFile).
        Defaults to having two steps if there aren't any operations.
"""
steps = {} # type: Dict[int, List[FileOperation]]
for path, _ in self.files:
m = re.match(r'.*\.([0-9]+)$', path)
if m:
num = int(m.group(1))
assert num >= 2
target_path = re.sub(r'\.[0-9]+$', '', path)
module = module_from_path(target_path)
operation = UpdateFile(module, path, target_path)
steps.setdefault(num, []).append(operation)
for num, paths in self.deleted_paths.items():
assert num >= 2
for path in paths:
module = module_from_path(path)
steps.setdefault(num, []).append(DeleteFile(module, path))
max_step = max(steps) if steps else 2
return [steps.get(num, []) for num in range(2, max_step + 1)]
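# A minimal sketch of what find_steps returns (file names are hypothetical): with
# self.files containing 'tmp/m.py.2' and self.deleted_paths == {3: {'tmp/n.py'}}, the
# result is [[UpdateFile('m', 'tmp/m.py.2', 'tmp/m.py')], [DeleteFile('n', 'tmp/n.py')]],
# i.e. one list of file operations per incremental step, starting from step 2.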
def module_from_path(path: str) -> str:
path = re.sub(r'\.pyi?$', '', path)
# We can have a mix of Unix-style and Windows-style separators.
parts = re.split(r'[/\\]', path)
assert parts[0] == test_temp_dir
del parts[0]
module = '.'.join(parts)
module = re.sub(r'\.__init__$', '', module)
return module
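# For example (illustrative paths, assuming test_temp_dir == 'tmp'):
#   module_from_path('tmp/pkg/mod.py')       -> 'pkg.mod'
#   module_from_path('tmp/pkg/__init__.pyi') -> 'pkg'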
class TestItem:
"""Parsed test caseitem.
An item is of the form
[id arg]
.. data ..
"""
id = ''
arg = '' # type: Optional[str]
# Text data, array of 8-bit strings
data = None # type: List[str]
file = ''
line = 0 # Line number in file
def __init__(self, id: str, arg: Optional[str], data: List[str], file: str,
line: int) -> None:
self.id = id
self.arg = arg
self.data = data
self.file = file
self.line = line
def parse_test_data(l: List[str], fnam: str) -> List[TestItem]:
"""Parse a list of lines that represent a sequence of test items."""
ret = [] # type: List[TestItem]
data = [] # type: List[str]
id = None # type: Optional[str]
arg = None # type: Optional[str]
i = 0
i0 = 0
while i < len(l):
s = l[i].strip()
if l[i].startswith('[') and s.endswith(']') and not s.startswith('[['):
if id:
data = collapse_line_continuation(data)
data = strip_list(data)
ret.append(TestItem(id, arg, strip_list(data), fnam, i0 + 1))
i0 = i
id = s[1:-1]
arg = None
if ' ' in id:
arg = id[id.index(' ') + 1:]
id = id[:id.index(' ')]
data = []
elif l[i].startswith('[['):
data.append(l[i][1:])
elif not l[i].startswith('--'):
data.append(l[i])
elif l[i].startswith('----'):
data.append(l[i][2:])
i += 1
# Process the last item.
if id:
data = collapse_line_continuation(data)
data = strip_list(data)
ret.append(TestItem(id, arg, data, fnam, i0 + 1))
return ret
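# A small illustration of parse_test_data (the test file content is hypothetical):
#   parse_test_data(['[case testX]', 'x = 1', '[out]', 'err'], 'f.test')
# returns two TestItems whose (id, arg, data) are ('case', 'testX', ['x = 1']) and
# ('out', None, ['err']).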
def strip_list(l: List[str]) -> List[str]:
"""Return a stripped copy of l.
Strip whitespace at the end of all lines, and strip all empty
lines from the end of the array.
"""
r = [] # type: List[str]
for s in l:
# Strip spaces at end of line
r.append(re.sub(r'\s+$', '', s))
while len(r) > 0 and r[-1] == '':
r.pop()
return r
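# For example (hypothetical input): strip_list(['a  ', '', 'b ', '', '']) returns
# ['a', '', 'b'] -- trailing whitespace is removed from each line and empty lines
# are dropped from the end only.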
def collapse_line_continuation(l: List[str]) -> List[str]:
r = [] # type: List[str]
cont = False
for s in l:
ss = re.sub(r'\\$', '', s)
if cont:
r[-1] += re.sub('^ +', '', ss)
else:
r.append(ss)
cont = s.endswith('\\')
return r
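# Sketch of collapse_line_continuation behaviour (hypothetical input):
#   collapse_line_continuation(['first \\', '    second']) -> ['first second']
# A trailing backslash joins the next line onto the current one, with the
# continuation's leading spaces removed.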
def expand_variables(s: str) -> str:
return s.replace('<ROOT>', root_dir)
def expand_errors(input: List[str], output: List[str], fnam: str) -> None:
"""Transform comments such as '# E: message' or
'# E:3: message' in input.
The result is lines like 'fnam:line: error: message'.
"""
for i in range(len(input)):
# The first in the split things isn't a comment
for possible_err_comment in input[i].split(' # ')[1:]:
m = re.search(
                r'^([ENW]):((?P<col>\d+):)? (?P<message>.*)$',
possible_err_comment.strip())
if m:
if m.group(1) == 'E':
severity = 'error'
elif m.group(1) == 'N':
severity = 'note'
elif m.group(1) == 'W':
severity = 'warning'
col = m.group('col')
if col is None:
output.append(
'{}:{}: {}: {}'.format(fnam, i + 1, severity, m.group('message')))
else:
output.append('{}:{}:{}: {}: {}'.format(
fnam, i + 1, col, severity, m.group('message')))
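# Example of expand_errors on an illustrative input: for
#   input = ['x = f()  # E: Bad type', 'y = 1']
# the call expand_errors(input, out, 'main') appends 'main:1: error: Bad type' to out,
# while a comment of the form '# N:3: some note' on line 1 would yield
# 'main:1:3: note: some note'.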
def fix_win_path(line: str) -> str:
r"""Changes Windows paths to Linux paths in error messages.
E.g. foo\bar.py -> foo/bar.py.
"""
line = line.replace(root_dir, root_dir.replace('\\', '/'))
m = re.match(r'^([\S/]+):(\d+:)?(\s+.*)', line)
if not m:
return line
else:
filename, lineno, message = m.groups()
return '{}:{}{}'.format(filename.replace('\\', '/'),
lineno or '', message)
def fix_cobertura_filename(line: str) -> str:
r"""Changes filename paths to Linux paths in Cobertura output files.
E.g. filename="pkg\subpkg\a.py" -> filename="pkg/subpkg/a.py".
"""
m = re.search(r'<class .* filename="(?P<filename>.*?)"', line)
if not m:
return line
return '{}{}{}'.format(line[:m.start(1)],
m.group('filename').replace('\\', '/'),
line[m.end(1):])
##
#
# pytest setup
#
##
# This function name is special to pytest. See
# https://docs.pytest.org/en/latest/reference.html#initialization-hooks
def pytest_addoption(parser: Any) -> None:
group = parser.getgroup('mypy')
group.addoption('--update-data', action='store_true', default=False,
help='Update test data to reflect actual output'
' (supported only for certain tests)')
group.addoption('--mypy-verbose', action='count',
help='Set the verbose flag when creating mypy Options')
# This function name is special to pytest. See
# http://doc.pytest.org/en/latest/writing_plugins.html#collection-hooks
def pytest_pycollect_makeitem(collector: Any, name: str,
obj: object) -> 'Optional[Any]':
"""Called by pytest on each object in modules configured in conftest.py files.
collector is pytest.Collector, returns Optional[pytest.Class]
"""
if isinstance(obj, type):
# Only classes derived from DataSuite contain test cases, not the DataSuite class itself
if issubclass(obj, DataSuite) and obj is not DataSuite:
# Non-None result means this obj is a test case.
# The collect method of the returned DataSuiteCollector instance will be called later,
# with self.obj being obj.
return DataSuiteCollector(name, parent=collector)
return None
class DataSuiteCollector(pytest.Class): # type: ignore # inheriting from Any
def collect(self) -> Iterator[pytest.Item]: # type: ignore
"""Called by pytest on each of the object returned from pytest_pycollect_makeitem"""
# obj is the object for which pytest_pycollect_makeitem returned self.
suite = self.obj # type: DataSuite
for f in suite.files:
yield from parse_test_cases(self, suite, os.path.join(suite.data_prefix, f))
def add_test_name_suffix(name: str, suffix: str) -> str:
# Find magic suffix of form "-foobar" (used for things like "-skip").
m = re.search(r'-[-A-Za-z0-9]+$', name)
if m:
# Insert suite-specific test name suffix before the magic suffix
# which must be the last thing in the test case name since we
# are using endswith() checks.
magic_suffix = m.group(0)
return name[:-len(magic_suffix)] + suffix + magic_suffix
else:
return name + suffix
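# For instance (hypothetical names): add_test_name_suffix('testFoo-skip', '_cached')
# returns 'testFoo_cached-skip', keeping the magic '-skip' suffix last, while
# add_test_name_suffix('testFoo', '_cached') simply returns 'testFoo_cached'.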
def is_incremental(testcase: DataDrivenTestCase) -> bool:
return 'incremental' in testcase.name.lower() or 'incremental' in testcase.file
def has_stable_flags(testcase: DataDrivenTestCase) -> bool:
if any(re.match(r'# flags[2-9]:', line) for line in testcase.input):
return False
for filename, contents in testcase.files:
if os.path.basename(filename).startswith('mypy.ini.'):
return False
return True
class DataSuite:
# option fields - class variables
files = None # type: List[str]
base_path = test_temp_dir
# Allow external users of the test code to override the data prefix
data_prefix = test_data_prefix
required_out_section = False
native_sep = False
# Name suffix automatically added to each test case in the suite (can be
# used to distinguish test cases in suites that share data files)
test_name_suffix = ''
def setup(self) -> None:
"""Setup fixtures (ad-hoc)"""
pass
@abstractmethod
def run_case(self, testcase: DataDrivenTestCase) -> None:
raise NotImplementedError
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('djangobmf_address', '0001_version_0_2_0'),
]
operations = [
migrations.RemoveField(
model_name='address',
name='uuid',
),
]
|
import numpy as np
import pyroomacoustics as pra
import matplotlib.pyplot as plt
from scipy.io import wavfile
from multinmf_conv_em import multinmf_conv_em_wrapper
from multinmf_recons_im import multinmf_recons_im
from utilities import partial_rir
# get the speed of sound from pyroomacoustics
c = pra.constants.get('c')
if __name__ == '__main__':
# parameters
fs = 16000
nfft = 2048 # supposedly optimal at 16 kHz (Ozerov and Fevote)
max_order = 10 # max image sources order in simulation
# convolutive separation parameters
partial_length = 2 # number of image sources to use in the 'raking'
n_latent_var = 4 # number of latent variables in the NMF
stft_win_len = 2048 # supposedly optimal at 16 kHz
# the speech samples
r1, speech1 = wavfile.read('data/Speech/fq_sample1.wav')
speech1 /= np.std(speech1)
r2, speech2 = wavfile.read('data/Speech/fq_sample2.wav')
speech2 /= np.std(speech2)
if r1 != fs or r2 != fs:
raise ValueError('The speech samples should have the same sample rate as the simulation')
# a 5 wall room
floorplan = np.array([[0, 0], [6, 0], [6, 5], [2,5], [0,3]]).T
room = pra.Room.from_corners(floorplan, fs=fs, absorption=0.4, max_order=max_order)
room.extrude(4., absorption=0.4) # add the third dimension
# add two sources
room.add_source([2, 1.5, 1.8], signal=speech1)
room.add_source([5.5, 4, 1.7], signal=speech2)
# now add a few microphones
mic_locations = np.array([[3.65, 3.5, 1.5], [3.6, 3.5, 1.55], [3.55, 3.7, 1.7]]).T
room.add_microphone_array(
pra.MicrophoneArray(mic_locations, fs)
)
# simulate propagation (through image source model)
room.compute_rir()
room.simulate()
# compute partial rir
freqvec = np.fft.rfftfreq(nfft, 1 / fs)
partial_rirs = partial_rir(room, partial_length, freqvec)
wavfile.write('data/Speech/two_sources_mix.wav', fs, room.mic_array.signals.T)
# load dictionary W
dictionary_W = np.load("W_dictionary_em.npy")
# run NMF
sep_sources = multinmf_conv_em_wrapper(
room.mic_array.signals.T, partial_rirs,
n_latent_var, n_iter=100,
A_init=partial_rirs, W_init=dictionary_W,
update_w=False)
# Plots
plt.figure()
plt.subplot(1,2,1)
plt.specgram(speech1, Fs=r1, NFFT=nfft)
plt.subplot(1,2,2)
plt.specgram(speech2, Fs=r2, NFFT=nfft)
plt.title('Original sources')
plt.figure()
for j,s in enumerate(sep_sources):
# write the separated source to a wav file
out_filename = 'data/Speech/' + 'speech_source_' + str(j) + '_MU.wav'
wavfile.write(out_filename, room.fs, s)
# show spectrogram
plt.subplot(1,2,j+1)
plt.specgram(s[:,0], Fs=room.fs, NFFT=nfft)
plt.title('Reconstructed sources')
# show all these nice plots
plt.show()
|
import textwrap
from typing import (
Sequence,
)
from ai.backend.client.session import api_session
from ai.backend.client.output.fields import storage_fields
from ai.backend.client.output.types import FieldSpec, PaginatedResult
from ai.backend.client.pagination import generate_paginated_results
from .base import api_function, BaseFunction
__all__ = (
'Storage',
)
_default_list_fields = (
storage_fields['id'],
storage_fields['backend'],
storage_fields['capabilities'],
)
_default_detail_fields = (
storage_fields['id'],
storage_fields['backend'],
storage_fields['path'],
storage_fields['fsprefix'],
storage_fields['capabilities'],
storage_fields['hardware_metadata'],
)
class Storage(BaseFunction):
"""
Provides a shortcut of :func:`Admin.query()
    <ai.backend.client.admin.Admin.query>` that fetches various storage volume
information keyed by vfolder hosts.
.. note::
All methods in this function class require your API access key to
have the *super-admin* privilege.
"""
@api_function
@classmethod
async def paginated_list(
cls,
status: str = 'ALIVE',
*,
fields: Sequence[FieldSpec] = _default_list_fields,
page_offset: int = 0,
page_size: int = 20,
filter: str = None,
order: str = None,
) -> PaginatedResult[dict]:
"""
        Lists the storage volumes.
You need an admin privilege for this operation.
"""
return await generate_paginated_results(
'storage_volume_list',
{
'filter': (filter, 'String'),
'order': (order, 'String'),
},
fields,
page_offset=page_offset,
page_size=page_size,
)
@api_function
@classmethod
async def detail(
cls,
vfolder_host: str,
fields: Sequence[FieldSpec] = _default_detail_fields,
) -> dict:
query = textwrap.dedent("""\
query($vfolder_host: String!) {
storage_volume(id: $vfolder_host) {$fields}
}
""")
query = query.replace('$fields', ' '.join(f.field_ref for f in fields))
variables = {'vfolder_host': vfolder_host}
data = await api_session.get().Admin._query(query, variables)
return data['storage_volume']
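# Rough usage sketch (not part of the original module; assumes an active API session
# and an illustrative volume name):
#   volumes = await Storage.paginated_list(page_size=10)
#   info = await Storage.detail('local:volume1')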
|
import hashlib
import base64
import os
from django.conf import settings
from django.http import JsonResponse
def handle(request):
method = request.method
if(method == 'POST'):
data = request.read()
id_mask = create_mask(data)
return JsonResponse({'id': id_mask})
if(method == 'GET'):
id_list = get_mask_id()
return JsonResponse(id_list, safe=False)
def create_mask(data: str) -> str:
"""[Store an mask with unique ID]
Content of the POST request
Create a new instance mask with unique ID in HASH
Returns:
[str]:[id mask]
"""
data_path = settings.STORAGE_DIR
mask_md5 = hashlib.md5(str(data).encode())
mask = base64.b64decode(data)
id_mask = mask_md5.hexdigest()
    with open(data_path + '/mask/mask_' + id_mask + '.nii', 'wb') as decode_mask:
        decode_mask.write(mask)
return id_mask
def get_mask_id():
"""
Returns:
[list]: [Return a list with all Mask id content in the storage]
"""
storage_folder = settings.STORAGE_DIR+'/image'
list_id = []
for file in os.listdir(storage_folder):
if os.path.isfile(os.path.join(storage_folder, file)):
id = file[6:-4]
list_id.append(id)
return list_id
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Generate Planning Path
"""
import argparse
import atexit
import logging
import os
import sys
import rospy
import scipy.signal as signal
from logger import Logger
from numpy import genfromtxt
from modules.canbus.proto import chassis_pb2
from modules.common.proto import pnc_point_pb2
from modules.control.proto import pad_msg_pb2
from modules.hmi.proto import runtime_status_pb2
from modules.localization.proto import localization_pb2
from modules.planning.proto import planning_pb2
# Import hmi_status_helper
APOLLO_ROOT = os.path.join(os.path.dirname(__file__), '../../../')
hmi_utils_path = os.path.join(APOLLO_ROOT, 'modules/hmi/utils')
if hmi_utils_path not in sys.path:
sys.path.append(hmi_utils_path)
import hmi_status_helper
SEARCH_INTERVAL = 1000
class RtkPlayer(object):
"""
rtk player class
"""
def __init__(self, record_file, speedmultiplier, completepath, replan):
"""Init player."""
self.firstvalid = False
self.logger = Logger.get_logger(tag="RtkPlayer")
self.logger.info("Load record file from: %s" % record_file)
try:
file_handler = open(record_file, 'r')
        except IOError:
            self.logger.error("Cannot open file: " + record_file)
            sys.exit(1)
self.data = genfromtxt(file_handler, delimiter=',', names=True)
file_handler.close()
self.localization = localization_pb2.LocalizationEstimate()
self.chassis = chassis_pb2.Chassis()
self.padmsg = pad_msg_pb2.PadMessage()
self.localization_received = False
self.chassis_received = False
self.planning_pub = rospy.Publisher(
'/apollo/planning', planning_pb2.ADCTrajectory, queue_size=1)
self.speedmultiplier = speedmultiplier / 100
self.terminating = False
self.sequence_num = 0
b, a = signal.butter(6, 0.05, 'low')
self.data['acceleration'] = signal.filtfilt(b, a,
self.data['acceleration'])
self.start = 0
self.end = 0
self.closestpoint = 0
self.automode = False
self.replan = (replan == 't')
self.completepath = (completepath == 't')
self.estop = False
# Report status to HMI.
status_pb = runtime_status_pb2.RuntimeStatus()
status_pb.tools.planning_ready = True
hmi_status_helper.HMIStatusHelper.report_status(status_pb)
self.logger.info("Planning Ready")
def localization_callback(self, data):
"""
New localization Received
"""
self.localization.CopyFrom(data)
self.carx = self.localization.pose.position.x
self.cary = self.localization.pose.position.y
self.carz = self.localization.pose.position.z
self.localization_received = True
def chassis_callback(self, data):
"""
New chassis Received
"""
self.chassis.CopyFrom(data)
self.automode = (self.chassis.driving_mode ==
chassis_pb2.Chassis.COMPLETE_AUTO_DRIVE)
self.chassis_received = True
def padmsg_callback(self, data):
"""
New message received
"""
if self.terminating == True:
self.logger.info("terminating when receive padmsg")
return
self.padmsg.CopyFrom(data)
def restart(self):
self.logger.info("before replan self.start=%s, self.closestpoint=%s" %
(self.start, self.closestpoint))
self.closestpoint = self.closest_dist()
self.start = max(self.closestpoint - 100, 0)
self.starttime = rospy.get_time()
self.end = min(self.start + 1000, len(self.data) - 1)
self.logger.info("finish replan at time %s, self.closestpoint=%s" %
(self.starttime, self.closestpoint))
def closest_dist(self):
shortest_dist_sqr = float('inf')
self.logger.info("before closest self.start=%s" %
(self.start))
        search_start = max(self.start - SEARCH_INTERVAL // 2, 0)
        search_end = min(self.start + SEARCH_INTERVAL // 2, len(self.data))
start = self.start
for i in range(search_start, search_end):
dist_sqr = (self.carx - self.data['x'][i]) ** 2 + \
(self.cary - self.data['y'][i]) ** 2
if dist_sqr <= shortest_dist_sqr:
start = i
shortest_dist_sqr = dist_sqr
return start
def closest_time(self):
time_elapsed = rospy.get_time() - self.starttime
closest_time = self.start
time_diff = self.data['time'][closest_time] - \
self.data['time'][self.closestpoint]
while time_diff < time_elapsed and closest_time < (
len(self.data) - 1):
closest_time = closest_time + 1
time_diff = self.data['time'][closest_time] - \
self.data['time'][self.closestpoint]
return closest_time
def publish_planningmsg(self):
"""
Generate New Path
"""
if not self.localization_received:
self.logger.warning(
"locaization not received yet when publish_planningmsg")
return
planningdata = planning_pb2.ADCTrajectory()
now = rospy.get_time()
planningdata.header.timestamp_sec = now
planningdata.header.module_name = "planning"
planningdata.header.sequence_num = self.sequence_num
self.sequence_num = self.sequence_num + 1
self.logger.debug(
"publish_planningmsg: before adjust start: self.start = %s, self.end=%s"
% (self.start, self.end))
if self.replan or self.sequence_num <= 1 or not self.automode:
self.logger.info(
"trigger replan: self.replan=%s, self.sequence_num=%s, self.automode=%s"
% (self.replan, self.sequence_num, self.automode))
self.restart()
else:
timepoint = self.closest_time()
distpoint = self.closest_dist()
self.start = max(min(timepoint, distpoint) - 100, 0)
self.end = min(max(timepoint, distpoint) + 900, len(self.data) - 1)
xdiff_sqr = (self.data['x'][timepoint] - self.carx)**2
ydiff_sqr = (self.data['y'][timepoint] - self.cary)**2
if xdiff_sqr + ydiff_sqr > 4.0:
self.logger.info("trigger replan: distance larger than 2.0")
self.restart()
if self.completepath:
self.start = 0
self.end = len(self.data) - 1
self.logger.debug(
"publish_planningmsg: after adjust start: self.start = %s, self.end=%s"
% (self.start, self.end))
for i in range(self.start, self.end):
adc_point = pnc_point_pb2.TrajectoryPoint()
adc_point.path_point.x = self.data['x'][i]
adc_point.path_point.y = self.data['y'][i]
adc_point.path_point.z = self.data['z'][i]
adc_point.v = self.data['speed'][i] * self.speedmultiplier
adc_point.a = self.data['acceleration'][
i] * self.speedmultiplier
adc_point.path_point.kappa = self.data['curvature'][i]
adc_point.path_point.dkappa = self.data[
'curvature_change_rate'][i]
time_diff = self.data['time'][i] - \
self.data['time'][self.closestpoint]
adc_point.relative_time = time_diff / self.speedmultiplier - (
now - self.starttime)
adc_point.path_point.theta = self.data['theta'][i]
adc_point.path_point.s = self.data['s'][i]
planningdata.trajectory_point.extend([adc_point])
planningdata.estop.is_estop = self.estop
planningdata.total_path_length = self.data['s'][self.end] - \
self.data['s'][self.start]
planningdata.total_path_time = self.data['time'][self.end] - \
self.data['time'][self.start]
planningdata.gear = int(self.data['gear'][self.closest_time()])
self.planning_pub.publish(planningdata)
self.logger.debug("Generated Planning Sequence: " +
str(self.sequence_num - 1))
def shutdown(self):
"""
shutdown rosnode
"""
self.terminating = True
self.logger.info("Shutting Down...")
rospy.sleep(0.2)
def quit(self, signum, frame):
"""
shutdown the keypress thread
"""
sys.exit(0)
def main():
"""
Main rosnode
"""
parser = argparse.ArgumentParser(
description='Generate Planning Trajectory from Data File')
parser.add_argument(
'-s',
'--speedmulti',
help='Speed multiplier in percentage (Default is 100) ',
type=float,
default='100')
parser.add_argument(
'-c', '--complete', help='Generate complete path (t/F)', default='F')
parser.add_argument(
'-r',
'--replan',
help='Always replan based on current position(t/F)',
default='F')
args = vars(parser.parse_args())
rospy.init_node('rtk_player', anonymous=True)
Logger.config(
log_file=os.path.join(APOLLO_ROOT, 'data/log/rtk_player.log'),
use_stdout=True,
log_level=logging.DEBUG)
record_file = os.path.join(APOLLO_ROOT, 'data/log/garage.csv')
player = RtkPlayer(record_file, args['speedmulti'],
args['complete'].lower(), args['replan'].lower())
atexit.register(player.shutdown)
rospy.Subscriber('/apollo/canbus/chassis', chassis_pb2.Chassis,
player.chassis_callback)
rospy.Subscriber('/apollo/localization/pose',
localization_pb2.LocalizationEstimate,
player.localization_callback)
rospy.Subscriber('/apollo/control/pad', pad_msg_pb2.PadMessage,
player.padmsg_callback)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
player.publish_planningmsg()
rate.sleep()
if __name__ == '__main__':
main()
|
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class PythonSynthDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfFreeBSD # llvm.org/pr20545 bogus output confuses buildbot parser
def test_with_run_command(self):
"""Test data formatter commands."""
self.build()
self.data_formatter_commands()
def test_rdar10960550_with_run_command(self):
"""Test data formatter commands."""
self.build()
self.rdar10960550_formatter_commands()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.cpp', '// Set break point at this line.')
self.line2 = line_number('main.cpp',
'// Set cast break point at this line.')
self.line3 = line_number(
'main.cpp', '// Set second cast break point at this line.')
def data_formatter_commands(self):
"""Test using Python synthetic children provider."""
_, process, thread, _ = lldbutil.run_to_line_breakpoint(
self, lldb.SBFileSpec("main.cpp"), self.line)
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# print the f00_1 variable without a synth
self.expect("frame variable f00_1",
substrs=['a = 1',
'b = 2',
'r = 34'])
# now set up the synth
self.runCmd("script from fooSynthProvider import *")
self.runCmd("type synth add -l fooSynthProvider foo")
self.runCmd("type synth add -l wrapfooSynthProvider wrapfoo")
self.expect("type synthetic list foo", substrs=['fooSynthProvider'])
# note that the value of fake_a depends on target byte order
if process.GetByteOrder() == lldb.eByteOrderLittle:
fake_a_val = 0x02000000
else:
fake_a_val = 0x00000100
# check that we get the two real vars and the fake_a variables
self.expect(
"frame variable f00_1",
substrs=[
'a = 1',
'fake_a = %d' % fake_a_val,
'r = 34',
])
# check that we do not get the extra vars
self.expect("frame variable f00_1", matching=False,
substrs=['b = 2'])
# check access to members by name
self.expect('frame variable f00_1.fake_a',
substrs=['%d' % fake_a_val])
# check access to members by index
self.expect('frame variable f00_1[1]',
substrs=['%d' % fake_a_val])
# put synthetic children in summary in several combinations
self.runCmd(
"type summary add --summary-string \"fake_a=${svar.fake_a}\" foo")
self.expect('frame variable f00_1',
substrs=['fake_a=%d' % fake_a_val])
self.runCmd(
"type summary add --summary-string \"fake_a=${svar[1]}\" foo")
self.expect('frame variable f00_1',
substrs=['fake_a=%d' % fake_a_val])
# clear the summary
self.runCmd("type summary delete foo")
# check that the caching does not span beyond the stopoint
self.runCmd("n")
if process.GetByteOrder() == lldb.eByteOrderLittle:
fake_a_val = 0x02000000
else:
fake_a_val = 0x00000200
self.expect(
"frame variable f00_1",
substrs=[
'a = 2',
'fake_a = %d' % fake_a_val,
'r = 34',
])
# check that altering the object also alters fake_a
self.runCmd("expr f00_1.a = 280")
if process.GetByteOrder() == lldb.eByteOrderLittle:
fake_a_val = 0x02000001
else:
fake_a_val = 0x00011800
self.expect(
"frame variable f00_1",
substrs=[
'a = 280',
'fake_a = %d' % fake_a_val,
'r = 34',
])
# check that expanding a pointer does the right thing
if process.GetByteOrder() == lldb.eByteOrderLittle:
fake_a_val = 0x0d000000
else:
fake_a_val = 0x00000c00
self.expect(
"frame variable --ptr-depth 1 f00_ptr",
substrs=[
'a = 12',
'fake_a = %d' % fake_a_val,
'r = 45',
])
self.expect(
"frame variable --ptr-depth 1 wrapper",
substrs=[
'a = 12',
'fake_a = %d' % fake_a_val,
'r = 45',
])
# now add a filter.. it should fail
self.expect("type filter add foo --child b --child j", error=True,
substrs=['cannot add'])
# we get the synth again..
self.expect('frame variable f00_1', matching=False,
substrs=['b = 1',
'j = 17'])
self.expect(
"frame variable --ptr-depth 1 f00_ptr",
substrs=[
'a = 12',
'fake_a = %d' % fake_a_val,
'r = 45',
])
self.expect(
"frame variable --ptr-depth 1 wrapper",
substrs=[
'a = 12',
'fake_a = %d' % fake_a_val,
'r = 45',
])
# Test that the custom dereference operator for `wrapfoo` works through
# the Python API. The synthetic children provider gets queried at
# slightly different times in this case.
wrapper_var = thread.GetSelectedFrame().FindVariable('wrapper')
foo_var = wrapper_var.Dereference()
self.assertEqual(foo_var.GetNumChildren(), 3)
self.assertEqual(foo_var.GetChildAtIndex(0).GetName(), 'a')
self.assertEqual(foo_var.GetChildAtIndex(1).GetName(), 'fake_a')
self.assertEqual(foo_var.GetChildAtIndex(2).GetName(), 'r')
# now delete the synth and add the filter
self.runCmd("type synth delete foo")
self.runCmd("type synth delete wrapfoo")
self.runCmd("type filter add foo --child b --child j")
self.expect('frame variable f00_1',
substrs=['b = 2',
'j = 18'])
self.expect("frame variable --ptr-depth 1 f00_ptr", matching=False,
substrs=['r = 45',
'fake_a = %d' % fake_a_val,
'a = 12'])
self.expect("frame variable --ptr-depth 1 wrapper", matching=False,
substrs=['r = 45',
'fake_a = %d' % fake_a_val,
'a = 12'])
# now add the synth and it should fail
self.expect("type synth add -l fooSynthProvider foo", error=True,
substrs=['cannot add'])
# check the listing
self.expect('type synth list', matching=False,
substrs=['foo',
'Python class fooSynthProvider'])
self.expect('type filter list',
substrs=['foo',
'.b',
'.j'])
# delete the filter, add the synth
self.runCmd("type filter delete foo")
self.runCmd("type synth add -l fooSynthProvider foo")
self.expect('frame variable f00_1', matching=False,
substrs=['b = 2',
'j = 18'])
self.expect(
"frame variable --ptr-depth 1 f00_ptr",
substrs=[
'a = 12',
'fake_a = %d' % fake_a_val,
'r = 45',
])
self.expect(
"frame variable --ptr-depth 1 wrapper",
substrs=[
'a = 12',
'fake_a = %d' % fake_a_val,
'r = 45',
])
# check the listing
self.expect('type synth list',
substrs=['foo',
'Python class fooSynthProvider'])
self.expect('type filter list', matching=False,
substrs=['foo',
'.b',
'.j'])
# delete the synth and check that we get good output
self.runCmd("type synth delete foo")
self.expect("frame variable f00_1",
substrs=['a = 280',
'b = 2',
'j = 18'])
self.expect("frame variable f00_1", matching=False,
substrs=['fake_a = '])
def rdar10960550_formatter_commands(self):
"""Test that synthetic children persist stoppoints."""
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
# The second breakpoint is on a multi-line expression, so the comment
# can't be on the right line...
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line2, num_expected_locations=1, loc_exact=False)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line3, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.runCmd("command script import ./ftsp.py --allow-reload")
self.runCmd("type synth add -l ftsp.ftsp wrapint")
# we need to check that the VO is properly updated so that the same synthetic children are reused
# but their values change correctly across stop-points - in order to do this, self.runCmd("next")
# does not work because it forces a wipe of the stack frame - this is why we are using this more contrived
# mechanism to achieve our goal of preserving test_cast as a VO
test_cast = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame().FindVariable('test_cast')
str_cast = str(test_cast)
if self.TraceOn():
print(str_cast)
self.assertTrue(str_cast.find('A') != -1, 'could not find A in output')
self.assertTrue(str_cast.find('B') != -1, 'could not find B in output')
self.assertTrue(str_cast.find('C') != -1, 'could not find C in output')
self.assertTrue(str_cast.find('D') != -1, 'could not find D in output')
self.assertTrue(
str_cast.find("4 = '\\0'") != -1,
'could not find item 4 == 0')
self.dbg.GetSelectedTarget().GetProcess().GetSelectedThread().StepOver()
str_cast = str(test_cast)
if self.TraceOn():
print(str_cast)
# we detect that all the values of the child objects have changed - but the counter-generated item
# is still fixed at 0 because it is cached - this would fail if update(self): in ftsp returned False
# or if synthetic children were not being preserved
self.assertTrue(str_cast.find('Q') != -1, 'could not find Q in output')
self.assertTrue(str_cast.find('X') != -1, 'could not find X in output')
self.assertTrue(str_cast.find('T') != -1, 'could not find T in output')
self.assertTrue(str_cast.find('F') != -1, 'could not find F in output')
self.assertTrue(
str_cast.find("4 = '\\0'") != -1,
'could not find item 4 == 0')
|
name = 'pylibimport'
version = '1.9.2'
description = 'Python utility for importing packages with the same name, but different version.'
url = 'https://github.com/justengel/pylibimport'
author = 'Justin Engel'
author_email = 'jtengel08@gmail.com'
|
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("XGBClassifier" , "BreastCancer" , "sqlite")
|
import sqlite3 as sql3
#function for Stretch
def get_scalar_result(conn, sql):
cursor=conn.cursor()
cursor.execute(sql)
return cursor.fetchall()
conn = sql3.connect('demo_data.sqlite3')
curs = conn.cursor()
creatq = 'create table demo (s VARCHAR, x int, y int);'
curs.execute(creatq)
insertli = ["('g', 3, 9);","('v', 5, 7);","('f', 8, 7);"]
for i in insertli:
query = """
insert into demo (s, x, y)
values""" + i
    curs.execute(query)
curs.close()
conn.commit()
count = 'select count(*) from demo;'
countr = get_scalar_result(conn, count)
print(f'There are {countr[0][0]} rows.')
at5 = 'select count(*) from demo where x >= 5 and y >= 5;'
at5r = get_scalar_result(conn, at5)
print(f'There are {at5r[0][0]} rows that x and y are at least 5.')
unique = 'select count(DISTINCT y) from demo;'
uniquer = get_scalar_result(conn, unique)
print(f'There are {uniquer[0][0]} unique values in column y.')
|
# Generated by Django 3.0.3 on 2020-03-08 21:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=250, unique_for_date='publish')),
('body', models.TextField()),
('publish', models.DateTimeField(default=django.utils.timezone.now)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-publish',),
},
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Ed Mountjoy
#
import argparse
import gzip
def main():
# Args
args = parse_args()
# Concat together
with gzip.open(args.out, 'w') as out_h:
for inf in args.in_json:
with gzip.open(inf, 'r') as in_h:
for line in in_h:
line = line.decode().rstrip()
out_h.write((line + '\n').encode())
return 0
def parse_args():
''' Load command line args
'''
p = argparse.ArgumentParser()
# Add input files
p.add_argument('--in_json',
metavar="<file>",
help=("List of json files to concatenate"),
type=str,
nargs='+',
required=True)
p.add_argument('--out',
metavar="<file>",
help=("Concatenated json file"),
type=str,
required=True)
args = p.parse_args()
return args
if __name__ == '__main__':
main()
|
"""The tests for the Shell command component."""
import os
import tempfile
import unittest
from unittest.mock import patch
from subprocess import SubprocessError
from homeassistant.bootstrap import _setup_component
from homeassistant.components import shell_command
from tests.common import get_test_home_assistant
class TestShellCommand(unittest.TestCase):
"""Test the Shell command component."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_executing_service(self):
"""Test if able to call a configured service."""
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'called.txt')
assert _setup_component(self.hass, shell_command.DOMAIN, {
shell_command.DOMAIN: {
'test_service': "date > {}".format(path)
}
})
self.hass.services.call('shell_command', 'test_service',
blocking=True)
self.assertTrue(os.path.isfile(path))
def test_config_not_dict(self):
"""Test if config is not a dict."""
assert not _setup_component(self.hass, shell_command.DOMAIN, {
shell_command.DOMAIN: ['some', 'weird', 'list']
})
def test_config_not_valid_service_names(self):
"""Test if config contains invalid service names."""
assert not _setup_component(self.hass, shell_command.DOMAIN, {
shell_command.DOMAIN: {
'this is invalid because space': 'touch bla.txt'
}
})
def test_template_render_no_template(self):
"""Ensure shell_commands without templates get rendered properly."""
cmd, shell = shell_command._parse_command(self.hass, 'ls /bin', {})
self.assertTrue(shell)
self.assertEqual(cmd, 'ls /bin')
def test_template_render(self):
"""Ensure shell_commands with templates get rendered properly."""
self.hass.states.set('sensor.test_state', 'Works')
cmd, shell = shell_command._parse_command(
self.hass,
'ls /bin {{ states.sensor.test_state.state }}', {}
)
        self.assertFalse(shell)
self.assertEqual(cmd[-1], 'Works')
def test_invalid_template_fails(self):
"""Test that shell_commands with invalid templates fail."""
cmd, _shell = shell_command._parse_command(
self.hass,
'ls /bin {{ states. .test_state.state }}', {}
)
self.assertEqual(cmd, None)
@patch('homeassistant.components.shell_command.subprocess.call',
side_effect=SubprocessError)
@patch('homeassistant.components.shell_command._LOGGER.error')
    def test_subprocess_raising_error(self, mock_error, mock_call):
"""Test subprocess."""
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'called.txt')
assert _setup_component(self.hass, shell_command.DOMAIN, {
shell_command.DOMAIN: {
'test_service': "touch {}".format(path)
}
})
self.hass.services.call('shell_command', 'test_service',
blocking=True)
self.assertFalse(os.path.isfile(path))
self.assertEqual(1, mock_error.call_count)
|
from django.core.management.base import BaseCommand
from parkings.importers import PermitAreaImporter
class Command(BaseCommand):
help = 'Uses the PermitAreaImporter to create permit areas'
def handle(self, *args, **options):
PermitAreaImporter().import_permit_areas()
|
from sqlalchemy import Column, Integer, BLOB, ForeignKey, Index, String, UnicodeText, BigInteger, Boolean
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from rdr_server.common.enums import WithdrawalStatus, SuspensionStatus, WithdrawalReason
from rdr_server.model.base_model import BaseModel, ModelMixin, UTCDateTime, ModelEnum
class ParticipantBase(ModelMixin):
"""Mixin with shared columns for Participant and ParticipantHistory"""
# Randomly assigned internal ID. We tack 'P' on the front whenever we use this externally.
participantId = Column('participant_id', String(20), unique=True)
    # Assigned ID from PTSC. Received in request to create a new Participant.
externalId = Column('external_id', BigInteger, unique=True)
# Incrementing version, starts at 1 and is incremented on each update.
version = Column('version', Integer, nullable=False)
# Randomly assigned ID used with Biobank. Prefixed with 'B' whenever we use this externally.
biobankId = Column('biobank_id', Integer, nullable=False)
lastModified = Column('last_modified', UTCDateTime, nullable=False)
signUpTime = Column('sign_up_time', UTCDateTime, nullable=False)
# One or more HPO IDs in FHIR JSON. (The primary link is separately stored as hpoId.)
providerLink = Column('provider_link', BLOB)
# Both HealthPro and PTC can mutate participants; we use clientId to track
    # which system did it. A client ID of example@example.com means we created fake data for this
# participant.
clientId = Column('client_id', String(80))
# Default values for withdrawal and suspension are managed through the DAO (instead of column
# defaults here) to simplify insert v. update semantics.
# Withdrawal is permanent, and indicates we should neither contact the participant nor use their
# data in the future.
withdrawalStatus = Column('withdrawal_status', ModelEnum(WithdrawalStatus), nullable=False)
# The time at which the participants set their withdrawal status to NO_USE.
withdrawalTime = Column('withdrawal_time', UTCDateTime)
withdrawalReason = Column('withdrawal_reason', ModelEnum(WithdrawalReason))
withdrawalReasonJustification = Column('withdrawal_reason_justification', UnicodeText)
# Suspension may be temporary, and indicates we should not contact the participant but may
# continue using their data.
suspensionStatus = Column('suspension_status', ModelEnum(SuspensionStatus), nullable=False)
# The time at which the participant set their suspension status to NO_CONTACT.
suspensionTime = Column('suspension_time', UTCDateTime)
# If a participant is deemed to be a "ghost" i.e. not real or empty participant obj.
isGhostId = Column('is_ghost_id', Boolean)
# The date the participant was marked as ghost
dateAddedGhost = Column('date_added_ghost', UTCDateTime)
@declared_attr
def hpoId(cls):
return Column('hpo_id', Integer, ForeignKey('hpo.hpo_id'), nullable=False)
@declared_attr
def organizationId(cls):
return Column('organization_id', Integer, ForeignKey('organization.organization_id'))
@declared_attr
def siteId(cls):
return Column('site_id', Integer, ForeignKey('site.site_id'))
class Participant(ParticipantBase, BaseModel):
__tablename__ = 'participant'
participantSummary = relationship('ParticipantSummary', uselist=False,
back_populates='participant', cascade='all, delete-orphan')
Index('participant_biobank_id', Participant.biobankId, unique=True)
Index('participant_hpo_id', Participant.hpoId)
Index('participant_withdrawl_sign_up_hpo', Participant.participantId, Participant.withdrawalStatus,
Participant.signUpTime, Participant.hpoId, Participant.isGhostId)
class ParticipantHistory(ParticipantBase, BaseModel):
__tablename__ = 'participant_history'
version = Column('version', Integer, unique=True)
|
import math
from abc import ABC, abstractmethod
class AreaCalculator:
def __init__(self, shapes):
assert isinstance(shapes, list), "`shapes` should be of type `list`."
self.shapes = shapes
@property
def total_area(self):
total = 0
for shape in self.shapes:
total += shape.calculate_area()
return total
class Shape(ABC):
@abstractmethod
def calculate_area(self):
pass
class Rectangle(Shape):
def __init__(self, width, height):
self.width = width
self.height = height
def calculate_area(self):
return self.width * self.height
class Triangle(Shape):
def __init__(self, side, height):
self.side = side
self.height = height
def calculate_area(self):
return self.side * self.height / 2
class Circle(Shape):
def __init__(self, radius):
self.radius = radius
def calculate_area(self):
return math.pi * self.radius ** 2
shapes = [Rectangle(1, 6), Triangle(2, 3)]
calculator = AreaCalculator(shapes)
print("The total area is: ", calculator.total_area)
|
import logging
from Qt import (
QtCore,
QtWidgets
)
class _Signals(QtCore.QObject):
""" Custom signals """
signal_record = QtCore.Signal(logging.LogRecord)
record_context_request = QtCore.Signal(QtCore.QPoint, list, QtWidgets.QListWidget)
def __init__(self):
QtCore.QObject.__init__(self)
class LogbookHandler(logging.Handler):
""" A handler that emits qt signals dedicated to our LogBook widget. """
signals = _Signals()
def emit(self, record):
""" Emits a LogRecord object
Args:
record (logging.LogRecord): passed record object
Returns:
"""
self.signals.signal_record.emit(record)
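# Minimal usage sketch (illustrative only; the slot and logger names are assumptions):
#   handler = LogbookHandler()
#   handler.signals.signal_record.connect(on_record_received)
#   logging.getLogger('my_app').addHandler(handler)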
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 28 19:51:54 2015
@author: Patrick Boehnke
If you use this code please cite:
Boehnke, P., Barboni, M., & Bell, E. A. (2016). Zircon U/Th Model Ages in the Presence of Melt Heterogeneity. Quaternary Geochronology, Submitted.
For comments please contact: pboehnke @ gmail . com (remove spaces)
"""
import numpy as np
#Partition Coefficients for U relative to Th
#Value calculated and justified in manuscript
#If you wish to use your own value make the change here
DUDTh = 7
DUDThunc = 0.4 #Uncertainty at 1 sigma
#If you want to use a normal distribution put 1; if you want to resample the database put 0
#We recommend using 1 as it provides a more conservative estimate of the uncertainties
EquiUncertaintyModel = 1
#Spread of compiled volcanic data from the equiline at 1 sigma
EquilineUncertainty = 0.15 #Reported at 1 sigma
if EquiUncertaintyModel == 0:
EquilineSpread = np.loadtxt("Spread.csv", delimiter=",")
#This sets the number of samples for each sample calculation
#Higher is better but takes longer, 1000 is the default
NumIterations = 1000
#This loads the data file please change it to the right file name
#Data file must be in same directory as this python script
Data = np.loadtxt("Example.csv", delimiter=",")
U238Th232 = Data[:, 0]
U238Th2321sig = Data[:, 1]
Th230Th232 = Data[:, 2]
Th230Th2321sig = Data[:, 3]
ModelSlope = np.copy(U238Th232)
ModelSlopeUnc = np.copy(U238Th232)
NewSlope = np.zeros(NumIterations)
for iter1 in range(len(U238Th232)):
for iter2 in range(NumIterations):
U238Th232Samp = np.random.randn()*U238Th2321sig[iter1] + U238Th232[iter1]
Th230Th232Samp = np.random.randn()*Th230Th2321sig[iter1] + Th230Th232[iter1]
PartCoef = np.random.randn()*DUDThunc + DUDTh
U238Th232M = U238Th232Samp/PartCoef
if EquiUncertaintyModel == 1:
Th230Th232M = np.random.randn()*EquilineUncertainty*U238Th232M + U238Th232M
else:
Th230Th232M = U238Th232M + np.random.choice(EquilineSpread)
Slope = (Th230Th232Samp - Th230Th232M)/(U238Th232Samp - U238Th232M)
if Slope > 1:
NewSlope[iter2] = 1
elif Slope < 0:
NewSlope[iter2] = 0
else:
NewSlope[iter2] = Slope
ModelSlope[iter1] = np.mean(NewSlope)
ModelSlopeUnc[iter1] = np.std(NewSlope)
writeoutarray = np.zeros((len(ModelSlope), 2))
for iter1 in range(0, len(ModelSlope)):
writeoutarray[iter1, 0] = ModelSlope[iter1]
writeoutarray[iter1, 1] = ModelSlopeUnc[iter1]
np.savetxt("output_example.csv", writeoutarray, delimiter=",")
|
COMMON_EMAIL_HANDLES = [
'company',
'contact',
'hello',
'hi',
'info',
'me',
'support',
'team',
]
|
"""Filter Attribute Unit Test Suite"""
from ifc_data_checker.path_operators import AttributeFilterPathOperator
from ifc_data_checker import config
from tests.path_operators.path_operator_test import TestPathOperator
from tests.helpers import IfcInstanceMock
from tests.helpers import MicroMock
class TestFilterAttribute(TestPathOperator.TestParameterValidation):
"""Test Filter Attribute"""
path_operator_class = AttributeFilterPathOperator
default_path_operator = {"attribute": "attribute1", "value": "value1"}
def test_filter_attribute_same_instances(self):
"""Tests ``AttributeFilterPathOperator`` on applying path operator correctly.
Test-Purpose:
            Tests that each instance matching the filtering criteria gets filtered.
Under Test:
``AttributeFilterPathOperator.apply``
Given:
* attribute_name: The name of the attribute to filter
* filtering_value: The value of the attribute to filter
* instances: List of Mock Instance, each matching the filter criteria
* path_operator: the path operator using `attribute_name` and `filtering_value`
Expected:
The same list as the given `instances` list,
because every instance matching the filter criteria.
Comment:
Usage of ``MicroMock`` to represent an instance"""
attribute_name = "property"
filtering_value = "property_value"
instance = MicroMock(property=filtering_value)
instances = [instance, instance, instance]
path_operator = {"attribute": attribute_name, "value": filtering_value}
expected_result = [instance, instance, instance]
operator = AttributeFilterPathOperator(instances, path_operator)
self.assertEqual(expected_result, operator.apply())
def test_filter_attribute_different_instances(self):
"""Tests ``AttributeFilterPathOperator`` on applying path operator correctly.
Test-Purpose:
            Tests that each instance that matches the filtering criteria gets filtered.
Under Test:
``AttributeFilterPathOperator.apply``
Given:
* attribute_name: The name of the attribute to filter
* filtering_value: The value of the attribute to filter
            * instances: List of Mock Instance, some matching the filter criteria and some not
* path_operator: the path operator using `attribute_name` and `filtering_value`
Expected:
A filtered list with the instances matching the filter criteria only
Comment:
Usage of ``MicroMock`` to represent an instance"""
attribute_name = "property"
filtering_value = "property_value"
instance = MicroMock(property=filtering_value)
different_instance = MicroMock(other=42)
instances = [instance, different_instance,
instance, different_instance,
instance, different_instance]
path_operator = {"attribute": attribute_name, "value": filtering_value}
expected_result = [instance, instance, instance]
operator = AttributeFilterPathOperator(instances, path_operator)
self.assertEqual(expected_result, operator.apply())
def test_filter_attribute_none_attribute_name(self):
"""Tests ``AttributeFilterPathOperator`` on validating the input parameter correctly.
Test-Purpose:
parameter input validation of the path operator `attribute_name`
Under Test:
``AttributeFilterPathOperator``
Given:
* filtering_value: The value of the attribute to filter
* instances: List of Mock Instance
* path_operator: the path operator using `filtering_value`
Expected:
Raises ``ValueError`` because the path operator has no defined `attribute_name`"""
filtering_value = "property_value"
instance = MicroMock(property=filtering_value)
instances = [instance, instance, instance]
path_operator = {"value": filtering_value}
self.assertRaises(ValueError, AttributeFilterPathOperator,
instances, path_operator)
def test_filter_attribute_empty_attribute_name(self):
"""Tests ``AttributeFilterPathOperator`` on validating the input parameter correctly.
Test-Purpose:
parameter input validation of the path operator `attribute_name`
Under Test:
``AttributeFilterPathOperator``
Given:
* filtering_value: The value of the attribute to filter
* instances: List of Mock Instance
* path_operator: the path operator using `filtering_value`
Expected:
            Raises ``ValueError`` because the path operator defines an empty `attribute_name`"""
filtering_value = "value"
instance = MicroMock(property=filtering_value)
instances = [instance, instance, instance]
path_operator = {"attribute": "", "value": filtering_value}
self.assertRaises(ValueError, AttributeFilterPathOperator,
instances, path_operator)
def test_filter_attribute_none_attribute_value(self):
"""Tests ``AttributeFilterPathOperator`` on validating the input parameter correctly.
Test-Purpose:
parameter input validation of the path operator `attribute_value`
Under Test:
``AttributeFilterPathOperator``
Given:
* attribute_name: The name of the attribute to filter
* instances: List of Mock Instance
* path_operator: the path operator using `attribute_name`
Expected:
Raises ``ValueError`` because the path operator has no defined `attribute_value`"""
attribute_name = "property"
instance = MicroMock(property="property1")
instances = [instance, instance, instance]
path_operator = {"attribute": attribute_name}
self.assertRaises(ValueError, AttributeFilterPathOperator,
instances, path_operator)
def test_filter_attribute_empty_attribute_value(self):
"""Tests ``AttributeFilterPathOperator`` on validating the input parameter correctly.
Test-Purpose:
parameter input validation of the path operator `attribute_value`
Under Test:
``AttributeFilterPathOperator.apply``
Given:
* filtering_value: The value of the attribute to filter
* instances: List of Mock Instance
* path_operator: the path operator using `filtering_value`
Expected:
            Raises ``ValueError`` because the path operator
            defines an empty `attribute_value`"""
instance = MicroMock(property="property_value")
instances = [instance, instance, instance]
attribute_name = "property"
path_operator = {"attribute": attribute_name, "value": ""}
self.assertRaises(ValueError, AttributeFilterPathOperator,
instances, path_operator)
def test_filter_attribute_path_operator_yaml_keys(self):
"""Tests ``AttributeFilterPathOperator`` on the correct yaml keys
Test-Purpose:
Tests that the yaml keys of ``AttributeFilterPathOperator`` won't change
Under Test:
``AttributeFilterPathOperator.yaml_keys``
Given:
``AttributeFilterPathOperator``
Expected:
The `yaml_keys` of ``AttributeFilterPathOperator`` is 'attribute' and 'value'"""
self.assertEqual(("attribute", "value",),
AttributeFilterPathOperator.get_yaml_keys())
def test_filter_attribute_apply_path(self):
"""Tests ``apply_path`` on applying the path operators correctly.
Test-Purpose:
Tests that the path operator ``AttributeFilterPathOperator``
applied correctly on the actual value
Under Test:
* ``apply_path``
* implicit ``_get_path_operator``
* implicit ``all_path_operators``
Given:
* `ifc_instance`: Object of ``IfcInstanceMock`` with list of `attributes`
* `attributes`: list of MicroMock with `MicroMock(IsDefinedBy="MockWindow")` and
`MicroMock(IsDefinedBy="MockDoor")`
* list with the path operator ``ListPathOperator``, ``AttributeFilterPathOperator``:
`[{"attribute": "IsDefinedBy", "value": "MockWindow"}]`
Expected:
That the path operator ``AttributeFilterPathOperator`` get applied on the list of
`attributes` and that the `path_result` is the object of
`MicroMock(IsDefinedBy="MockWindow")`
Comment:
* Usage of ``IfcInstanceMock`` to represent an ifc instance
* Usage of ``MicroMock`` to represent an attribute"""
ifc_attribute = MicroMock(
IsDefinedBy="MockWindow"
)
other_ifc_attribute = MicroMock(
IsDefinedBy="MockDoor"
)
ifc_instance = IfcInstanceMock(
Name="IfcMock",
GlobalId="IfcMockId",
ifc_type="MockType",
attributes=[ifc_attribute, other_ifc_attribute]
)
path_operator = {"attribute": "IsDefinedBy", "value": "MockWindow"}
operator = config.get_path_operator(path_operator, [ifc_instance])
self.assertIsInstance(operator, AttributeFilterPathOperator)
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Mesos-customized entry point to the thermos_observer webserver."""
import sys
import thread
import threading
import time
from twitter.common import app, log
from twitter.common.exceptions import ExceptionalThread
from twitter.common.log.options import LogOptions
from twitter.common.quantity import Amount, Time
from apache.aurora.executor.common.path_detector import MesosPathDetector
from apache.thermos.monitoring.resource import TaskResourceMonitor
from apache.thermos.observer.http.configure import configure_server
from apache.thermos.observer.task_observer import TaskObserver
app.add_option(
'--mesos-root',
dest='mesos_root',
type='string',
default=MesosPathDetector.DEFAULT_MESOS_ROOT,
help='The mesos root directory to search for Thermos executor sandboxes [default: %default]')
app.add_option(
'--ip',
dest='ip',
type='string',
default='0.0.0.0',
help='The IP address the observer will bind to.')
app.add_option(
'--port',
dest='port',
type='int',
default=1338,
help='The port on which the observer should listen.')
app.add_option(
'--polling_interval_secs',
dest='polling_interval_secs',
type='int',
default=int(TaskObserver.POLLING_INTERVAL.as_(Time.SECONDS)),
help='The number of seconds between observer refresh attempts.')
app.add_option(
'--task_process_collection_interval_secs',
dest='task_process_collection_interval_secs',
type='int',
default=int(TaskResourceMonitor.PROCESS_COLLECTION_INTERVAL.as_(Time.SECONDS)),
help='The number of seconds between per task process resource collections.')
app.add_option(
'--task_disk_collection_interval_secs',
dest='task_disk_collection_interval_secs',
type='int',
default=int(TaskResourceMonitor.DISK_COLLECTION_INTERVAL.as_(Time.SECONDS)),
help='The number of seconds between per task disk resource collections.')
# Allow an interruptible sleep so that ^C works.
def sleep_forever():
while True:
time.sleep(1)
def initialize(options):
path_detector = MesosPathDetector(options.mesos_root)
return TaskObserver(
path_detector,
Amount(options.polling_interval_secs, Time.SECONDS),
Amount(options.task_process_collection_interval_secs, Time.SECONDS),
Amount(options.task_disk_collection_interval_secs, Time.SECONDS))
def handle_error(exc_type, value, traceback):
""" Tear down the observer in case of unhandled errors.
By using ExceptionalThread throughout the observer we have ensured that sys.excepthook will
be called for every unhandled exception, even for those not originating in the main thread.
"""
  log.error("An unhandled error occurred. Tearing down.", exc_info=(exc_type, value, traceback))
# TODO: In Python 3.4 we will be able to use threading.main_thread()
if not isinstance(threading.current_thread(), threading._MainThread):
thread.interrupt_main()
def main(_, options):
observer = initialize(options)
observer.start()
root_server = configure_server(observer)
server = ExceptionalThread(target=lambda: root_server.run(options.ip, options.port, 'cherrypy'))
server.daemon = True
server.start()
sleep_forever()
sys.excepthook = handle_error
LogOptions.set_stderr_log_level('google:INFO')
app.main()
|
from zlib import crc32
import requests
class Avacat:
def __init__(self, root='https://shantichat.github.io/avacats'):
self.root = root
self.info = requests.get(f'{root}/index.json').json()
def __call__(self, name, size):
assert size in self.info['sizes'], f"Size {size} not allowed, available sizes: {self.info['sizes']}"
i = crc32(name.lower().encode()) % self.info['num']
return f'{self.root}{size}x{size}/{i}.jpg'
if __name__ == '__main__':
avacat = Avacat()
print(avacat('alice@example.com', 80)) # https://shantichat.github.io/avacats80x80/171.jpg
print(avacat('bob@example.com', 120)) # https://shantichat.github.io/avacats120x120/222.jpg
|
from typing import Any, List, Literal, TypedDict
from .FHIR_Coding import FHIR_Coding
from .FHIR_dateTime import FHIR_dateTime
from .FHIR_Element import FHIR_Element
from .FHIR_id import FHIR_id
from .FHIR_ImagingStudy_Instance import FHIR_ImagingStudy_Instance
from .FHIR_ImagingStudy_Performer import FHIR_ImagingStudy_Performer
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
from .FHIR_unsignedInt import FHIR_unsignedInt
# Representation of the content produced in a DICOM imaging study. A study comprises a set of series, each of which includes a set of Service-Object Pair Instances (SOP Instances - images or other data) acquired or produced in a common context. A series is of only one modality (e.g. X-ray, CT, MR, ultrasound), but a study may have multiple series of different modalities.
FHIR_ImagingStudy_Series = TypedDict(
"FHIR_ImagingStudy_Series",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# The DICOM Series Instance UID for the series.
"uid": FHIR_id,
# Extensions for uid
"_uid": FHIR_Element,
# The numeric identifier of this series in the study.
"number": FHIR_unsignedInt,
# Extensions for number
"_number": FHIR_Element,
# The modality of this series sequence.
"modality": FHIR_Coding,
# A description of the series.
"description": FHIR_string,
# Extensions for description
"_description": FHIR_Element,
# Number of SOP Instances in the Study. The value given may be larger than the number of instance elements this resource contains due to resource availability, security, or other factors. This element should be present if any instance elements are present.
"numberOfInstances": FHIR_unsignedInt,
# Extensions for numberOfInstances
"_numberOfInstances": FHIR_Element,
# The network service providing access (e.g., query, view, or retrieval) for this series. See implementation notes for information about using DICOM endpoints. A series-level endpoint, if present, has precedence over a study-level endpoint with the same Endpoint.connectionType.
"endpoint": List[FHIR_Reference],
# The anatomic structures examined. See DICOM Part 16 Annex L (http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_L.html) for DICOM to SNOMED-CT mappings. The bodySite may indicate the laterality of body part imaged; if so, it shall be consistent with any content of ImagingStudy.series.laterality.
"bodySite": FHIR_Coding,
# The laterality of the (possibly paired) anatomic structures examined. E.g., the left knee, both lungs, or unpaired abdomen. If present, shall be consistent with any laterality information indicated in ImagingStudy.series.bodySite.
"laterality": FHIR_Coding,
# The specimen imaged, e.g., for whole slide imaging of a biopsy.
"specimen": List[FHIR_Reference],
# The date and time the series was started.
"started": FHIR_dateTime,
# Extensions for started
"_started": FHIR_Element,
# Indicates who or what performed the series and how they were involved.
"performer": List[FHIR_ImagingStudy_Performer],
# A single SOP instance within the series, e.g. an image, or presentation state.
"instance": List[FHIR_ImagingStudy_Instance],
},
total=False,
)
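# A minimal construction sketch (not part of the generated definitions). Because
# the TypedDict is declared with total=False, a series can be built with only the
# fields of interest; the literal values below are hypothetical examples.
example_series: FHIR_ImagingStudy_Series = {
    "uid": "2.16.124.113543.6003.2588828330.45298.17418.2723805630",
    "number": 3,
    "modality": {"system": "http://dicom.nema.org/resources/ontology/DCM", "code": "CT"},
    "description": "CT Surview 180",
    "numberOfInstances": 1,
}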
|
class DescontoPorCincoItens(object):
def __init__(self, proximo_desconto):
self.__proximo_desconto = proximo_desconto
def calcula(self, orcamento):
if orcamento.total_itens > 5:
return orcamento.valor * 0.01
else:
return self.__proximo_desconto.calcula(orcamento)
class DescontoPorMaisDe500Reais(object):
def __init__(self, proximo_desconto):
self.__proximo_desconto = proximo_desconto
def calcula(self, orcamento):
if orcamento.valor > 500:
return orcamento.valor * 0.07
else:
return self.__proximo_desconto.calcula(orcamento)
class SemDesconto(object):
def calcula(self, orcamento):
return 0
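# A usage sketch (not part of the original file): the discount rules form a
# chain of responsibility, each rule delegating to the next when its condition
# does not apply. The `OrcamentoFake` stand-in below is hypothetical and only
# mimics the attributes the rules read (`valor` and `total_itens`).
if __name__ == '__main__':
    class OrcamentoFake(object):
        def __init__(self, valor, total_itens):
            self.valor = valor
            self.total_itens = total_itens
    cadeia = DescontoPorCincoItens(DescontoPorMaisDe500Reais(SemDesconto()))
    print(cadeia.calcula(OrcamentoFake(500.0, 6)))  # 1% of the total (more than 5 items)
    print(cadeia.calcula(OrcamentoFake(600.0, 2)))  # 7% of the total (value above 500)
    print(cadeia.calcula(OrcamentoFake(100.0, 1)))  # 0 (no rule applies)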
|
from __future__ import print_function
import pyttsx3
import datetime
import smtplib
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import random
from twilio.rest import Client
import pickle
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import requests
import json
from decouple import config
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/contacts.readonly']
def speak(audio):
# It speaks a given string
engine.say(audio)
engine.runAndWait()
def wishMe():
# Wish according to the time.
hour = int(datetime.datetime.now().hour)
if 0 <= hour < 12:
speak("Good Morning!")
elif 12 <= hour < 18:
speak("Good Afternoon!")
elif 18 <= hour < 23:
speak("Good Evening!")
else:
speak(
"Good Night, sir...It's good for health to have dinner and go to bed now...as you know Early to bed and "
"early to rise, makes a man healthy, wealthy and wise.")
speak("Thanks for using Era")
exit()
speak("I'm Era, your personal voice assistant. Please tell how may I help you?")
def sendEmail(to, content):
# It sends an email
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('your_email@gmail.com',
'your_password') # Enter your password
server.sendmail('your_email@gmail.com', to, content)
server.close()
def fetchSecret():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return build('people', 'v1', credentials=creds)
def fetchNameEmail():
"""Fetches name and their email ids from google contacts"""
# Calls our function to retrieve our secret
service = fetchSecret()
# Call the People API
results = service.people().connections().list(
resourceName='people/me',
pageSize=1500,
personFields='names,emailAddresses').execute()
connections = results.get('connections', [])
name1List = []
emailList = []
for person in connections:
names = person.get('names', [])
emails = person.get('emailAddresses', [])
if names and emails:
name = names[0].get('displayName')
name1List.append(name)
email = emails[0]['value']
emailList.append(email)
nameEmailList = zip(name1List, emailList)
return sorted(nameEmailList, key=lambda x: x[0])
def name1Lower(name1List):
"""Makes all the names lowercase for name-email id list"""
name1ListLower = list(map(lambda x: x.lower(), name1List))
return list(map(lambda x: x.split(), name1ListLower))
def fetchNamePhoneNo():
"""Fetches name and their phone numbers from google contacts"""
# Calls our function to retrieve our secret
service = fetchSecret()
# Call the People API
results = service.people().connections().list(
resourceName='people/me',
pageSize=1500,
personFields='names,emailAddresses,phoneNumbers').execute()
connections = results.get('connections', [])
name2List = []
phoneNoList = []
for person in connections:
names = person.get('names', [])
phones = person.get('phoneNumbers', [])
if phones:
name = names[0].get('displayName')
name2List.append(name)
phone = phones[0]['value']
phoneNoList.append(phone)
namePhoneNoList = zip(name2List, phoneNoList)
return sorted(namePhoneNoList, key=lambda x: x[0])
def name2Lower(name2List):
"""Makes all the names lowercase for name-phone number list"""
name2ListLower = list(map(lambda x: x.lower(), name2List))
return list(map(lambda x: x.split(), name2ListLower))
def queryLowerSplit(query):
"""Makes all the query elements lowercase"""
queryLower = query.lower()
return queryLower.split()
def takeCommand():
# It takes microphone input from user and returns string output
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.adjust_for_ambient_noise(source)
r.pause_threshold = 1
audio = r.listen(source,
phrase_time_limit=4) # it converts the audio input into string and gives a span of 4 sec to an user to speak
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
# query = r.recognize_sphinx(audio) #instead of that we can use this is offline but accuray very poor
print(f"User said: {query}")
except:
print("Say that again please...")
return "None"
return query
# Commenting out function since its not being used.
# def splitWords(query):
# return lst[0].split()
def givenews():
news_api = config('News_API')
speak("News for today..Lets begin")
url = f"http://newsapi.org/v2/top-headlines?country=in&apiKey={news_api}"
news = requests.get(url).text
news_dict = json.loads(news)
arts = news_dict['articles']
i = 1
for article in arts[:-1]:
speak(article['title'])
print(f"\n{i}. {article['title']}")
speak("Moving on to the next news....")
i += 1
for article in arts[-1:]:
speak(article['title'])
print(f"\n{i}. {article['title']}")
speak("Thanks for listening...")
speak("Stay tuned for more updated news")
if __name__ == '__main__':
wishMe()
while True:
# if 1:
query = takeCommand().lower()
# Logic for executing tasks based on query
if "how are you" in query:
speak("I'm fine sir, what about you?")
elif "fine" in query:
speak("It's good to know that you are fine.")
elif "who are you" in query:
speak("My name is Era. I'm a desktop assistant made by Mr Aritra.")
elif 'wikipedia' in query:
# sentences=2 means return first two string
results = wikipedia.summary(query, sentences=2)
speak("According to wikipedia..")
# print("According to wikipedia")
# print(results)
speak(results)
elif 'open spartan' in query or 'spartan' in query:
spartanPath = "C:\\Program Files\\Wavefunction\\Spartan14v114\\WF14gui64.exe"
os.startfile(spartanPath)
elif 'open youtube' in query:
webbrowser.open('http://www.youtube.com')
elif 'open google' in query:
webbrowser.open('https://www.google.co.in/')
elif 'open stackoverflow' in query:
webbrowser.open('https://stackoverflow.com/')
elif 'play music' in query or 'play song' in query or 'play some music' in query or 'play another music' in query or 'change song' in query or 'next song' in query:
music_dir = 'G:\\RabindraSangeet'
songs = os.listdir(music_dir)
os.startfile(os.path.join(
music_dir, songs[random.randint(0, len(songs) - 1)]))
elif 'the time' in query or 'time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"Sir, the time is {strTime}")
elif 'open code' in query or 'open visual studio' in query:
codePath = "C:\\Users\\Aritra Roy\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
elif 'quit' in query or 'exit' in query or 'close' in query:
speak("Thanks for using Era!!!")
exit()
elif 'awesome' in query or 'wow' in query or 'amazing' in query or 'wonderful' in query:
speak("Thank you sir, I'm always here for you")
elif 'what' in query or 'who' in query or 'where' in query or 'can you' in query:
webbrowser.open(f"https://www.google.com/search?&q={query}")
speak(wikipedia.summary(query, sentences=2))
elif 'email to' in query or 'send a mail' in query or 'mail to' in query:
            # This will send mail only if the query contains a contact name
            # that exists as a key in the name list fetched from Google Contacts
zippedNameEmailList = fetchNameEmail()
name1List, emailList = zip(*zippedNameEmailList)
name1FinalList = name1Lower(name1List)
queryList = queryLowerSplit(query)
i = 0
for item in name1FinalList:
i += 1
for item1 in item:
for item2 in queryList:
if item2 == item1:
try:
speak("What is your message ?")
content = takeCommand()
to = emailList[i - 1]
sendEmail(to, content)
speak("Email has been sent")
break
except Exception as e:
print(e)
speak("Sorry sir, something went wrong and i am not able to send your email right now.")
break
else:
continue
break
else:
continue
break
if i + 1 > len(name1FinalList):
speak("Contact not found")
elif 'phone' in query or 'make call' in query or 'call' in query:
zippednamePhoneNoList = fetchNamePhoneNo()
name2List, phoneNoList = zip(*zippednamePhoneNoList)
name2FinalList = name2Lower(name2List)
queryList = queryLowerSplit(query)
i = 0
for item in name2FinalList:
i += 1
for item1 in item:
for item2 in queryList:
if item2 == item1:
try:
# Your Account Sid and Auth Token from twilio.com/console
# DANGER! This is insecure. See http://twil.io/secure
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
client = Client(account_sid, auth_token)
call = client.calls.create(
twiml='<Response><Say>Ahoy, World!</Say></Response>',
to=phoneNoList[i - 1],
from_='+1XXXXXXXXXX'
)
speak("Calling has been initiated")
break
except Exception as e:
print(e)
speak("Sorry sir, something went wrong and i am not able to call right now.")
break
else:
continue
break
else:
continue
break
if i + 1 > len(name2FinalList):
speak("Contact not found")
elif 'headlines' in query or 'news' in query or 'headline' in query:
givenews()
|
from BucketLib.bucket import Bucket
from collections_helper.collections_spec_constants import MetaConstants
spec = {
MetaConstants.NUM_BUCKETS: 3,
MetaConstants.NUM_SCOPES_PER_BUCKET: 2,
MetaConstants.NUM_COLLECTIONS_PER_SCOPE: 2,
MetaConstants.NUM_ITEMS_PER_COLLECTION: 5000,
MetaConstants.REMOVE_DEFAULT_COLLECTION: False,
MetaConstants.CREATE_COLLECTIONS_USING_MANIFEST_IMPORT: True,
Bucket.bucketType: Bucket.Type.MEMBASE,
Bucket.replicaNumber: Bucket.ReplicaNum.ONE,
Bucket.ramQuotaMB: 300,
Bucket.replicaIndex: 1,
Bucket.flushEnabled: Bucket.FlushBucket.ENABLED,
Bucket.priority: Bucket.Priority.LOW,
Bucket.conflictResolutionType: Bucket.ConflictResolution.SEQ_NO,
Bucket.maxTTL: 0,
Bucket.storageBackend: Bucket.StorageBackend.couchstore,
Bucket.evictionPolicy: Bucket.EvictionPolicy.FULL_EVICTION,
Bucket.compressionMode: Bucket.CompressionMode.ACTIVE,
"buckets": {
"default": {
Bucket.maxTTL: 350,
MetaConstants.NUM_SCOPES_PER_BUCKET: 5,
MetaConstants.NUM_COLLECTIONS_PER_SCOPE: 10,
MetaConstants.NUM_ITEMS_PER_COLLECTION: 1000,
Bucket.ramQuotaMB: 17000,
Bucket.bucketType: Bucket.Type.MEMBASE,
"privileges": [
"Perm1"
],
"scopes": {
"scope1": {
"privileges": [
"Perm1"
],
"collections": {
"collection_1": {
"rbac": "rbac1",
MetaConstants.NUM_ITEMS_PER_COLLECTION: 5000000,
Bucket.maxTTL: 300
},
"collections_2": {
"rbac": "rbac2",
MetaConstants.NUM_ITEMS_PER_COLLECTION: 5000000,
Bucket.maxTTL: 300
}
}
},
"scope2": {
"privileges": [
"Perm1"
],
"collections": {
"collection1": {
"rbac": "rbac1",
MetaConstants.NUM_ITEMS_PER_COLLECTION: 5000000,
Bucket.maxTTL: 100
},
"collection2": {
"rbac": "rbac2",
MetaConstants.NUM_ITEMS_PER_COLLECTION: 5000000,
Bucket.maxTTL: 100
}
}
}
}
},
"bucket1": {
Bucket.bucketType: Bucket.Type.MEMBASE,
MetaConstants.NUM_SCOPES_PER_BUCKET: 2,
MetaConstants.NUM_COLLECTIONS_PER_SCOPE: 2,
MetaConstants.NUM_ITEMS_PER_COLLECTION: 0,
"privileges": [
"Perm2"
],
"scopes": {
"scope1": {
"privileges": [
"Perm1"
],
"collections": {
"collection1": {
"rbac": "rbac1",
MetaConstants.NUM_ITEMS_PER_COLLECTION: 5000,
Bucket.maxTTL: 150
},
"collection2": {
"rbac": "rbac2",
MetaConstants.NUM_ITEMS_PER_COLLECTION: 5000,
Bucket.maxTTL: 150
}
}
},
"scope2": {
"privileges": [
"Perm1"
],
"collections": {
"collection1": {
"rbac": "rbac1",
MetaConstants.NUM_ITEMS_PER_COLLECTION: 5000,
Bucket.maxTTL: 200
},
"collection2": {
"rbac": "rbac2",
MetaConstants.NUM_ITEMS_PER_COLLECTION: 5000,
Bucket.maxTTL: 200
}
}
}
}
},
"bucket2": {
Bucket.bucketType: Bucket.Type.EPHEMERAL,
Bucket.evictionPolicy: Bucket.EvictionPolicy.NRU_EVICTION,
MetaConstants.NUM_SCOPES_PER_BUCKET: 1,
MetaConstants.NUM_COLLECTIONS_PER_SCOPE: 2,
MetaConstants.NUM_ITEMS_PER_COLLECTION: 0,
"privileges": [
"Perm3"
],
"scopes": {
"scope1": {
"privileges": [
"Perm1"
],
"collections": {
"collection1": {
"rbac": "rbac1",
MetaConstants.NUM_ITEMS_PER_COLLECTION: 15000,
Bucket.maxTTL: 50
},
"collection2": {
"rbac": "rbac2",
MetaConstants.NUM_ITEMS_PER_COLLECTION: 15000,
Bucket.maxTTL: 50
}
}
}
}
}
}
}
|
# Translate alphabet based text to braille.
from . import mapAlphaToBraille, mapBrailleToAlpha
CAPITAL = chr(10272) # ⠠
NUMBER = chr(10300) # ⠼
UNRECOGNIZED = '?'
# There is no braille symbol for a generic quote (").
# There is only open quotation (“) and closed quotation (”).
# Therefore we must keep track of what the last quotation was
# so that we may convert the generic quotation to a specific one.
open_quotes = True
def extract_words(string):
# Split up a sentence based on whitespace (" ") and new line ("\n") chars.
words = string.split(" ")
result = []
for word in words:
temp = word.split("\n")
for item in temp:
result.append(item)
return result
def is_braille(char):
# Return true if a char is braille.
if len(char) > 1:
return False
return char in mapBrailleToAlpha.letters \
or char in mapBrailleToAlpha.numbers \
or char in mapBrailleToAlpha.punctuation \
or char in mapBrailleToAlpha.contractions \
or char == CAPITAL \
or char == NUMBER
def trim(word):
# Remove punctuation around a word. Example: cat." becomes cat
    while len(word) != 0 and not word[0].isalnum():
word = word[1:]
    while len(word) != 0 and not word[-1].isalnum():
word = word[:-1]
return word
def numbers_handler(word):
# Replace each group of numbers in a word to their respective braille representation.
if word == "":
return word
result = word[0]
if word[0].isdigit():
result = NUMBER + mapAlphaToBraille.numbers.get(word[0])
for i in range(1, len(word)):
if word[i].isdigit() and word[i-1].isdigit():
result += mapAlphaToBraille.numbers.get(word[i])
elif word[i].isdigit():
result += NUMBER + mapAlphaToBraille.numbers.get(word[i])
else:
result += word[i]
return result
def capital_letters_handler(word):
# Put the capital escape code before each capital letter.
if word == "":
return word
result = ""
for char in word:
if char.isupper():
result += CAPITAL + char.lower()
else:
result += char.lower()
return result
def find_utf_code(char):
    # Find the UTF code of a particular character. Used when an unrecognized char is found.
if len(char) != 1:
return -1
for i in range(0, 55000):
if char == chr(i):
return i
def char_to_braille(char):
# Convert an alphabetic char to braille.
if is_braille(char):
return char
elif char == "\n":
return "\n"
elif char == "\"":
global open_quotes
if open_quotes:
open_quotes = not open_quotes
            return mapAlphaToBraille.punctuation.get("“")
else:
open_quotes = not open_quotes
            return mapAlphaToBraille.punctuation.get("”")
elif char in mapAlphaToBraille.letters and char.isupper():
return CAPITAL + mapAlphaToBraille.letters.get(char)
elif char in mapAlphaToBraille.letters:
return mapAlphaToBraille.letters.get(char)
elif char in mapAlphaToBraille.punctuation:
return mapAlphaToBraille.punctuation.get(char)
else:
print("Unrecognized Symbol:", char, "with UTF code:", find_utf_code(char))
return UNRECOGNIZED
def word_to_braille(word):
# Convert an alphabetic word to braille.
if word in mapAlphaToBraille.contractions:
return mapAlphaToBraille.contractions.get(word)
else:
result = ""
for char in word:
result += char_to_braille(char)
return result
def build_braille_word(trimmed_word, shavings, index, braille):
# Translate a trimmed word to braille then re-attach the shavings.
if shavings == "":
braille += word_to_braille(trimmed_word)
else:
for i in range(0, len(shavings)):
            if i == index and trimmed_word != "":
braille += word_to_braille(trimmed_word)
braille += word_to_braille(shavings[i])
if index == len(shavings): # If the shavings are all at the beginning.
braille += word_to_braille(trimmed_word)
return braille
def translate(string):
# Convert alphabetic text to braille.
braille = ""
words = extract_words(string)
for word in words:
word = numbers_handler(word)
word = capital_letters_handler(word)
trimmed_word = trim(word) # Remove punctuation (ex: change dog?" to dog)
untrimmed_word = word
index = untrimmed_word.find(trimmed_word)
shavings = untrimmed_word.replace(trimmed_word, "")
braille = build_braille_word(trimmed_word, shavings, index, braille) + " "
return braille[:-1] # Remove the final space that was added.
'''
The Algorithm for Translating Alphabet Based Text to Grade 2 Braille:
1. Split up the text into words by dividing them based on whitespace characters.
- Whitespace includes spaces (' ') and new lines ('\n')
2. For each word, handle the numbers first.
- Numbers in braille use the same symbols as the first 10 letters of the alphabet.
- The number '7' and the letter 'g' are both represented by '⠛'.
- To differentiate between numbers and letters, an escape code (⠼) is placed before groups of numbers.
- Therefore '7' is actually '⠼⠛' whereas 'g' is only '⠛'.
- In this step, only the numbers are dealt with, so there will be a mix of both braille and Alphabet symbols.
- Example: "123-456-JUNK" becomes "โ ผโ โ โ -โ ผโ โ โ -JUNK"
3. Handle the capitals.
- Similarly to numbers in braille, capital letters need an escape code (โ ).
- The escape code (โ ) is added to the beginning of each capital letter and the letter is changed to lowercase.
- Example 1: "⠼⠁⠃⠉-⠼⠙⠑⠋-JUNK" becomes "⠼⠁⠃⠉-⠼⠙⠑⠋-⠠j⠠u⠠n⠠k". The dashes still remain.
- Example 2: "Sweet" becomes "⠠sweet". The non-capital letters remain untouched.
4. Trim the word.
- Sometimes the words extracted contain punctuation attached to them such as commas or brackets.
- Words need to be trimmed so that they can be converted to contractions.
- Example: The word "the" is represented by a single braille symbol (⠮).
- If the word "the" has punctuation around it ("the!") then it will not be interpreted correctly.
- This is also why capitals are converted to lowercase in step 3 because "The" would not work either.
- The characters that are trimmed off are called "shavings".
- Example: In the word "!where?", the shavings are "!?" and the trimmed word is "where".
5. Build the translation.
a) Check to see if the trimmed word can be contracted.
- This includes common words like "the", "in", "you" etc...
b) Translate the remaining characters that are still alphabetic.
c) Translate the shavings (this will mostly just be punctuation).
- Exceptions to be mindful of:
- There is no braille symbol for a generic quote (")
- There is only open quotation (“) and closed quotation (”).
- Therefore we must keep track of what the last quotation was to convert it correctly.
'''
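# A minimal usage sketch (not part of the original module). Because this file
# relies on relative imports, it has to be used from within its package; the
# import path and sample sentence below are hypothetical.
#
#     >>> from braille.alpha_to_braille import translate   # hypothetical import path
#     >>> translate('The cat has 7 lives.')
#
# Following the steps above, "The" is lowercased behind the capital escape code,
# "7" is prefixed with the number escape code, and trimmed words with a known
# contraction (such as "the") are replaced by their single-cell contraction
# before the surrounding punctuation is translated and re-attached.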
|
def main():
key = '15,11,19,18,16,03,07,14,02,20,04,12,09,06,01,05,17,13,10,08'
plain_text = 'distributed anonymous'.replace(' ','')
encrypted_text = list(plain_text)
key = key.split(',')
for index, char in enumerate(plain_text):
new_index = key.index(str(index + 1).zfill(2))
# print(new_index + 1)
# encrypted_text += plain_text[new_index]
encrypted_text[new_index] = char
# encrypted_text[index] = 1
# print(index)
encrypted_text = "".join(encrypted_text)
print(encrypted_text.upper())
if __name__ == '__main__':
main()
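# A decryption sketch (not part of the original script), assuming the same
# comma-separated, 1-based, zero-padded key format used above. During encryption
# the character at plaintext position i is written to ciphertext position
# key.index(i + 1), so reading the ciphertext back through the key inverts the
# transposition. The function name `decrypt` is hypothetical.
def decrypt(encrypted_text, key):
    key = key.split(',')
    decrypted = list(encrypted_text)
    for index in range(len(encrypted_text)):
        new_index = key.index(str(index + 1).zfill(2))
        decrypted[index] = encrypted_text[new_index]
    return "".join(decrypted)
# decrypt(<output of main()>, key) gives back 'DISTRIBUTEDANONYMOUS'.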
|
from docxbuilder.docx.docx import *
|
from .nrmse import nrmse
from .rsquared import rsquared
from .breuschpagan import breuschpagan
from .condition_number import condition_number
from .durbin_watson import durbin_watson
from .jarque_bera import jarque_bera
from .ljungbox import ljungbox
from .mae import mae
from .mape import mape
from .mse import mse
from .rmse import rmse
from .degrees_of_freedom import degrees_of_freedom
from .decomp_rssd import decomp_rssd
from .harvey_collier import harvey_collier
from .rainbox import rainbox
from .vars_obs import vars_obs
from .mdape import mdape
from .smape import smape
from .mda import mda
from .mase import mase
from .mfe import mfe
from .log_accuracy_ratio import log_accuracy_ratio
from .max_error import max_error
from .dummy_constant import dummy_constant
from .dummy_mean import dummy_mean
from .dummy_median import dummy_median
from .effect_share import effect_share
from .spend_share import spend_share
|
from sklearn.cluster import KMeans
def cluster_embeddings(embeddings, sentences, num_clusters):
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = [[] for i in range(num_clusters)]
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[cluster_id].append(sentences[sentence_id])
return clustered_sentences
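# A usage sketch (not part of the original module): clusters a few toy 2-D
# vectors standing in for sentence embeddings. Real embeddings would normally
# come from an encoder model; the vectors and sentences below are hypothetical.
if __name__ == "__main__":
    import numpy as np
    sentences = ["cats purr", "dogs bark", "stocks rallied", "markets fell"]
    embeddings = np.array([[0.9, 0.1], [0.8, 0.2], [0.1, 0.9], [0.2, 0.8]])
    for cluster in cluster_embeddings(embeddings, sentences, num_clusters=2):
        print(cluster)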
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This file contains the definition of encoders used in https://arxiv.org/pdf/1705.02364.pdf
"""
import time
import numpy as np
import torch
import torch.nn as nn
# device = 'cpu'
# if torch.cuda.is_available():
# device = 'cuda'
"""
BLSTM (max/mean) encoder
"""
class InferSent(nn.Module):
def __init__(self, config, device='cuda'):
super(InferSent, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.version = 1 if 'version' not in config else config['version']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=True, dropout=self.dpout_model)
assert self.version in [1, 2]
if self.version == 1:
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
elif self.version == 2:
self.bos = '<p>'
self.eos = '</p>'
self.max_pad = False
self.moses_tok = True
self.device = device
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return self.enc_lstm.bias_hh_l0.data.is_cuda
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (bsize)
# sent: (seqlen x bsize x worddim)
sent, sent_len = sent_tuple
# Sort by length (keep idx)
sent_len_sorted, idx_sort = torch.sort(sent_len, descending=True)
# sent_len_sorted = sent_len_sorted.copy()
idx_unsort = torch.sort(idx_sort)[1]
idx_sort = idx_sort.to(self.device)
sent = sent.index_select(1, idx_sort)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
sent_output = self.enc_lstm(sent_packed)[0] # seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = idx_unsort.to(self.device)
sent_output = sent_output.index_select(1, idx_unsort)
# Pooling
if self.pool_type == "mean":
# sent_len = torch.FloatTensor(sent_len.copy()).unsqueeze(1).to(device)
emb = torch.sum(sent_output, 0).squeeze(0)
emb = emb / sent_len.expand_as(emb)
elif self.pool_type == "max":
if not self.max_pad:
sent_output[sent_output == 0] = -1e9
emb = torch.max(sent_output, 0)[0]
if emb.ndimension() == 3:
emb = emb.squeeze(0)
assert emb.ndimension() == 2
return emb, sent_output.permute(1, 0, 2)
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
        # update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)' % (
len(self.word_vec), len(new_word_vec)))
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = torch.sort(lengths)[0], torch.sort(-lengths)[1]
sentences = np.array(sentences)[idx_sort]
return sentences, lengths, idx_sort
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch = self.get_batch(sentences[stidx:stidx + bsize])
if self.is_cuda():
batch = batch.to(self.device)
with torch.no_grad():
batch = self.forward(
(batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = torch.sort(idx_sort)[1]
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings) / (time.time() - tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
def visualize(self, sent, tokenize=True):
sent = sent.split() if not tokenize else self.tokenize(sent)
sent = [
[self.bos] + [word for word in sent if word in self.word_vec] + [self.eos]]
if ' '.join(sent[0]) == '%s %s' % (self.bos, self.eos):
import warnings
warnings.warn('No words in "%s" have w2v vectors. Replacing \
by "%s %s"..' % (sent, self.bos, self.eos))
batch = self.get_batch(sent)
if self.is_cuda():
batch = batch.to(self.device)
output = self.enc_lstm(batch)[0]
output, idxs = torch.max(output, 0)
# output, idxs = output.squeeze(), idxs.squeeze()
idxs = idxs.data.cpu().numpy()
argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]
# visualize model
import matplotlib.pyplot as plt
x = range(len(sent[0]))
y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
plt.xticks(x, sent[0], rotation=45)
plt.bar(x, y)
plt.ylabel('%')
plt.title('Visualisation of words importance')
plt.show()
return output, idxs
"""
BiGRU encoder (first/last hidden states)
"""
class BGRUlastEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(BGRUlastEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.enc_lstm = nn.GRU(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=True, dropout=self.dpout_model)
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: seqlen x batch x worddim
sent, sent_len = sent_tuple
sent_len = sent_len.cpu()
# Sort by length (keep idx)
# sent_len, idx_sort = torch.sort(sent_len, descending=True)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output, hn = self.enc_lstm(sent_packed)
emb = torch.cat((hn[0], hn[1]), 1) # batch x 2*nhid
sent_output_un, _ = torch.nn.utils.rnn.pad_packed_sequence(sent_output,
batch_first=True)
# Un-sort by length
idx_unsort = torch.sort(idx_sort)[1]
emb = emb.index_select(0, torch.LongTensor(idx_unsort).to(self.device))
return emb, sent_output_un
"""
BLSTM encoder with projection after BiLSTM
"""
class BLSTMprojEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(BLSTMprojEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=True, dropout=self.dpout_model)
self.proj_enc = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=False)
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
bsize = sent.size(1)
sent_len = sent_len.cpu()
# Sort by length (keep idx)
# sent_len, idx_sort = torch.sort(sent_len, descending=True)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output = self.enc_lstm(sent_packed)[0]
# seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = np.argsort(idx_sort)
sent_output = sent_output.index_select(1, torch.LongTensor(idx_unsort).to(
self.device))
sent_output = self.proj_enc(sent_output.view(-1, 2 * self.enc_lstm_dim)).view(
-1, bsize, 2 * self.enc_lstm_dim)
# Pooling
if self.pool_type == "mean":
sent_len = torch.FloatTensor(sent_len).unsqueeze(1).to(self.device)
emb = torch.sum(sent_output, 0).squeeze(0)
emb = emb / sent_len.expand_as(emb)
elif self.pool_type == "max":
emb = torch.max(sent_output, 0)[0].squeeze(0)
return emb, sent_output.permute(1, 0, 2)
"""
LSTM encoder
"""
class LSTMEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(LSTMEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=False, dropout=self.dpout_model)
self.device = device
def forward(self, sent_tuple):
# sent_len [max_len, ..., min_len] (batch)
# sent (seqlen x batch x worddim)
sent, sent_len = sent_tuple
sent_len = sent_len.cpu()
# Sort by length (keep idx)
# sent_len, idx_sort = torch.sort(sent_len, descending=True)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
# Handling padding in Recurrent Networks
sent_len = np.array(sent_len)
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output, hn = self.enc_lstm(sent_packed) # batch x 2*nhid
sent_hn = hn[0].squeeze(0)
# Un-sort by length
sent_output_un, _ = torch.nn.utils.rnn.pad_packed_sequence(sent_output,
batch_first=True)
idx_unsort = np.argsort(idx_sort)
emb = sent_hn.index_select(0, torch.LongTensor(idx_unsort).to(self.device))
return emb, sent_output_un
"""
GRU encoder
"""
class GRUEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(GRUEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.enc_lstm = nn.GRU(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=False, dropout=self.dpout_model)
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
sent_len = sent_len.cpu()
# Sort by length (keep idx)
# sent_len, idx_sort = torch.sort(sent_len, descending=True)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output, hn = self.enc_lstm(sent_packed)
sent_hn = hn.squeeze(0)
# batch x 2*nhid
# Un-sort by length
idx_unsort = np.argsort(idx_sort)
emb = sent_hn.index_select(0, torch.LongTensor(idx_unsort).to(self.device))
return emb, sent_output
"""
Inner attention from "hierarchical attention for document classification"
"""
class InnerAttentionNAACLEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(InnerAttentionNAACLEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'], bidirectional=True)
self.proj_key = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=False)
self.proj_lstm = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=False)
self.query_embedding = nn.Embedding(1, 2 * self.enc_lstm_dim)
self.softmax = nn.Softmax()
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
bsize = sent.size(1)
sent_len = sent_len.cpu()
# Sort by length (keep idx)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output = self.enc_lstm(sent_packed)[0]
# seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = np.argsort(idx_sort)
sent_output = sent_output.index_select(1, torch.LongTensor(idx_unsort).to(
self.device))
sent_output = sent_output.transpose(0, 1).contiguous()
sent_output_proj = self.proj_lstm(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(
bsize, -1,
2 * self.enc_lstm_dim)
sent_key_proj = self.proj_key(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(
bsize, -1, 2 * self.enc_lstm_dim)
sent_key_proj = torch.tanh(sent_key_proj)
# NAACL paper: u_it=tanh(W_w.h_it + b_w) (bsize, seqlen, 2nhid)
sent_w = self.query_embedding(
torch.LongTensor(bsize * [0]).to(self.device)).unsqueeze(
2) # (bsize, 2*nhid, 1)
Temp = 2
keys = sent_key_proj.bmm(sent_w).squeeze(2) / Temp
# Set probas of padding to zero in softmax
keys = keys + ((keys == 0).float() * -10000)
alphas = self.softmax(keys / Temp).unsqueeze(2).expand_as(sent_output)
# if int(time.time()) % 100 == 0:
# print('w', torch.max(sent_w), torch.min(sent_w))
# print('alphas', alphas[0, :, 0])
emb = torch.sum(alphas * sent_output_proj, 1).squeeze(1)
return emb, sent_output
"""
Inner attention inspired from "Self-attentive ..."
"""
class InnerAttentionMILAEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(InnerAttentionMILAEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'], bidirectional=True)
self.proj_key = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=False)
self.proj_lstm = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=False)
self.query_embedding = nn.Embedding(2, 2 * self.enc_lstm_dim)
self.softmax = nn.Softmax()
        self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
bsize = sent.size(1)
sent_len = sent_len.cpu()
# Sort by length (keep idx)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output = self.enc_lstm(sent_packed)[0]
# seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = np.argsort(idx_sort)
sent_output = sent_output.index_select(1, torch.LongTensor(idx_unsort).to(
self.device))
sent_output = sent_output.transpose(0, 1).contiguous()
sent_output_proj = self.proj_lstm(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(
bsize, -1,
2 * self.enc_lstm_dim)
sent_key_proj = self.proj_key(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(
bsize, -1, 2 * self.enc_lstm_dim)
sent_key_proj = torch.tanh(sent_key_proj)
# NAACL : u_it=tanh(W_w.h_it + b_w) like in NAACL paper
# Temperature
Temp = 3
sent_w1 = self.query_embedding(
torch.LongTensor(bsize * [0]).to(self.device)).unsqueeze(
2) # (bsize, nhid, 1)
keys1 = sent_key_proj.bmm(sent_w1).squeeze(2) / Temp
keys1 = keys1 + ((keys1 == 0).float() * -1000)
alphas1 = self.softmax(keys1).unsqueeze(2).expand_as(sent_key_proj)
emb1 = torch.sum(alphas1 * sent_output_proj, 1).squeeze(1)
sent_w2 = self.query_embedding(
torch.LongTensor(bsize * [1]).to(self.device)).unsqueeze(
2) # (bsize, nhid, 1)
keys2 = sent_key_proj.bmm(sent_w2).squeeze(2) / Temp
keys2 = keys2 + ((keys2 == 0).float() * -1000)
alphas2 = self.softmax(keys2).unsqueeze(2).expand_as(sent_key_proj)
emb2 = torch.sum(alphas2 * sent_output_proj, 1).squeeze(1)
sent_w3 = self.query_embedding(
torch.LongTensor(bsize * [1]).to(self.device)).unsqueeze(
2) # (bsize, nhid, 1)
keys3 = sent_key_proj.bmm(sent_w3).squeeze(2) / Temp
keys3 = keys3 + ((keys3 == 0).float() * -1000)
alphas3 = self.softmax(keys3).unsqueeze(2).expand_as(sent_key_proj)
emb3 = torch.sum(alphas3 * sent_output_proj, 1).squeeze(1)
sent_w4 = self.query_embedding(
torch.LongTensor(bsize * [1]).to(self.device)).unsqueeze(
2) # (bsize, nhid, 1)
keys4 = sent_key_proj.bmm(sent_w4).squeeze(2) / Temp
keys4 = keys4 + ((keys4 == 0).float() * -1000)
alphas4 = self.softmax(keys4).unsqueeze(2).expand_as(sent_key_proj)
emb4 = torch.sum(alphas4 * sent_output_proj, 1).squeeze(1)
# if int(time.time()) % 100 == 0:
# print('alphas', torch.cat((alphas1.data[0, :, 0],
# alphas2.data[0, :, 0],
# torch.abs(alphas1.data[0, :, 0] -
# alphas2.data[0, :, 0])), 1))
emb = torch.cat((emb1, emb2, emb3, emb4), 1)
return emb, sent_output
"""
Inner attention from Yang et al.
"""
class InnerAttentionYANGEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(InnerAttentionYANGEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim,
config['n_enc_layers'],
bidirectional=True)
self.proj_lstm = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=True)
self.proj_query = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=True)
self.proj_enc = nn.Linear(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim,
bias=True)
self.query_embedding = nn.Embedding(1, 2 * self.enc_lstm_dim)
self.softmax = nn.Softmax()
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
bsize = sent.size(1)
sent_len = sent_len.cpu()
# Sort by length (keep idx)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent = sent.index_select(1, torch.LongTensor(idx_sort).to(self.device))
sent_len = np.array(sent_len)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
sent_output = self.enc_lstm(sent_packed)[0]
# seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = np.argsort(idx_sort)
sent_output = sent_output.index_select(1, torch.LongTensor(idx_unsort).to(
self.device))
sent_output = sent_output.transpose(0, 1).contiguous()
sent_output_proj = self.proj_lstm(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(
bsize, -1,
2 * self.enc_lstm_dim)
sent_keys = self.proj_enc(sent_output.view(-1,
2 * self.enc_lstm_dim)).view(bsize,
-1,
2 * self.enc_lstm_dim)
sent_max = torch.max(sent_output, 1)[0].squeeze(1) # (bsize, 2*nhid)
sent_summary = self.proj_query(sent_max).unsqueeze(1).expand_as(sent_keys)
# (bsize, seqlen, 2*nhid)
sent_M = torch.tanh(sent_keys + sent_summary)
        # (bsize, seqlen, 2*nhid) YANG : M = tanh(Wh_i + Wh_avg)
sent_w = self.query_embedding(
torch.LongTensor(bsize * [0]).to(self.device)).unsqueeze(2)
# (bsize, 2*nhid, 1)
sent_alphas = self.softmax(sent_M.bmm(sent_w).squeeze(2)).unsqueeze(1)
# (bsize, 1, seqlen)
# if int(time.time()) % 200 == 0:
# print('w', torch.max(sent_w[0]), torch.min(sent_w[0]))
# print('alphas', sent_alphas[0][0][0:sent_len[0]])
# # Get attention vector
emb = sent_alphas.bmm(sent_output_proj).squeeze(1)
return emb, sent_output
"""
Hierarchical ConvNet
"""
class ConvNetEncoder(nn.Module):
def __init__(self, config, device='cuda'):
super(ConvNetEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.convnet1 = nn.Sequential(
nn.Conv1d(self.word_emb_dim, 2 * self.enc_lstm_dim, kernel_size=3,
stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.convnet2 = nn.Sequential(
nn.Conv1d(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim, kernel_size=3,
stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.convnet3 = nn.Sequential(
nn.Conv1d(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim, kernel_size=3,
stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.convnet4 = nn.Sequential(
nn.Conv1d(2 * self.enc_lstm_dim, 2 * self.enc_lstm_dim, kernel_size=3,
stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.device = device
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (batch)
# sent: (seqlen x batch x worddim)
sent, sent_len = sent_tuple
sent = sent.transpose(0, 1).transpose(1, 2).contiguous()
        # (batch, nhid, seqlen)
sent = self.convnet1(sent)
u1 = torch.max(sent, 2)[0]
sent = self.convnet2(sent)
u2 = torch.max(sent, 2)[0]
sent = self.convnet3(sent)
u3 = torch.max(sent, 2)[0]
sent = self.convnet4(sent)
u4 = torch.max(sent, 2)[0]
emb = torch.cat((u1, u2, u3, u4), 1)
return emb, sent.permute(0, 2, 1)
"""
BiLSTM
"""
class BiLSTM(nn.Module):
def __init__(self, config, device='cuda'):
super(BiLSTM, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim, 2,
bidirectional=True, dropout=self.dpout_model)
self.relu = nn.ReLU()
self.projection = nn.Linear(self.word_emb_dim, self.enc_lstm_dim)
self.device = device
def forward(self, sent_tuple):
sent, sent_len = sent_tuple
sent_len = sent_len.cpu()
bsize = sent.size(1)
sent_proj = self.relu(self.projection(sent))
out, (emb_ht, _) = self.enc_lstm(sent_proj)
emb = emb_ht[-2:].transpose(0, 1).contiguous().view(bsize, -1)
return emb, out
"""
Main module for Natural Language Inference
"""
class NLINet(nn.Module):
def __init__(self, config, weights=None, device='cuda'):
super(NLINet, self).__init__()
# classifier
self.nonlinear_fc = config['nonlinear_fc']
self.fc_dim = config['fc_dim']
self.n_classes = config['n_classes']
self.enc_lstm_dim = config['enc_lstm_dim']
self.encoder_type = config['encoder_type']
self.dpout_fc = config['dpout_fc']
self.embedding = nn.Embedding(config['n_words'], config['word_emb_dim'])
self.embedding.load_state_dict({'weight': weights})
self.embedding.weight.requires_grad = False
self.encoder = eval(self.encoder_type)(config, device=device)
self.inputdim = 4 * 2 * self.enc_lstm_dim
self.inputdim = 4 * self.inputdim if self.encoder_type in \
["ConvNetEncoder",
"InnerAttentionMILAEncoder"] else self.inputdim
self.inputdim = self.inputdim / 2 if self.encoder_type == "LSTMEncoder" \
else self.inputdim
self.inputdim = int(self.inputdim)
self.lin1 = nn.Linear(self.inputdim, self.fc_dim)
self.lin2 = nn.Linear(self.fc_dim, self.fc_dim)
self.lin3 = nn.Linear(self.fc_dim, self.n_classes)
for lin in [self.lin1, self.lin2, self.lin3]:
nn.init.xavier_uniform_(lin.weight)
nn.init.zeros_(lin.bias)
if self.nonlinear_fc:
self.classifier = nn.Sequential(
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.inputdim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.n_classes),
)
else:
self.classifier = nn.Sequential(
nn.Dropout(p=self.dpout_fc),
self.lin1,
nn.ReLU(),
nn.Dropout(p=self.dpout_fc),
self.lin2,
nn.ReLU(),
nn.Dropout(p=self.dpout_fc),
self.lin3
)
def forward(self, s1, s2):
# s1 : (s1, s1_len)
s1_embed = self.embedding(s1[0])
s2_embed = self.embedding(s2[0])
u, s1_out = self.encoder((s1_embed, s1[1]))
v, s2_out = self.encoder((s2_embed, s2[1]))
features = torch.cat((u, v, torch.abs(u - v), u * v), 1)
output = self.classifier(features)
return output, (s1_out, s2_out)
def encode(self, s1, is_probe=False):
# s1 : (s1, s1_len)
s1_embed = self.embedding(s1[0])
emb, out = self.encoder((s1_embed, s1[1]))
return emb, out
"""
Main module for Classification
"""
class ClassificationNet(nn.Module):
def __init__(self, config):
super(ClassificationNet, self).__init__()
# classifier
self.nonlinear_fc = config['nonlinear_fc']
self.fc_dim = config['fc_dim']
self.n_classes = config['n_classes']
self.enc_lstm_dim = config['enc_lstm_dim']
self.encoder_type = config['encoder_type']
self.dpout_fc = config['dpout_fc']
self.encoder = eval(self.encoder_type)(config)
self.inputdim = 2 * self.enc_lstm_dim
self.inputdim = 4 * self.inputdim if self.encoder_type == "ConvNetEncoder" else self.inputdim
self.inputdim = self.enc_lstm_dim if self.encoder_type == "LSTMEncoder" else self.inputdim
self.classifier = nn.Sequential(
nn.Linear(self.inputdim, 512),
nn.Linear(512, self.n_classes),
)
def forward(self, s1):
# s1 : (s1, s1_len)
u = self.encoder(s1)
output = self.classifier(u)
return output
def encode(self, s1):
emb, output = self.encoder(s1)
return emb, output
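# --- Minimal usage sketch (illustration only; not part of the original training code) ---
# Assumes torch/nn are imported at the top of this file. It builds a ConvNetEncoder
# from a toy config and runs a single forward pass on random tensors to show the
# expected input and output shapes.
if __name__ == "__main__":
    demo_config = {
        'bsize': 4,
        'word_emb_dim': 300,
        'enc_lstm_dim': 64,
        'pool_type': 'max',
    }
    demo_encoder = ConvNetEncoder(demo_config, device='cpu')
    demo_sent = torch.randn(10, 4, 300)      # (seqlen, batch, word_emb_dim)
    demo_len = torch.tensor([10, 9, 7, 5])   # lengths (unused by the ConvNet)
    demo_emb, demo_out = demo_encoder((demo_sent, demo_len))
    print(demo_emb.shape)  # torch.Size([4, 512])  = (batch, 4 * 2 * enc_lstm_dim)
    print(demo_out.shape)  # torch.Size([4, 10, 128]) = (batch, seqlen, 2 * enc_lstm_dim)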
|
import os
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
import numpy as np
save_dir = './Mnist_data'
# Download the MNIST data into save_dir
data_sets = mnist.read_data_sets(save_dir,
dtype=tf.uint8,
reshape=False,
validation_size=1000)
data_splits = ['train', 'test', 'validation']
# TODO: convert the MNIST dataset to TFRecord format
for d in range(len(data_splits)):
print('saving:' + data_splits[d])
data_set = data_sets[d]
print('data_set.images shape:', data_set.images.shape, ', data_set.labels shape:', data_set.labels.shape)
    filename = os.path.join(save_dir, 'tfrecord', data_splits[d] + '.tfrecords')
    os.makedirs(os.path.dirname(filename), exist_ok=True)  # make sure the output directory exists
    writer = tf.python_io.TFRecordWriter(filename)
for index in range(data_set.images.shape[0]):
image = data_set.images[index].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[data_set.images.shape[1]])),
'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[data_set.images.shape[2]])),
'depth': tf.train.Feature(int64_list=tf.train.Int64List(value=[data_set.images.shape[3]])),
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(data_set.labels[index])])),
'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image]))
}))
writer.write(example.SerializeToString())
writer.close()
# TODO: read the TFRecord data back and decode one example
filename = os.path.join(save_dir, 'tfrecord', 'train.tfrecords')
record_iterator = tf.python_io.tf_record_iterator(filename)
serialized_img_example = next(record_iterator)
example = tf.train.Example()
example.ParseFromString(serialized_img_example)
image = example.features.feature['image_raw'].bytes_list.value
label = example.features.feature['label'].int64_list.value[0]
width = example.features.feature['width'].int64_list.value[0]
height = example.features.feature['height'].int64_list.value[0]
img_flat = np.frombuffer(image[0], dtype=np.uint8)  # np.fromstring is deprecated
img_reshaped = img_flat.reshape((height, width, -1))
print(img_reshaped)
|
"""Function Introspection
Functions are first class objects
They have attributes __doc__ __annotations__
We can attach our own attributes
def my_func(a, b):
return a + b
my_func.category = 'math'
my_func.sub_category = 'arithmetic'
print(my_func.category) # math
print(my_func.sub_category) # arithmetic
print(dir(my_func)) # ['__annotations__', '__call__', '__class__', '__closure__', '__code__', '__defaults__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__get__', '__getattribute__', '__globals__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__kwdefaults__', '__le__', '__lt__', '__module__', '__name__', '__ne__', '__new__', '__qualname__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'category', 'sub_category']
Function Attributes:__name__, __defaults__, __kwdefaults__
__name__ -> name of function
__defaults__ -> tuple containing positional parameter defaults
__kwdefaults__ -> dictionary containing keyword-only parameter defaults
"""
def my_func(a, b=2, c=3, *, kw1, kw2=2):
pass
my_func.__name__ # my_func
my_func.__defaults__ # (2, 3)
my_func.__kwdefaults__ # {'kw2': 2}
# Function Attribute:__code__
def my_func(a, b=1, *args, **kwargs):
i = 10
b = min(i, b)
return a * b # my_func.__code__
# <code object my_func at 0x00020EEF ..
"""
This __code__ object itself has various properties, which include:
co_varnames # parameter and local variables
my_func.__code__.co_varnames -> ('a', 'b', 'args', 'kwargs', 'i')
parameter names first, followed by local variable names
co_argcount number of parameters
my_func.__code__.co_argcount -> 2
does not count *args and **kwargs!
# The inspect module: import inspect
ismethod(obj), isfunction(obj), isroutine(obj) and many others...
# What's the difference between a function and a method?
Classes and objects have attributes - an object that is bound (to the class or the object)
An attribute that is callable is called a method
"""
def my_func():
    pass
# isfunction(my_func) -> True
# ismethod(my_func) -> False
class MyClass:
    def func(self):  # func is bound to my_obj, an instance of MyClass
        pass
my_obj = MyClass()
# isfunction(my_obj.func) -> False
# ismethod(my_obj.func) -> True
# isroutine(my_func) -> True
# isroutine(my_obj.func) -> True
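# A small, runnable check of the attributes described above (illustration only):
import inspect
def demo_func(a, b=1, *args, **kwargs):
    i = 10
    return a * min(i, b)
print(demo_func.__code__.co_varnames)    # ('a', 'b', 'args', 'kwargs', 'i')
print(demo_func.__code__.co_argcount)    # 2 (does not count *args / **kwargs)
print(inspect.isfunction(my_func))       # True
print(inspect.ismethod(my_obj.func))     # True
print(inspect.isroutine(my_obj.func))    # True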
'''Code Introspection
I can recover the source code of our functions/methods
inspect.getsource(my_func) -> a string containing our entire def statement, including annotations, docstrings, etc.
I can find out in which module our function was created
inspect.getmodule(my_func) -> <module '__main__'>
inspect.getmodule(print) -> <module 'builtins' (built-in)>
inspect.getmodule(math.sin) -> <module 'math' (built-in)>
Function Comments
# setting up variable
i = 10
# TODO: Implement function
# some additional notes
'''
def my_func(a, b=1):
    # setting up variable
    i = 10
    # TODO: Implement function
    # some additional notes
    return a * min(i, b)
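# Runnable illustration of the helpers described above (assumes this module is
# executed from a file so that its source text is available to inspect):
import inspect, math
print(inspect.getmodule(my_func))    # <module '__main__' ...>
print(inspect.getmodule(print))      # <module 'builtins' (built-in)>
print(inspect.getmodule(math.sin))   # <module 'math' ...>
print(inspect.getsource(my_func))    # the full "def my_func(a, b=1): ..." block
print(inspect.getcomments(my_func))  # comments directly above the def, if any (None here)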
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""
Configuration file parsing
"""
import json
import re
import os
import traceback
from typing import Any, Dict, Iterable, List, Optional, Union, TYPE_CHECKING, TextIO
from skupper_router.management.entity import camelcase
from ..dispatch import QdDll
from .qdrouter import QdSchema
if TYPE_CHECKING:
from .schema import EntityType
try:
from ..dispatch import LogAdapter, LOG_WARNING, LOG_ERROR
_log_imported = True
except ImportError:
# unit test cannot import since LogAdapter not set up
_log_imported = False
class Config:
"""Load config entities from skrouterd.conf and validated against L{QdSchema}."""
def __init__(
self,
filename: Optional[str] = None,
schema: QdSchema = QdSchema(),
raw_json: bool = False
) -> None:
self.schema = schema
self.config_types = [et for et in schema.entity_types.values()
if schema.is_configuration(et)]
self._log_adapter = LogAdapter("AGENT") if _log_imported else None
if filename:
try:
self.load(filename, raw_json)
except Exception as e:
raise Exception("Cannot load configuration file %s: %s"
% (filename, e))
else:
self.entities: List[Dict[str, Any]] = []
def _log(self, level, text):
if self._log_adapter is not None:
info = traceback.extract_stack(limit=2)[0] # Caller frame info
self._log_adapter.log(level, text, info[0], info[1])
@staticmethod
def transform_sections(sections: List[Any]) -> None:
for s in sections:
s[0] = camelcase(s[0])
s[1] = dict((camelcase(k), v) for k, v in s[1].items())
if s[0] == "address":
s[0] = "router.config.address"
if s[0] == "autoLink":
s[0] = "router.config.autoLink"
if s[0] == "exchange":
s[0] = "router.config.exchange"
if s[0] == "binding":
s[0] = "router.config.binding"
def _parse(self, lines: Iterable[str]) -> List[Any]:
"""
Parse config file format into a section list
The config file format is a text file in JSON-ish syntax. It allows
the user to define a set of Entities which contain Attributes.
Attributes may be either a single item or a map of nested attributes.
Entities and map Attributes start with a single open brace on a line by
itself (no non-comment text after the opening brace!)
Entities and map Attributes are terminated by a single closing brace
that appears on a line by itself (no trailing comma and no non-comment
trailing text!)
Entity names and Attribute names and items are NOT enclosed in quotes
nor are they terminated with commas, however some select Attributes
have values which are expected to be valid JSON (double quoted
strings, etc)
Unlike JSON the config file also allows comments. A comment begins
with the '#' character and is terminated at the end of line.
"""
# note: these regexes expect that trailing comment and leading and
# trailing whitespace has been removed
#
entity = re.compile(r'([\w-]+)[ \t]*{[ \t]*$') # WORD {
attr_map = re.compile(r'([\$]*[\w-]+)[ \t]*:[ \t]*{[ \t]*$') # WORD: {
json_map = re.compile(r'("[\$]*[\w-]+)"[ \t]*:[ \t]*{[ \t]*$') # "WORD": {
attr_item = re.compile(r'([\w-]+)[ \t]*:[ \t]*([^ \t{]+.*)$') # WORD1: VALUE
end = re.compile(r'^}$') # } (only)
json_end = re.compile(r'}$') # } (at eol)
# The 'pattern:' and 'bindingKey:' attributes in the schema are special
# snowflakes. They allow '#' characters in their value, so they cannot
# be treated as comment delimiters
special_snowflakes = ['pattern', 'bindingKey', 'hostname']
hash_ok = re.compile(r'([\w-]+)[ \t]*:[ \t]*([\S]+).*')
# the 'openProperties' and 'groups' attributes are also special
# snowflakes in that their value is expected to be valid JSON. These
# values do allow single line comments which are stripped out, but the
# remaining content is expected to be valid JSON.
json_snowflakes = ['openProperties', 'groups']
self._line_num = 1
self._child_level = 0
self._in_json = False
def sub(line):
"""Do substitutions to make line json-friendly"""
line = line.strip()
# ignore empty and comment lines
if not line or line.startswith("#"):
self._line_num += 1
return ""
# watch JSON for embedded maps and map terminations
# always pass JSON as-is except appending a comma at the end
if self._in_json:
if json_map.search(line):
self._child_level += 1
if json_end.search(line):
self._child_level -= 1
if self._child_level == 0:
self._in_json = False
line = re.sub(json_end, r'},', line)
self._line_num += 1
return line
# filter off pattern items before stripping comments
if attr_item.search(line):
if re.sub(attr_item, r'\1', line) in special_snowflakes:
self._line_num += 1
return re.sub(hash_ok, r'"\1": "\2",', line)
# now trim trailing comment
line = line.split('#')[0].strip()
if entity.search(line):
# WORD { --> ["WORD", {
line = re.sub(entity, r'["\1", {', line)
elif attr_map.search(line):
# WORD: { --> ["WORD": {
key = re.sub(attr_map, r'\1', line)
line = re.sub(attr_map, r'"\1": {', line)
self._child_level += 1
if key in json_snowflakes:
self._in_json = True
elif attr_item.search(line):
# WORD: VALUE --> "WORD": "VALUE"
line = re.sub(attr_item, r'"\1": "\2",', line)
elif end.search(line):
# } --> "}," or "}]," depending on nesting level
if self._child_level > 0:
line = re.sub(end, r'},', line)
self._child_level -= 1
else:
# end top level entity list item
line = re.sub(end, r'}],', line)
else:
# unexpected syntax, let json parser figure it out
self._log(LOG_WARNING,
"Invalid config file syntax (line %d):\n"
">>> %s"
% (self._line_num, line))
self._line_num += 1
return line
js_text = "[%s]" % ("\n".join([sub(l) for l in lines]))
if self._in_json or self._child_level != 0:
self._log(LOG_WARNING,
"Configuration file: invalid entity nesting detected.")
spare_comma = re.compile(r',\s*([]}])') # Strip spare commas
js_text = re.sub(spare_comma, r'\1', js_text)
# Convert dictionary keys to camelCase
try:
sections = json.loads(js_text)
except Exception as e:
self.dump_json("Contents of failed config file", js_text)
raise
Config.transform_sections(sections)
return sections
def _parserawjson(self, lines):
"""Parse raw json config file format into a section list"""
def sub(line):
# ignore comment lines that start with "[whitespace] #"
line = "" if line.strip().startswith('#') else line
return line
js_text = "%s" % ("\n".join([sub(l) for l in lines]))
try:
sections = json.loads(js_text)
except Exception as e:
self.dump_json("Contents of failed json-format config file", js_text)
raise
Config.transform_sections(sections)
return sections
def get_config_types(self) -> List['EntityType']:
return self.config_types
def load(
self,
source: Union[str, TextIO, List[str]],
raw_json: bool = False
) -> None:
"""
Load a configuration file.
@param source: A file name, open file object or iterable list of lines
@param raw_json: Source is pure json not needing conf-style substitutions
"""
if isinstance(source, str):
raw_json |= source.endswith(".json")
with open(source) as f:
self.load(f, raw_json)
else:
sections = self._parserawjson(source) if raw_json else self._parse(source)
# Add missing singleton sections
for et in self.get_config_types():
if et.singleton and not et.deprecated and not [s for s in sections if s[0] == et.short_name]:
sections.append((et.short_name, {}))
entities = [dict(type=self.schema.long_name(s[0]), **s[1]) for s in sections]
self.schema.validate_all(entities)
self.entities = entities
def by_type(self, entity_type: str) -> List[Union[Dict[str, Any], Any]]:
"""Return entities of given type"""
entity_type = self.schema.long_name(entity_type)
return [e for e in self.entities if e['type'] == entity_type]
def remove(self, entity: Dict[str, Any]) -> None:
self.entities.remove(entity)
def dump_json(self, title: str, js_text: str) -> None:
# Function for config file parse failure logging.
# js_text is the pre-processed config-format json string or the
# raw json-format string that was presented to the json interpreter.
# The logs generated here correlate exactly to the line, column,
# and character numbers reported by json error exceptions.
# For each line 'Column 1' immediately follows the vertical bar.
self._log(LOG_ERROR, title)
lines = js_text.split("\n")
for idx in range(len(lines)):
self._log(LOG_ERROR, "Line %d |%s" % (idx + 1, lines[idx]))
class PolicyConfig(Config):
def __init__(
self,
filename: Optional[str] = None,
schema: QdSchema = QdSchema(),
raw_json: bool = False
) -> None:
super(PolicyConfig, self).__init__(filename, schema, raw_json)
def get_config_types(self) -> List[Any]:
return [s for s in self.config_types if 'policy' in s.name]
def configure_dispatch(dispatch: int, lib_handle: int, filename: str) -> None:
"""Called by C router code to load configuration file and do configuration"""
qd = QdDll(lib_handle)
dispatch = qd.qd_dispatch_p(dispatch)
config = Config(filename)
# NOTE: Can't import agent until dispatch C extension module is initialized.
from .agent import Agent
agent = Agent(dispatch, qd)
qd.qd_dispatch_set_agent(dispatch, agent)
def configure(attributes):
"""Configure an entity and remove it from config"""
agent.configure(attributes)
config.remove(attributes)
modules = set(agent.schema.entity_type("log").attributes["module"].atype.tags)
for l in config.by_type('log'):
configure(l)
modules.remove(l["module"])
# Add default entities for any log modules not configured.
for m in modules:
agent.configure(attributes=dict(type="log", module=m))
# Configure and prepare the router before we can activate the agent.
configure(config.by_type('router')[0])
qd.qd_dispatch_prepare(dispatch)
qd.qd_router_setup_late(dispatch) # Actions requiring active management agent.
agent.activate("$_management_internal")
from skupper_router_internal.display_name.display_name import DisplayNameService
displayname_service = DisplayNameService()
qd.qd_dispatch_register_display_name_service(dispatch, displayname_service)
# Configure policy and policy manager before vhosts
policyDir = config.by_type('policy')[0]['policyDir']
policyDefaultVhost = config.by_type('policy')[0]['defaultVhost']
useHostnamePatterns = config.by_type('policy')[0]['enableVhostNamePatterns']
maxMessageSize = config.by_type('policy')[0]['maxMessageSize']
for a in config.by_type("policy"):
configure(a)
agent.policy.set_default_vhost(policyDefaultVhost)
agent.policy.set_use_hostname_patterns(useHostnamePatterns)
agent.policy.set_max_message_size(maxMessageSize)
# Configure a block of types
for t in ("sslProfile",
"router.config.address", "router.config.autoLink",
"router.config.exchange", "router.config.binding",
"vhost", "httpListener", "httpConnector", "tcpListener", "tcpConnector"):
for a in config.by_type(t):
configure(a)
if t == "sslProfile":
display_file_name = a.get('uidNameMappingFile')
if display_file_name:
ssl_profile_name = a.get('name')
displayname_service.add(ssl_profile_name, display_file_name)
# Configure remaining types except for connector and listener
for e in config.entities:
if not e['type'] in ['io.skupper.router.connector', 'io.skupper.router.listener']:
configure(e)
# Load the vhosts from the .json files in policyDir
# Only vhosts are loaded. Other entities in these files are silently discarded.
if not policyDir == '':
apath = os.path.abspath(policyDir)
for i in os.listdir(policyDir):
if i.endswith(".json"):
pconfig = PolicyConfig(os.path.join(apath, i))
for a in pconfig.by_type("vhost"):
agent.configure(a)
# Static configuration is loaded except for connectors and listeners.
# Configuring connectors and listeners last starts inter-router and user messages
# when the router is in a known and repeatable initial configuration state.
for t in "connector", "listener":
for a in config.by_type(t):
configure(a)
|
from tests.unit.dataactcore.factories.domain import OfficeFactory
from tests.unit.dataactcore.factories.staging import (DetachedAwardFinancialAssistanceFactory,
PublishedAwardFinancialAssistanceFactory)
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs38_detached_award_financial_assistance_2_2'
def test_column_headers(database):
expected_subset = {'row_number', 'funding_office_code', 'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success_ignore_null_pafa(database):
""" Test that empty funding office codes aren't matching invalid office codes from the base record. """
office_1 = OfficeFactory(office_code='12345b', contract_funding_office=False,
financial_assistance_funding_office=True)
# Base record has no funding office code, future records don't affect it
pub_award_1 = PublishedAwardFinancialAssistanceFactory(funding_office_code='', unique_award_key='zyxwv_123',
action_date='20181018', award_modification_amendme='0',
is_active=True)
pub_award_2 = PublishedAwardFinancialAssistanceFactory(funding_office_code='abc', unique_award_key='zyxwv_123',
action_date='20181019', award_modification_amendme='1',
is_active=True)
# Base record has an invalid code but new record has a funding office entered (ignore this rule)
pub_award_3 = PublishedAwardFinancialAssistanceFactory(funding_office_code='abc', unique_award_key='abcd_123',
action_date='20181019', award_modification_amendme='0',
is_active=True)
# Base record with a valid office code (case insensitive)
pub_award_4 = PublishedAwardFinancialAssistanceFactory(funding_office_code='12345B', unique_award_key='1234_abc',
action_date='20181019', award_modification_amendme='0',
is_active=True)
# Earliest record inactive, newer record has valid entry, inactive date matching active doesn't mess it up
pub_award_5 = PublishedAwardFinancialAssistanceFactory(funding_office_code='abc', unique_award_key='4321_cba',
action_date='20181018', award_modification_amendme='0',
is_active=False)
pub_award_6 = PublishedAwardFinancialAssistanceFactory(funding_office_code='abc', unique_award_key='4321_cba',
action_date='20181019', award_modification_amendme='1',
is_active=False)
pub_award_7 = PublishedAwardFinancialAssistanceFactory(funding_office_code='12345b', unique_award_key='4321_cba',
action_date='20181019', award_modification_amendme='1',
is_active=True)
# New entry for base award with no office code
det_award_1 = DetachedAwardFinancialAssistanceFactory(funding_office_code='', unique_award_key='zyxwv_123',
action_date='20181020', award_modification_amendme='2',
correction_delete_indicatr=None)
# New entry for base award with invalid code but entry has a funding office code
det_award_2 = DetachedAwardFinancialAssistanceFactory(funding_office_code='abd', unique_award_key='abcd_123',
action_date='20181020', award_modification_amendme='1',
correction_delete_indicatr=None)
# New entry for valid funding office
det_award_3 = DetachedAwardFinancialAssistanceFactory(funding_office_code=None, unique_award_key='1234_abc',
action_date='20181020', award_modification_amendme='1',
correction_delete_indicatr=None)
# Correction to base record (ignore)
det_award_4 = DetachedAwardFinancialAssistanceFactory(funding_office_code='', unique_award_key='abcd_123',
action_date='20181019', award_modification_amendme='0',
correction_delete_indicatr='C')
# New entry for earliest inactive
det_award_5 = DetachedAwardFinancialAssistanceFactory(funding_office_code='', unique_award_key='4321_cba',
action_date='20181020', award_modification_amendme='2',
correction_delete_indicatr=None)
errors = number_of_errors(_FILE, database, models=[office_1, pub_award_1, pub_award_2, pub_award_3,
pub_award_4, pub_award_5, pub_award_6, pub_award_7, det_award_1,
det_award_2, det_award_3, det_award_4, det_award_5])
assert errors == 0
def test_failure(database):
""" Test fail that empty funding office codes aren't matching invalid office codes from the base record. """
office_1 = OfficeFactory(office_code='12345a', contract_funding_office=False,
financial_assistance_funding_office=True)
office_2 = OfficeFactory(office_code='abcd', contract_funding_office=True,
financial_assistance_funding_office=False)
# Invalid code in record
pub_award_1 = PublishedAwardFinancialAssistanceFactory(funding_office_code='abc', unique_award_key='zyxwv_123',
action_date='20181018', award_modification_amendme='0',
is_active=True)
# Earliest record inactive, newer record has invalid entry
pub_award_2 = PublishedAwardFinancialAssistanceFactory(funding_office_code='12345a', unique_award_key='4321_cba',
action_date='20181018', award_modification_amendme='0',
is_active=False)
pub_award_3 = PublishedAwardFinancialAssistanceFactory(funding_office_code='abc', unique_award_key='4321_cba',
action_date='20181019', award_modification_amendme='1',
is_active=True)
# Has a valid code but it's not a funding assistance office
pub_award_4 = PublishedAwardFinancialAssistanceFactory(funding_office_code='abcd', unique_award_key='123_abc',
action_date='20181018', award_modification_amendme='0',
is_active=True)
# award_modification_amendme number is null
pub_award_5 = PublishedAwardFinancialAssistanceFactory(funding_office_code='abc', unique_award_key='zyxwv_1234',
action_date='20181018', award_modification_amendme=None,
is_active=True)
# Entry for invalid code in base record
det_award_1 = DetachedAwardFinancialAssistanceFactory(funding_office_code='', unique_award_key='zyxwv_123',
action_date='20181020', award_modification_amendme='2',
correction_delete_indicatr=None)
# Entry with award_modification_amendme null
det_award_2 = DetachedAwardFinancialAssistanceFactory(funding_office_code='', unique_award_key='zyxwv_123',
action_date='20181020', award_modification_amendme=None,
correction_delete_indicatr=None)
# New entry for earliest inactive
det_award_3 = DetachedAwardFinancialAssistanceFactory(funding_office_code='', unique_award_key='4321_cba',
action_date='20181020', award_modification_amendme='2',
correction_delete_indicatr=None)
# New entry for has valid non-funding assistance code
det_award_4 = DetachedAwardFinancialAssistanceFactory(funding_office_code='', unique_award_key='123_abc',
action_date='20181020', award_modification_amendme='2',
correction_delete_indicatr=None)
# Entry for award_modification_amendme null in base record
det_award_5 = DetachedAwardFinancialAssistanceFactory(funding_office_code='', unique_award_key='zyxwv_1234',
action_date='20181020', award_modification_amendme='2',
correction_delete_indicatr=None)
errors = number_of_errors(_FILE, database, models=[office_1, office_2, pub_award_1, pub_award_2, pub_award_3,
pub_award_4, pub_award_5, det_award_1, det_award_2, det_award_3,
det_award_4, det_award_5])
assert errors == 5
|
import os
import pytest
from monai.networks.nets import EfficientNetBN, resnet18
from pytorch_lightning import seed_everything, Trainer
from kaggle_brain3d.data import BrainScansDM
from kaggle_brain3d.models import LitBrainMRI, make_submission
from tests.test_data import _generate_synthetic_dataset
_PATH_HERE = os.path.dirname(__file__)
@pytest.mark.parametrize("net", ["efficientnet-b0", EfficientNetBN("efficientnet-b0")])
def test_create_model(net):
LitBrainMRI(net=net)
@pytest.mark.parametrize("prepare", [True, False])
def test_train_model(tmpdir, prepare):
seed_everything(42)
_generate_synthetic_dataset(tmpdir, phase="train", scans='FLAIR', nb_users=20)
_generate_synthetic_dataset(tmpdir, phase="test", scans='FLAIR', nb_users=5)
dm = BrainScansDM(
data_dir=tmpdir,
scan_types="FLAIR",
batch_size=2,
cache_dir=tmpdir,
crop_thr=None,
split=0.6,
# train_transforms=rtr.Compose(TRAIN_TRANSFORMS, transform_call=default_transform_call),
# valid_transforms=rtr.Compose(VAL_TRANSFORMS, transform_call=default_transform_call),
)
if prepare:
dm.prepare_data()
net = resnet18(pretrained=False, spatial_dims=3, n_input_channels=1, num_classes=1)
model = LitBrainMRI(net=net)
trainer = Trainer(max_epochs=2, gpus=0)
trainer.fit(model, datamodule=dm)
df_sub = make_submission(model, dm.test_dataloader())
assert len(df_sub) == 5
|
from enum import Enum
class Cloud(Enum):
ALIBABA = 'ALIBABA'
AWS = 'AWS'
AZURE = 'AZURE'
DIGITALOCEAN = 'DIGITALOCEAN'
GCP = 'GCP'
IBM = 'IBM'
ORACLE = 'ORACLE'
|
# -*- coding: utf-8 -*-
"""
/dms/help_document/views_show.py
.. displays the help texts for a web application
Django content Management System
Hans Rauch
hans.rauch@gmx.net
The programs of the dms system may be used freely and adapted
to specific needs.
0.01  13.09.2007  Start of work
"""
from django.utils.translation import ugettext as _
from django.shortcuts import render_to_response
from django.template.loader import get_template
from django.template import Context
from django.utils.safestring import SafeData, mark_safe, SafeUnicode
from django.utils.translation import ugettext as _
from dms.queries import get_site_by_id
from dms.queries import get_item_container
from dms.queries import get_app
from dms_ext.extension import * # override dms functions
# -----------------------------------------------------
def helpdocument_show(request, app):
""" zeigt die kompletten Hilfetexte """
def sort_by_description(item):
return item[1]['title']
    # --- look up the object that holds the help text(s)
path = request.path.replace('form', 'formular')
item_container = get_item_container(path, '')
tSection = get_template('app/helpdocument/help_item.html')
im = 'from dms.%s.help_form import help_form' % app
try:
exec(im)
except:
exec('from dms.help_form import help_form')
site = get_site_by_id(1)
if app == 'comment':
description = _(u'Kommentarsystem')
else:
description = get_app('dms'+app).description
my_title = _(u'Djambala-Hilfesystem')
help_items = help_form.items()
help_items.sort(key=sort_by_description)
content = ''
for help in help_items:
if not help[1].has_key('info'):
section = Context ( { 'help_name' : help[0],
'help_title': help[1]['title'],
'help_text' : help[1]['help'] } )
content += tSection.render(section)
vars = {
'header_title': my_title,
'title': my_title,
'sub_title': description,
'site': site,
'this_site_title': my_title,
'text': mark_safe(item_container.item.text),
'text_more': mark_safe(item_container.item.text_more),
'image_url': item_container.item.image_url,
'content': mark_safe(content),
}
return render_to_response ( 'base_help.html', vars )
|
print("Hello, Github users!")
|
from typing import NamedTuple, Iterable, Sequence, Tuple, Callable
from requests import request, RequestException, Response
from saga_requests.utils import get_reduce_data
from .exceptions import SagaCompensationException
class SagaContext(NamedTuple):
context_path: Iterable[str]
class SagaRequestKwargs(NamedTuple):
data: dict = {}
headers: dict = {}
def parse(self, saga_context: dict):
return self._parse(
saga_context=saga_context,
params={
'data': self.data,
'headers': self.headers
}
)
def _parse(self, saga_context: dict, params) -> dict:
for key in params:
if isinstance(params[key], dict):
params[key] = self._parse(saga_context, params[key])
elif isinstance(params[key], SagaContext):
params[key] = get_reduce_data(saga_context, params[key].context_path)
return params
class SagaRequest(NamedTuple):
method: str
url: str
request_kwargs: SagaRequestKwargs
on_success: Callable[[dict], None] = None
on_failure: Callable[[Response, Exception], None] = None
class SagaAction(NamedTuple):
id: str
act: SagaRequest
compensate: SagaRequest
def exec_act(self, saga_context: dict) -> dict:
return self._exec_request(
saga_request=self.act,
saga_context=saga_context
)
def exec_compensate(self, saga_context: dict) -> dict:
return self._exec_request(
saga_request=self.compensate,
saga_context=saga_context
)
@staticmethod
def _exec_request(saga_request: SagaRequest, saga_context: dict):
"""
:raises: RequestException, ValueError
"""
response = request(
method=saga_request.method,
url=saga_request.url,
**saga_request.request_kwargs.parse(saga_context=saga_context)
)
try:
response.raise_for_status()
result = response.json()
except (RequestException, ValueError) as e:
if saga_request.on_failure:
saga_request.on_failure(response, e)
raise
if saga_request.on_success:
saga_request.on_success(result)
return result
class SagaBuilder:
_actions: Sequence[SagaAction]
_act_context: dict = {}
_compensate_context: dict = {}
def __init__(self, actions):
self._actions = actions
def run(self) -> Tuple[dict, dict]:
for i in range(len(self._actions)):
_action = self._actions[i]
try:
self._act_context[_action.id] = _action.exec_act(self._act_context.copy())
except (RequestException, ValueError) as e:
if i > 0:
self._compensate_successful_actions(interrupted_action_index=i)
if isinstance(e, ValueError):
raise
return self._act_context, self._compensate_context
def _compensate_successful_actions(self, interrupted_action_index: int):
_actions_to_compensate = self._actions[:interrupted_action_index][::-1]
for j in range(len(_actions_to_compensate)):
_action = _actions_to_compensate[j]
try:
self._compensate_context[_action.id] = _action.exec_compensate(self._act_context.copy())
except SagaCompensationException:
# TODO
pass
|
## Copyright 2019 Gia-Lac TRAN, Edwin V. Bonilla, John P. Cunningham, Pietro Michiardi, and Maurizio Filippone
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from dgp.dgp import Dgp
from dgp.dgp_rf import Dgp_Rf
from dgp.dgp_sorf import Dgp_Sorf
from dgp.dgp_sorf_optim import Dgp_Sorf_Optim
from dgp.dgp_sorf_optim_mcd import Dgp_Sorf_Optim_Mcd
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from envs.multiagentenv import MultiAgentEnv
from envs.starcraft2.maps import get_map_params
import atexit
from operator import attrgetter
from copy import deepcopy
import numpy as np
import enum
import math
from absl import logging
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import protocol
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
from s2clientprotocol import raw_pb2 as r_pb
from s2clientprotocol import debug_pb2 as d_pb
races = {
"R": sc_common.Random,
"P": sc_common.Protoss,
"T": sc_common.Terran,
"Z": sc_common.Zerg,
}
difficulties = {
"1": sc_pb.VeryEasy,
"2": sc_pb.Easy,
"3": sc_pb.Medium,
"4": sc_pb.MediumHard,
"5": sc_pb.Hard,
"6": sc_pb.Harder,
"7": sc_pb.VeryHard,
"8": sc_pb.CheatVision,
"9": sc_pb.CheatMoney,
"A": sc_pb.CheatInsane,
}
actions = {
"move": 16, # target: PointOrUnit
"attack": 23, # target: PointOrUnit
"stop": 4, # target: None
"heal": 386, # Unit
}
class Direction(enum.IntEnum):
NORTH = 0
SOUTH = 1
EAST = 2
WEST = 3
class StarCraft2Env(MultiAgentEnv):
"""The StarCraft II environment for decentralised multi-agent
micromanagement scenarios.
"""
def __init__(
self,
map_name="2s3z",
step_mul=8,
move_amount=2,
difficulty="7",
game_version=None,
seed=None,
continuing_episode=False,
obs_all_health=True,
obs_own_health=True,
obs_last_action=False,
obs_pathing_grid=False,
obs_terrain_height=False,
obs_instead_of_state=False,
obs_timestep_number=False,
state_last_action=True,
state_timestep_number=False,
reward_sparse=False,
reward_only_positive=True,
reward_death_value=10,
reward_win=200,
reward_defeat=0,
reward_negative_scale=0.5,
reward_scale=True,
reward_scale_rate=20,
replay_dir="",
replay_prefix="",
window_size_x=1920,
window_size_y=1200,
heuristic_ai=False,
heuristic_rest=False,
debug=False,
):
"""
        Create a StarCraft2Env environment.
Parameters
----------
map_name : str, optional
            The name of the SC2 map to play (default is "2s3z"). The full list
            can be found by running bin/map_list.
step_mul : int, optional
How many game steps per agent step (default is 8). None
indicates to use the default map step_mul.
move_amount : float, optional
How far away units are ordered to move per step (default is 2).
difficulty : str, optional
The difficulty of built-in computer AI bot (default is "7").
game_version : str, optional
StarCraft II game version (default is None). None indicates the
latest version.
seed : int, optional
            Random seed used during game initialisation. This allows runs to be reproduced.
continuing_episode : bool, optional
Whether to consider episodes continuing or finished after time
limit is reached (default is False).
obs_all_health : bool, optional
Agents receive the health of all units (in the sight range) as part
of observations (default is True).
obs_own_health : bool, optional
            Agents receive their own health as a part of observations (default
            is True). This flag is ignored when obs_all_health == True.
obs_last_action : bool, optional
Agents receive the last actions of all units (in the sight range)
as part of observations (default is False).
obs_pathing_grid : bool, optional
Whether observations include pathing values surrounding the agent
(default is False).
obs_terrain_height : bool, optional
Whether observations include terrain height values surrounding the
agent (default is False).
obs_instead_of_state : bool, optional
Use combination of all agents' observations as the global state
(default is False).
obs_timestep_number : bool, optional
Whether observations include the current timestep of the episode
(default is False).
state_last_action : bool, optional
Include the last actions of all agents as part of the global state
(default is True).
state_timestep_number : bool, optional
Whether the state include the current timestep of the episode
(default is False).
reward_sparse : bool, optional
            Receive 1/-1 reward for winning/losing an episode (default is
            False). The rest of the reward parameters are ignored if True.
reward_only_positive : bool, optional
Reward is always positive (default is True).
reward_death_value : float, optional
The amount of reward received for killing an enemy unit (default
is 10). This is also the negative penalty for having an allied unit
killed if reward_only_positive == False.
reward_win : float, optional
The reward for winning in an episode (default is 200).
reward_defeat : float, optional
            The reward for losing in an episode (default is 0). This value
should be nonpositive.
reward_negative_scale : float, optional
Scaling factor for negative rewards (default is 0.5). This
parameter is ignored when reward_only_positive == True.
reward_scale : bool, optional
Whether or not to scale the reward (default is True).
reward_scale_rate : float, optional
Reward scale rate (default is 20). When reward_scale == True, the
reward received by the agents is divided by (max_reward /
reward_scale_rate), where max_reward is the maximum possible
reward per episode without considering the shield regeneration
of Protoss units.
replay_dir : str, optional
The directory to save replays (default is None). If None, the
replay will be saved in Replays directory where StarCraft II is
installed.
replay_prefix : str, optional
The prefix of the replay to be saved (default is None). If None,
the name of the map will be used.
window_size_x : int, optional
The length of StarCraft II window size (default is 1920).
window_size_y: int, optional
The height of StarCraft II window size (default is 1200).
heuristic_ai: bool, optional
Whether or not to use a non-learning heuristic AI (default False).
heuristic_rest: bool, optional
At any moment, restrict the actions of the heuristic AI to be
chosen from actions available to RL agents (default is False).
Ignored if heuristic_ai == False.
debug: bool, optional
Log messages about observations, state, actions and rewards for
debugging purposes (default is False).
"""
# Map arguments
self.map_name = map_name
map_params = get_map_params(self.map_name)
self.n_agents = map_params["n_agents"]
self.n_enemies = map_params["n_enemies"]
self.episode_limit = map_params["limit"]
self._move_amount = move_amount
self._step_mul = step_mul
self.difficulty = difficulty
# Observations and state
self.obs_own_health = obs_own_health
self.obs_all_health = obs_all_health
self.obs_instead_of_state = obs_instead_of_state
self.obs_last_action = obs_last_action
self.obs_pathing_grid = obs_pathing_grid
self.obs_terrain_height = obs_terrain_height
self.obs_timestep_number = obs_timestep_number
self.state_last_action = state_last_action
self.state_timestep_number = state_timestep_number
if self.obs_all_health:
self.obs_own_health = True
self.n_obs_pathing = 8
self.n_obs_height = 9
# Rewards args
self.reward_sparse = reward_sparse
self.reward_only_positive = reward_only_positive
self.reward_negative_scale = reward_negative_scale
self.reward_death_value = reward_death_value
self.reward_win = reward_win
self.reward_defeat = reward_defeat
self.reward_scale = reward_scale
self.reward_scale_rate = reward_scale_rate
# Other
self.game_version = game_version
self.continuing_episode = continuing_episode
self._seed = seed
self.heuristic_ai = heuristic_ai
self.heuristic_rest = heuristic_rest
self.debug = debug
self.window_size = (window_size_x, window_size_y)
self.replay_dir = replay_dir
self.replay_prefix = replay_prefix
# Actions
self.n_actions_no_attack = 6
self.n_actions_move = 4
self.n_actions = self.n_actions_no_attack + self.n_enemies
# Map info
self._agent_race = map_params["a_race"]
self._bot_race = map_params["b_race"]
self.shield_bits_ally = 1 if self._agent_race == "P" else 0
self.shield_bits_enemy = 1 if self._bot_race == "P" else 0
self.unit_type_bits = map_params["unit_type_bits"]
self.map_type = map_params["map_type"]
self.max_reward = (
self.n_enemies * self.reward_death_value + self.reward_win
)
self.agents = {}
self.enemies = {}
self._episode_count = 0
self._episode_steps = 0
self._total_steps = 0
self._obs = None
self.battles_won = 0
self.battles_game = 0
self.timeouts = 0
self.force_restarts = 0
self.last_stats = None
self.death_tracker_ally = np.zeros(self.n_agents)
self.death_tracker_enemy = np.zeros(self.n_enemies)
self.previous_ally_units = None
self.previous_enemy_units = None
self.last_action = np.zeros((self.n_agents, self.n_actions))
self._min_unit_type = 0
self.marine_id = self.marauder_id = self.medivac_id = 0
self.hydralisk_id = self.zergling_id = self.baneling_id = 0
self.stalker_id = self.colossus_id = self.zealot_id = 0
self.max_distance_x = 0
self.max_distance_y = 0
self.map_x = 0
self.map_y = 0
self.terrain_height = None
self.pathing_grid = None
self._run_config = None
self._sc2_proc = None
self._controller = None
self.max_n_agents = self.n_agents
self.max_n_enemies = self.n_enemies
self.enemy_tags = None
self.ally_tags = None
self.dist_mtx = None
# Try to avoid leaking SC2 processes on shutdown
atexit.register(lambda: self.close())
def _launch(self):
"""Launch the StarCraft II game."""
self._run_config = run_configs.get(version=self.game_version)
_map = maps.get(self.map_name)
# Setting up the interface
interface_options = sc_pb.InterfaceOptions(raw=True, score=False)
self._sc2_proc = self._run_config.start(window_size=self.window_size)
self._controller = self._sc2_proc.controller
# Request to create the game
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(
map_path=_map.path,
map_data=self._run_config.map_data(_map.path)),
realtime=False,
random_seed=self._seed)
create.player_setup.add(type=sc_pb.Participant)
create.player_setup.add(type=sc_pb.Computer, race=races[self._bot_race],
difficulty=difficulties[self.difficulty])
self._controller.create_game(create)
join = sc_pb.RequestJoinGame(race=races[self._agent_race],
options=interface_options)
self._controller.join_game(join)
game_info = self._controller.game_info()
map_info = game_info.start_raw
map_play_area_min = map_info.playable_area.p0
map_play_area_max = map_info.playable_area.p1
self.max_distance_x = map_play_area_max.x - map_play_area_min.x
self.max_distance_y = map_play_area_max.y - map_play_area_min.y
self.map_x = map_info.map_size.x
self.map_y = map_info.map_size.y
if map_info.pathing_grid.bits_per_pixel == 1:
vals = np.array(list(map_info.pathing_grid.data)).reshape(
self.map_x, int(self.map_y / 8))
self.pathing_grid = np.transpose(np.array([
[(b >> i) & 1 for b in row for i in range(7, -1, -1)]
for row in vals], dtype=np.bool))
else:
self.pathing_grid = np.invert(np.flip(np.transpose(np.array(
list(map_info.pathing_grid.data), dtype=np.bool).reshape(
self.map_x, self.map_y)), axis=1))
self.terrain_height = np.flip(
np.transpose(np.array(list(map_info.terrain_height.data))
.reshape(self.map_x, self.map_y)), 1) / 255
def reset(self):
"""Reset the environment. Required after each full episode.
Returns initial observations and states.
"""
self._episode_steps = 0
if self._episode_count == 0:
# Launch StarCraft II
self._launch()
else:
self._restart()
# Information kept for counting the reward
self.death_tracker_ally = np.zeros(self.n_agents)
self.death_tracker_enemy = np.zeros(self.n_enemies)
self.previous_ally_units = None
self.previous_enemy_units = None
self.win_counted = False
self.defeat_counted = False
self.last_action = np.zeros((self.n_agents, self.n_actions))
if self.heuristic_ai:
self.heuristic_targets = [None] * self.n_agents
try:
self._obs = self._controller.observe()
self.init_units()
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
if self.debug:
logging.debug("Started Episode {}"
.format(self._episode_count).center(60, "*"))
self._calc_distance_mtx()
temp = self.get_masks()
#self.get_entities()
return self.get_obs(), self.get_state()
def _restart(self):
"""Restart the environment by killing all units on the map.
There is a trigger in the SC2Map file, which restarts the
episode when there are no units left.
"""
try:
self._kill_all_units()
self._controller.step(2)
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
def full_restart(self):
"""Full restart. Closes the SC2 process and launches a new one. """
self._sc2_proc.close()
self._launch()
self.force_restarts += 1
def step(self, actions):
"""A single environment step. Returns reward, terminated, info."""
actions_int = [int(a) for a in actions]
self.last_action = np.eye(self.n_actions)[np.array(actions_int)]
# Collect individual actions
sc_actions = []
if self.debug:
logging.debug("Actions".center(60, "-"))
for a_id, action in enumerate(actions_int):
if not self.heuristic_ai:
sc_action = self.get_agent_action(a_id, action)
else:
sc_action, action_num = self.get_agent_action_heuristic(
a_id, action)
actions[a_id] = action_num
if sc_action:
sc_actions.append(sc_action)
# Send action request
req_actions = sc_pb.RequestAction(actions=sc_actions)
try:
self._controller.actions(req_actions)
# Make step in SC2, i.e. apply actions
self._controller.step(self._step_mul)
# Observe here so that we know if the episode is over.
self._obs = self._controller.observe()
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
return 0, True, {}
self._total_steps += 1
self._episode_steps += 1
# Update units
game_end_code = self.update_units()
self._calc_distance_mtx()
terminated = False
reward = self.reward_battle()
info = {"battle_won": False}
if game_end_code is not None:
# Battle is over
terminated = True
self.battles_game += 1
if game_end_code == 1 and not self.win_counted:
self.battles_won += 1
self.win_counted = True
info["battle_won"] = True
if not self.reward_sparse:
reward += self.reward_win
else:
reward = 1
elif game_end_code == -1 and not self.defeat_counted:
self.defeat_counted = True
if not self.reward_sparse:
reward += self.reward_defeat
else:
reward = -1
elif self._episode_steps >= self.episode_limit:
# Episode limit reached
terminated = True
if self.continuing_episode:
info["episode_limit"] = True
self.battles_game += 1
self.timeouts += 1
if self.debug:
logging.debug("Reward = {}".format(reward).center(60, '-'))
if terminated:
self._episode_count += 1
if self.reward_scale:
reward /= self.max_reward / self.reward_scale_rate
return reward, terminated, info
def get_agent_action(self, a_id, action):
"""Construct the action for agent a_id."""
avail_actions = self.get_avail_agent_actions(a_id)
assert avail_actions[action] == 1, \
"Agent {} cannot perform action {}".format(a_id, action)
unit = self.get_unit_by_id(a_id)
tag = unit.tag
x = unit.pos.x
y = unit.pos.y
if action == 0:
# no-op (valid only when dead)
assert unit.health == 0, "No-op only available for dead agents."
if self.debug:
logging.debug("Agent {}: Dead".format(a_id))
return None
elif action == 1:
# stop
cmd = r_pb.ActionRawUnitCommand(
ability_id=actions["stop"],
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {}: Stop".format(a_id))
elif action == 2:
# move north
cmd = r_pb.ActionRawUnitCommand(
ability_id=actions["move"],
target_world_space_pos=sc_common.Point2D(
x=x, y=y + self._move_amount),
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {}: Move North".format(a_id))
elif action == 3:
# move south
cmd = r_pb.ActionRawUnitCommand(
ability_id=actions["move"],
target_world_space_pos=sc_common.Point2D(
x=x, y=y - self._move_amount),
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {}: Move South".format(a_id))
elif action == 4:
# move east
cmd = r_pb.ActionRawUnitCommand(
ability_id=actions["move"],
target_world_space_pos=sc_common.Point2D(
x=x + self._move_amount, y=y),
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {}: Move East".format(a_id))
elif action == 5:
# move west
cmd = r_pb.ActionRawUnitCommand(
ability_id=actions["move"],
target_world_space_pos=sc_common.Point2D(
x=x - self._move_amount, y=y),
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {}: Move West".format(a_id))
else:
# attack/heal units that are in range
target_id = action - self.n_actions_no_attack
if self.map_type == "MMM" and unit.unit_type == self.medivac_id:
target_unit = self.agents[target_id]
action_name = "heal"
else:
target_unit = self.enemies[target_id]
action_name = "attack"
action_id = actions[action_name]
target_tag = target_unit.tag
cmd = r_pb.ActionRawUnitCommand(
ability_id=action_id,
target_unit_tag=target_tag,
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {} {}s unit # {}".format(
a_id, action_name, target_id))
sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))
return sc_action
def get_agent_action_heuristic(self, a_id, action):
unit = self.get_unit_by_id(a_id)
tag = unit.tag
target = self.heuristic_targets[a_id]
if unit.unit_type == self.medivac_id:
if (target is None or self.agents[target].health == 0 or
self.agents[target].health == self.agents[target].health_max):
min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
min_id = -1
for al_id, al_unit in self.agents.items():
if al_unit.unit_type == self.medivac_id:
continue
if (al_unit.health != 0 and
al_unit.health != al_unit.health_max):
dist = self.distance(unit.pos.x, unit.pos.y,
al_unit.pos.x, al_unit.pos.y)
if dist < min_dist:
min_dist = dist
min_id = al_id
self.heuristic_targets[a_id] = min_id
if min_id == -1:
self.heuristic_targets[a_id] = None
return None, 0
action_id = actions['heal']
target_tag = self.agents[self.heuristic_targets[a_id]].tag
else:
if target is None or self.enemies[target].health == 0:
min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
min_id = -1
for e_id, e_unit in self.enemies.items():
if (unit.unit_type == self.marauder_id and
e_unit.unit_type == self.medivac_id):
continue
if e_unit.health > 0:
dist = self.distance(unit.pos.x, unit.pos.y,
e_unit.pos.x, e_unit.pos.y)
if dist < min_dist:
min_dist = dist
min_id = e_id
self.heuristic_targets[a_id] = min_id
if min_id == -1:
self.heuristic_targets[a_id] = None
return None, 0
action_id = actions['attack']
target_tag = self.enemies[self.heuristic_targets[a_id]].tag
action_num = self.heuristic_targets[a_id] + self.n_actions_no_attack
# Check if the action is available
if (self.heuristic_rest and
self.get_avail_agent_actions(a_id)[action_num] == 0):
# Move towards the target rather than attacking/healing
if unit.unit_type == self.medivac_id:
target_unit = self.agents[self.heuristic_targets[a_id]]
else:
target_unit = self.enemies[self.heuristic_targets[a_id]]
delta_x = target_unit.pos.x - unit.pos.x
delta_y = target_unit.pos.y - unit.pos.y
            if abs(delta_x) > abs(delta_y):  # east or west
                if delta_x > 0:  # east
                    target_pos = sc_common.Point2D(
                        x=unit.pos.x + self._move_amount, y=unit.pos.y)
                    action_num = 4
                else:  # west
                    target_pos = sc_common.Point2D(
                        x=unit.pos.x - self._move_amount, y=unit.pos.y)
                    action_num = 5
            else:  # north or south
                if delta_y > 0:  # north
                    target_pos = sc_common.Point2D(
                        x=unit.pos.x, y=unit.pos.y + self._move_amount)
                    action_num = 2
                else:  # south
                    target_pos = sc_common.Point2D(
                        x=unit.pos.x, y=unit.pos.y - self._move_amount)
                    action_num = 3
            cmd = r_pb.ActionRawUnitCommand(
                ability_id=actions['move'],
                target_world_space_pos=target_pos,
                unit_tags=[tag],
                queue_command=False)
        else:
            # Attack/heal the target
            cmd = r_pb.ActionRawUnitCommand(
                ability_id=action_id,
                target_unit_tag=target_tag,
                unit_tags=[tag],
                queue_command=False)
sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))
return sc_action, action_num
def reward_battle(self):
"""Reward function when self.reward_spare==False.
Returns accumulative hit/shield point damage dealt to the enemy
+ reward_death_value per enemy unit killed, and, in case
self.reward_only_positive == False, - (damage dealt to ally units
+ reward_death_value per ally unit killed) * self.reward_negative_scale
"""
if self.reward_sparse:
return 0
reward = 0
delta_deaths = 0
delta_ally = 0
delta_enemy = 0
neg_scale = self.reward_negative_scale
# update deaths
for al_id, al_unit in self.agents.items():
if not self.death_tracker_ally[al_id]:
# did not die so far
prev_health = (
self.previous_ally_units[al_id].health
+ self.previous_ally_units[al_id].shield
)
if al_unit.health == 0:
# just died
self.death_tracker_ally[al_id] = 1
if not self.reward_only_positive:
delta_deaths -= self.reward_death_value * neg_scale
delta_ally += prev_health * neg_scale
else:
# still alive
delta_ally += neg_scale * (
prev_health - al_unit.health - al_unit.shield
)
for e_id, e_unit in self.enemies.items():
if not self.death_tracker_enemy[e_id]:
prev_health = (
self.previous_enemy_units[e_id].health
+ self.previous_enemy_units[e_id].shield
)
if e_unit.health == 0:
self.death_tracker_enemy[e_id] = 1
delta_deaths += self.reward_death_value
delta_enemy += prev_health
else:
delta_enemy += prev_health - e_unit.health - e_unit.shield
if self.reward_only_positive:
            reward = abs(delta_enemy + delta_deaths)  # abs prevents negative reward caused by shield regeneration
else:
reward = delta_enemy + delta_deaths - delta_ally
return reward
def get_total_actions(self):
"""Returns the total number of actions an agent could ever take."""
return self.n_actions
@staticmethod
def distance(x1, y1, x2, y2):
"""Distance between two points."""
return math.hypot(x2 - x1, y2 - y1)
def unit_shoot_range(self, agent_id):
"""Returns the shooting range for an agent."""
return 6
def unit_sight_range(self, agent_id):
"""Returns the sight range for an agent."""
return 9
def unit_max_cooldown(self, unit):
"""Returns the maximal cooldown for a unit."""
switcher = {
self.marine_id: 15,
self.marauder_id: 25,
self.medivac_id: 200, # max energy
self.stalker_id: 35,
self.zealot_id: 22,
self.colossus_id: 24,
self.hydralisk_id: 10,
self.zergling_id: 11,
self.baneling_id: 1
}
return switcher.get(unit.unit_type, 15)
def save_replay(self):
"""Save a replay."""
prefix = self.replay_prefix or self.map_name
replay_dir = self.replay_dir or ""
replay_path = self._run_config.save_replay(
self._controller.save_replay(), replay_dir=replay_dir, prefix=prefix)
logging.info("Replay saved at: %s" % replay_path)
def unit_max_shield(self, unit):
"""Returns maximal shield for a given unit."""
        if unit.unit_type == 74 or unit.unit_type == self.stalker_id:
            return 80  # Protoss Stalker
        if unit.unit_type == 73 or unit.unit_type == self.zealot_id:
            return 50  # Protoss Zealot
        if unit.unit_type == 4 or unit.unit_type == self.colossus_id:
            return 150  # Protoss Colossus
        if unit.unit_type == 48:
            return 1  # Terran Marine has no shield; return 1 to avoid division by zero
def can_move(self, unit, direction):
"""Whether a unit can move in a given direction."""
m = self._move_amount / 2
if direction == Direction.NORTH:
x, y = int(unit.pos.x), int(unit.pos.y + m)
elif direction == Direction.SOUTH:
x, y = int(unit.pos.x), int(unit.pos.y - m)
elif direction == Direction.EAST:
x, y = int(unit.pos.x + m), int(unit.pos.y)
else:
x, y = int(unit.pos.x - m), int(unit.pos.y)
if self.check_bounds(x, y) and self.pathing_grid[x, y]:
return True
return False
def get_surrounding_points(self, unit, include_self=False):
"""Returns the surrounding points of the unit in 8 directions."""
x = int(unit.pos.x)
y = int(unit.pos.y)
ma = self._move_amount
points = [
(x, y + 2 * ma),
(x, y - 2 * ma),
(x + 2 * ma, y),
(x - 2 * ma, y),
(x + ma, y + ma),
(x - ma, y - ma),
(x + ma, y - ma),
(x - ma, y + ma),
]
if include_self:
points.append((x, y))
return points
def check_bounds(self, x, y):
"""Whether a point is within the map bounds."""
return (0 <= x < self.map_x and 0 <= y < self.map_y)
def get_surrounding_pathing(self, unit):
"""Returns pathing values of the grid surrounding the given unit."""
points = self.get_surrounding_points(unit, include_self=False)
vals = [
self.pathing_grid[x, y] if self.check_bounds(x, y) else 1
for x, y in points
]
return vals
def get_surrounding_height(self, unit):
"""Returns height values of the grid surrounding the given unit."""
points = self.get_surrounding_points(unit, include_self=True)
vals = [
self.terrain_height[x, y] if self.check_bounds(x, y) else 1
for x, y in points
]
return vals
    # Fetch the partial (local) observation of a specific agent.
def get_obs_agent(self, agent_id):
"""Returns observation for agent_id.
NOTE: Agents should have access only to their local observations
during decentralised execution.
"""
unit = self.get_unit_by_id(agent_id)
nf_al = 4 + self.unit_type_bits
nf_en = 4 + self.unit_type_bits
if self.obs_all_health:
nf_al += 1 + self.shield_bits_ally
nf_en += 1 + self.shield_bits_enemy
if self.obs_last_action:
nf_al += self.n_actions
nf_own = self.unit_type_bits
if self.obs_own_health:
nf_own += 1 + self.shield_bits_ally
move_feats_len = self.n_actions_move
if self.obs_pathing_grid:
move_feats_len += self.n_obs_pathing
if self.obs_terrain_height:
move_feats_len += self.n_obs_height
move_feats = np.zeros(move_feats_len, dtype=np.float32)
enemy_feats = np.zeros((self.n_enemies, nf_en), dtype=np.float32)
ally_feats = np.zeros((self.n_agents - 1, nf_al), dtype=np.float32)
own_feats = np.zeros(nf_own, dtype=np.float32)
if unit.health > 0: # otherwise dead, return all zeros
x = unit.pos.x
y = unit.pos.y
            # Sight range
sight_range = self.unit_sight_range(agent_id)
            # Get the directions in which the agent can move (east, west, south, north)
avail_actions = self.get_avail_agent_actions(agent_id)
for m in range(self.n_actions_move):
move_feats[m] = avail_actions[m + 2]
ind = self.n_actions_move
if self.obs_pathing_grid:
move_feats[
ind : ind + self.n_obs_pathing
] = self.get_surrounding_pathing(unit)
ind += self.n_obs_pathing
if self.obs_terrain_height:
move_feats[ind:] = self.get_surrounding_height(unit)
            # Enemy features
for e_id, e_unit in self.enemies.items():
e_x = e_unit.pos.x
e_y = e_unit.pos.y
dist = self.distance(x, y, e_x, e_y)
                # Only record an enemy's features if it is within the agent's sight range.
if (
dist < sight_range and e_unit.health > 0
): # visible and alive
# Sight range > shoot range
enemy_feats[e_id, 0] = avail_actions[
self.n_actions_no_attack + e_id
] # available
enemy_feats[e_id, 1] = dist / sight_range # distance
enemy_feats[e_id, 2] = (
e_x - x
) / sight_range # relative X
enemy_feats[e_id, 3] = (
e_y - y
) / sight_range # relative Y
ind = 4
if self.obs_all_health:
enemy_feats[e_id, ind] = (
e_unit.health / e_unit.health_max
) # health
ind += 1
if self.shield_bits_enemy > 0:
max_shield = self.unit_max_shield(e_unit)
if max_shield is not None:
enemy_feats[e_id, ind] = (
e_unit.shield / max_shield
) # shield
else:
enemy_feats[e_id, ind] = 0
ind += 1
if self.unit_type_bits > 0:
type_id = self.get_unit_type_id(e_unit, False)
enemy_feats[e_id, ind + type_id] = 1 # unit type
# Ally features
al_ids = [
al_id for al_id in range(self.n_agents) if al_id != agent_id
]
for i, al_id in enumerate(al_ids):
al_unit = self.get_unit_by_id(al_id)
al_x = al_unit.pos.x
al_y = al_unit.pos.y
dist = self.distance(x, y, al_x, al_y)
if (
dist < sight_range and al_unit.health > 0
): # visible and alive
ally_feats[i, 0] = 1 # visible
ally_feats[i, 1] = dist / sight_range # distance
ally_feats[i, 2] = (al_x - x) / sight_range # relative X
ally_feats[i, 3] = (al_y - y) / sight_range # relative Y
ind = 4
if self.obs_all_health:
ally_feats[i, ind] = (
al_unit.health / al_unit.health_max
) # health
ind += 1
if self.shield_bits_ally > 0:
max_shield = self.unit_max_shield(al_unit)
ally_feats[i, ind] = (
al_unit.shield / max_shield
) # shield
ind += 1
if self.unit_type_bits > 0:
type_id = self.get_unit_type_id(al_unit, True)
ally_feats[i, ind + type_id] = 1
ind += self.unit_type_bits
if self.obs_last_action:
ally_feats[i, ind:] = self.last_action[al_id]
# Own features
ind = 0
if self.obs_own_health:
own_feats[ind] = unit.health / unit.health_max
ind += 1
if self.shield_bits_ally > 0:
max_shield = self.unit_max_shield(unit)
own_feats[ind] = unit.shield / max_shield
ind += 1
if self.unit_type_bits > 0:
type_id = self.get_unit_type_id(unit, True)
own_feats[ind + type_id] = 1
agent_obs = np.concatenate(
(
move_feats.flatten(),
enemy_feats.flatten(),
ally_feats.flatten(),
own_feats.flatten(),
)
)
if self.obs_timestep_number:
agent_obs = np.append(agent_obs,
self._episode_steps / self.episode_limit)
if self.debug:
logging.debug("Obs Agent: {}".format(agent_id).center(60, "-"))
logging.debug("Avail. actions {}".format(
self.get_avail_agent_actions(agent_id)))
logging.debug("Move feats {}".format(move_feats))
logging.debug("Enemy feats {}".format(enemy_feats))
logging.debug("Ally feats {}".format(ally_feats))
logging.debug("Own feats {}".format(own_feats))
return agent_obs
    # Fetch the partial observations of all agents.
def get_obs(self):
"""Returns all agent observations in a list.
NOTE: Agents should have access only to their local observations
during decentralised execution.
"""
agents_obs = [self.get_obs_agent(i) for i in range(self.n_agents)]
return agents_obs
    # Fetch the global state.
def get_state(self):
"""Returns the global state.
        NOTE: This function should not be used during decentralised execution.
"""
if self.obs_instead_of_state:
obs_concat = np.concatenate(self.get_obs(), axis=0).astype(
np.float32
)
return obs_concat
nf_al = 4 + self.shield_bits_ally + self.unit_type_bits
nf_en = 3 + self.shield_bits_enemy + self.unit_type_bits
ally_state = np.zeros((self.n_agents, nf_al))
enemy_state = np.zeros((self.n_enemies, nf_en))
center_x = self.map_x / 2
center_y = self.map_y / 2
        # Ally features
for al_id, al_unit in self.agents.items():
if al_unit.health > 0:
x = al_unit.pos.x
y = al_unit.pos.y
                max_cd = self.unit_max_cooldown(al_unit)  # per-unit max cooldown
                # Health
ally_state[al_id, 0] = (
al_unit.health / al_unit.health_max
)
                # Cooldown (energy for Medivacs)
if (
self.map_type == "MMM"
and al_unit.unit_type == self.medivac_id
):
ally_state[al_id, 1] = al_unit.energy / max_cd # energy
else:
ally_state[al_id, 1] = (
al_unit.weapon_cooldown / max_cd
)
                # Normalized relative position
ally_state[al_id, 2] = (
x - center_x
) / self.max_distance_x # relative X
ally_state[al_id, 3] = (
y - center_y
) / self.max_distance_y # relative Y
ind = 4
                # Shield info (Protoss only; 0 for other races)
if self.shield_bits_ally > 0:
max_shield = self.unit_max_shield(al_unit)
ally_state[al_id, ind] = (
al_unit.shield / max_shield
)
ind += 1
                # Unit type
if self.unit_type_bits > 0:
type_id = self.get_unit_type_id(al_unit, True)
ally_state[al_id, ind + type_id] = 1
        # Enemy features
for e_id, e_unit in self.enemies.items():
if e_unit.health > 0:
x = e_unit.pos.x
y = e_unit.pos.y
                # Enemy health
enemy_state[e_id, 0] = (
e_unit.health / e_unit.health_max
)
                # Enemy normalized relative position
enemy_state[e_id, 1] = (
x - center_x
) / self.max_distance_x # relative X
enemy_state[e_id, 2] = (
y - center_y
) / self.max_distance_y # relative Y
ind = 3
                # Shield info (Protoss only; 0 for other races)
if self.shield_bits_enemy > 0:
max_shield = self.unit_max_shield(e_unit)
if max_shield is not None:
enemy_state[e_id, ind] = (
e_unit.shield / max_shield
) # shield
else:
enemy_state[e_id, ind] = 0
ind += 1
                # Unit type
if self.unit_type_bits > 0:
type_id = self.get_unit_type_id(e_unit, False)
enemy_state[e_id, ind + type_id] = 1
state = np.append(ally_state.flatten(), enemy_state.flatten())
if self.state_last_action:
state = np.append(state, self.last_action.flatten())
if self.state_timestep_number:
state = np.append(state,
self._episode_steps / self.episode_limit)
state = state.astype(dtype=np.float32)
if self.debug:
logging.debug("STATE".center(60, "-"))
logging.debug("Ally state {}".format(ally_state))
logging.debug("Enemy state {}".format(enemy_state))
if self.state_last_action:
logging.debug("Last actions {}".format(self.last_action))
return state
def get_obs_size(self):
"""Returns the size of the observation."""
nf_al = 4 + self.unit_type_bits
nf_en = 4 + self.unit_type_bits
if self.obs_all_health:
nf_al += 1 + self.shield_bits_ally
nf_en += 1 + self.shield_bits_enemy
own_feats = self.unit_type_bits
if self.obs_own_health:
own_feats += 1 + self.shield_bits_ally
if self.obs_timestep_number:
own_feats += 1
if self.obs_last_action:
nf_al += self.n_actions
move_feats = self.n_actions_move
if self.obs_pathing_grid:
move_feats += self.n_obs_pathing
if self.obs_terrain_height:
move_feats += self.n_obs_height
enemy_feats = self.n_enemies * nf_en
ally_feats = (self.n_agents - 1) * nf_al
return move_feats + enemy_feats + ally_feats + own_feats
def get_state_size(self):
"""Returns the size of the global state."""
if self.obs_instead_of_state:
return self.get_obs_size() * self.n_agents
nf_al = 4 + self.shield_bits_ally + self.unit_type_bits
nf_en = 3 + self.shield_bits_enemy + self.unit_type_bits
enemy_state = self.n_enemies * nf_en
ally_state = self.n_agents * nf_al
size = enemy_state + ally_state
if self.state_last_action:
size += self.n_agents * self.n_actions
if self.state_timestep_number:
size += 1
return size
def get_unit_type_id(self, unit, ally):
"""Returns the ID of unit type in the given scenario."""
if ally: # use new SC2 unit types
type_id = unit.unit_type - self._min_unit_type
else: # use default SC2 unit types
if self.map_type == "stalkers_and_zealots":
# id(Stalker) = 74, id(Zealot) = 73, id(Marine) = 48
if unit.unit_type == 73:
type_id = 0
elif unit.unit_type == 74:
type_id = 1
else:
type_id = 2
elif self.map_type == "colossi_stalkers_zealots":
# id(Stalker) = 74, id(Zealot) = 73, id(Colossus) = 4
if unit.unit_type == 4:
type_id = 0
elif unit.unit_type == 74:
type_id = 1
else:
type_id = 2
elif self.map_type == "bane":
if unit.unit_type == 9:
type_id = 0
else:
type_id = 1
elif self.map_type == "MMM":
if unit.unit_type == 51:
type_id = 0
elif unit.unit_type == 48:
type_id = 1
else:
type_id = 2
return type_id
def get_avail_agent_actions(self, agent_id):
"""Returns the available actions for agent_id."""
unit = self.get_unit_by_id(agent_id)
if unit.health > 0:
# cannot choose no-op when alive
avail_actions = [0] * self.n_actions
# stop should be allowed
avail_actions[1] = 1
# see if we can move
if self.can_move(unit, Direction.NORTH):
avail_actions[2] = 1
if self.can_move(unit, Direction.SOUTH):
avail_actions[3] = 1
if self.can_move(unit, Direction.EAST):
avail_actions[4] = 1
if self.can_move(unit, Direction.WEST):
avail_actions[5] = 1
            # Can attack only units that are alive and within shooting range
shoot_range = self.unit_shoot_range(agent_id)
target_items = self.enemies.items()
if self.map_type == "MMM" and unit.unit_type == self.medivac_id:
# Medivacs cannot heal themselves or other flying units
target_items = [
(t_id, t_unit)
for (t_id, t_unit) in self.agents.items()
if t_unit.unit_type != self.medivac_id
]
for t_id, t_unit in target_items:
if t_unit.health > 0:
dist = self.distance(
unit.pos.x, unit.pos.y, t_unit.pos.x, t_unit.pos.y
)
if dist <= shoot_range:
avail_actions[t_id + self.n_actions_no_attack] = 1
return avail_actions
else:
# only no-op allowed
return [1] + [0] * (self.n_actions - 1)
def get_avail_actions(self):
"""Returns the available actions of all agents in a list."""
avail_actions = []
for agent_id in range(self.n_agents):
avail_agent = self.get_avail_agent_actions(agent_id)
avail_actions.append(avail_agent)
return avail_actions
def close(self):
"""Close StarCraft II."""
if self._sc2_proc:
self._sc2_proc.close()
def seed(self):
"""Returns the random seed used by the environment."""
return self._seed
def render(self):
"""Not implemented."""
pass
def _kill_all_units(self):
"""Kill all units on the map."""
units_alive = [
unit.tag for unit in self.agents.values() if unit.health > 0
] + [unit.tag for unit in self.enemies.values() if unit.health > 0]
debug_command = [
d_pb.DebugCommand(kill_unit=d_pb.DebugKillUnit(tag=units_alive))
]
self._controller.debug(debug_command)
def init_units(self):
"""Initialise the units."""
while True:
# Sometimes not all units have yet been created by SC2
self.agents = {}
self.enemies = {}
ally_units = [
unit
for unit in self._obs.observation.raw_data.units
if unit.owner == 1
]
ally_units_sorted = sorted(
ally_units,
key=attrgetter("unit_type", "pos.x", "pos.y"),
reverse=False,
)
for i in range(len(ally_units_sorted)):
self.agents[i] = ally_units_sorted[i]
if self.debug:
logging.debug(
"Unit {} is {}, x = {}, y = {}".format(
len(self.agents),
self.agents[i].unit_type,
self.agents[i].pos.x,
self.agents[i].pos.y,
)
)
for unit in self._obs.observation.raw_data.units:
if unit.owner == 2:
self.enemies[len(self.enemies)] = unit
if self._episode_count == 0:
self.max_reward += unit.health_max + unit.shield_max
if self._episode_count == 0:
min_unit_type = min(
unit.unit_type for unit in self.agents.values()
)
self._init_ally_unit_types(min_unit_type)
all_agents_created = (len(self.agents) == self.n_agents)
all_enemies_created = (len(self.enemies) <= self.n_enemies)
self.enemy_tags = np.arange(self.n_enemies)
self.ally_tags = np.arange(self.max_n_enemies,
self.max_n_enemies + self.n_agents)
if all_agents_created and all_enemies_created: # all good
return
try:
self._controller.step(1)
self._obs = self._controller.observe()
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
self.reset()
def update_units(self):
"""Update units after an environment step.
This function assumes that self._obs is up-to-date.
"""
n_ally_alive = 0
n_enemy_alive = 0
# Store previous state
self.previous_ally_units = deepcopy(self.agents)
self.previous_enemy_units = deepcopy(self.enemies)
for al_id, al_unit in self.agents.items():
updated = False
for unit in self._obs.observation.raw_data.units:
if al_unit.tag == unit.tag:
self.agents[al_id] = unit
updated = True
n_ally_alive += 1
break
if not updated: # dead
al_unit.health = 0
for e_id, e_unit in self.enemies.items():
updated = False
for unit in self._obs.observation.raw_data.units:
if e_unit.tag == unit.tag:
self.enemies[e_id] = unit
updated = True
n_enemy_alive += 1
break
if not updated: # dead
e_unit.health = 0
if (n_ally_alive == 0 and n_enemy_alive > 0
or self.only_medivac_left(ally=True)):
return -1 # lost
if (n_ally_alive > 0 and n_enemy_alive == 0
or self.only_medivac_left(ally=False)):
return 1 # won
if n_ally_alive == 0 and n_enemy_alive == 0:
return 0
return None
def _init_ally_unit_types(self, min_unit_type):
"""Initialise ally unit types. Should be called once from the
init_units function.
"""
self._min_unit_type = min_unit_type
if self.map_type == "marines":
self.marine_id = min_unit_type
elif self.map_type == "stalkers_and_zealots":
self.stalker_id = min_unit_type
self.zealot_id = min_unit_type + 1
elif self.map_type == "colossi_stalkers_zealots":
self.colossus_id = min_unit_type
self.stalker_id = min_unit_type + 1
self.zealot_id = min_unit_type + 2
elif self.map_type == "MMM":
self.marauder_id = min_unit_type
self.marine_id = min_unit_type + 1
self.medivac_id = min_unit_type + 2
elif self.map_type == "zealots":
self.zealot_id = min_unit_type
elif self.map_type == "hydralisks":
self.hydralisk_id = min_unit_type
elif self.map_type == "stalkers":
self.stalker_id = min_unit_type
elif self.map_type == "colossus":
self.colossus_id = min_unit_type
elif self.map_type == "bane":
self.baneling_id = min_unit_type
self.zergling_id = min_unit_type + 1
def only_medivac_left(self, ally):
"""Check if only Medivac units are left."""
if self.map_type != "MMM":
return False
if ally:
units_alive = [
a
for a in self.agents.values()
if (a.health > 0 and a.unit_type != self.medivac_id)
]
if len(units_alive) == 0:
return True
return False
else:
units_alive = [
a
for a in self.enemies.values()
if (a.health > 0 and a.unit_type != self.medivac_id)
]
if len(units_alive) == 1 and units_alive[0].unit_type == 54:
return True
return False
def get_unit_by_id(self, a_id):
"""Get unit by ID."""
return self.agents[a_id]
def get_stats(self):
stats = {
"battles_won": self.battles_won,
"battles_game": self.battles_game,
"battles_draw": self.timeouts,
"win_rate": self.battles_won / self.battles_game,
"timeouts": self.timeouts,
"restarts": self.force_restarts,
}
return stats
def get_entities(self):
"""
Returns list of agent entities and enemy entities in the map (all entities are a fixed size)
All entities together form the global state
For decentralized execution agents should only have access to the
entities specified by get_masks()
"""
all_units = list(self.agents.items()) + list(self.enemies.items())
nf_entity = self.get_entity_size()
center_x = self.map_x / 2
center_y = self.map_y / 2
com_x = sum(unit.pos.x for u_i, unit in all_units) / len(all_units)
com_y = sum(unit.pos.y for u_i, unit in all_units) / len(all_units)
max_dist_com = max(self.distance(unit.pos.x, unit.pos.y, com_x, com_y)
for u_i, unit in all_units)
entities = []
i = 0
avail_actions = self.get_avail_actions()
for u_i, unit in all_units:
entity = np.zeros(nf_entity, dtype=np.float32)
# entity tag
if u_i < self.n_agents:
tag = self.ally_tags[u_i]
else:
tag = self.enemy_tags[u_i - self.n_agents]
entity[tag] = 1
ind = self.max_n_agents + self.max_n_enemies
# available actions (if user controlled entity)
if u_i < self.n_agents:
for ac_i in range(self.n_actions - 2):
entity[ind + ac_i] = avail_actions[u_i][2 + ac_i]
ind += self.n_actions - 2
# unit type
if self.unit_type_bits > 0:
if i < self.n_agents:
type_id = self.get_unit_type_id(unit, True)
else:
type_id = self.get_unit_type_id(unit, False)
i += 1
                entity[ind + type_id] = 1  # one-hot unit type
ind += self.unit_type_bits
if unit.health > 0: # otherwise dead, return all zeros
# health and shield
if self.obs_all_health or self.obs_own_health:
entity[ind] = unit.health / unit.health_max
if ((self.shield_bits_ally > 0 and u_i < self.n_agents) or
(self.shield_bits_enemy > 0 and
u_i >= self.n_agents)):
if unit.shield_max == 0:
entity[ind + 1] = 0
else:
entity[ind + 1] = unit.shield / unit.shield_max
ind += 1 + int(self.shield_bits_ally or
self.shield_bits_enemy)
# energy and cooldown (for ally units only)
if u_i < self.n_agents:
if unit.energy_max > 0.0:
entity[ind] = unit.energy / unit.energy_max
entity[ind + 1] = unit.weapon_cooldown / self.unit_max_cooldown(unit)
ind += 2
# x-y positions
entity[ind] = (unit.pos.x - center_x) / self.max_distance_x
entity[ind + 1] = (unit.pos.y - center_y) / self.max_distance_y
entity[ind + 2] = (unit.pos.x - com_x) / max_dist_com
entity[ind + 3] = (unit.pos.y - com_y) / max_dist_com
ind += 4
if self.obs_pathing_grid:
entity[
ind:ind + self.n_obs_pathing
] = self.get_surrounding_pathing(unit)
ind += self.n_obs_pathing
if self.obs_terrain_height:
entity[ind:] = self.get_surrounding_height(unit)
entities.append(entity)
for _ in range(i, self.max_n_agents + self.max_n_enemies):
entities.append(np.zeros(nf_entity, dtype=np.float32))
return entities
def get_entity_size(self):
        nf_entity = self.max_n_agents + self.max_n_enemies  # one-hot entity tag
nf_entity += self.n_actions - 2 # available actions minus those that are always available
nf_entity += self.unit_type_bits # unit type
# below are only observed for alive units (else zeros)
if self.obs_all_health or self.obs_own_health:
nf_entity += 1 + int(self.shield_bits_ally or self.shield_bits_enemy) # health and shield
nf_entity += 2 # energy and cooldown for ally units
nf_entity += 4 # global x-y coords + rel x-y to center of mass of all agents (normalized by furthest agent's distance)
if self.obs_pathing_grid:
nf_entity += self.n_obs_pathing # local pathing
if self.obs_terrain_height:
nf_entity += self.n_obs_height # local terrain
return nf_entity
def get_max_entity(self):
return self.max_n_agents + self.max_n_enemies
def get_masks(self):
"""
Returns:
1) per agent observability mask over all entities (unoberserved = 1, else 0)
3) mask of inactive entities (including enemies) over all possible entities
"""
sight_range = np.array(
[self.unit_sight_range(a_i)
for a_i in range(self.n_agents)]).reshape(-1, 1)
obs_mask = (self.dist_mtx > sight_range).astype(np.uint8)
obs_mask_padded = np.ones((self.max_n_agents,
self.max_n_agents + self.max_n_enemies),
dtype=np.uint8)
obs_mask_padded[:self.n_agents,
:self.n_agents] = obs_mask[:, :self.n_agents]
obs_mask_padded[:self.n_agents,
self.max_n_agents:self.max_n_agents + self.n_enemies] = (
obs_mask[:, self.n_agents:]
)
entity_mask = np.ones(self.max_n_agents + self.max_n_enemies,
dtype=np.uint8)
entity_mask[:self.n_agents] = 0
entity_mask[self.max_n_agents:self.max_n_agents + self.n_enemies] = 0
return obs_mask_padded, entity_mask
def _calc_distance_mtx(self):
# Calculate distances of all agents to all agents and enemies (for visibility calculations)
dist_mtx = 1000 * np.ones((self.n_agents, self.n_agents + self.n_enemies))
for i in range(self.n_agents):
for j in range(self.n_agents + self.n_enemies):
if j < i:
continue
elif j == i:
dist_mtx[i, j] = 0.0
else:
unit_a = self.agents[i]
if j >= self.n_agents:
unit_b = self.enemies[j - self.n_agents]
else:
unit_b = self.agents[j]
if unit_a.health > 0 and unit_b.health > 0:
dist = self.distance(unit_a.pos.x, unit_a.pos.y,
unit_b.pos.x, unit_b.pos.y)
dist_mtx[i, j] = dist
if j < self.n_agents:
dist_mtx[j, i] = dist
self.dist_mtx = dist_mtx
|
from __future__ import unicode_literals
import re
from builtins import object, zip
from bs4 import BeautifulSoup
class WebVTTTestingMixIn(object):
"""
Provide specialized test case capabilities for asserting on WebVTT content.
"""
def _extract_webvtt_captions(self, content):
return tuple(line.strip() for line in content.splitlines())
def assertWebVTTEquals(self, first, second):
"""
Assert that two WebVTT contents are equal.
"""
first_items = self._extract_webvtt_captions(first)
second_items = self._extract_webvtt_captions(second)
self.assertEqual(first_items, second_items)
class SRTTestingMixIn(object):
"""
Provide specialized test case capabilities for asserting on SRT content.
"""
def _extract_srt_captions(self, content):
return tuple(line.strip() for line in content.splitlines())
def assertSRTEquals(self, first, second):
"""
Assert that two SRT contents are equal.
"""
first_items = self._extract_srt_captions(first)
second_items = self._extract_srt_captions(second)
self.assertEqual(first_items, second_items)
class CaptionSetTestingMixIn(object):
def assertCaptionSetAlmostEquals(self, first, second,
tolerance_microseconds):
"""
Assert that two caption sets have equal text except for newlines,
and differences in timing that are less than tolerance_microseconds.
"""
captions_1 = first.get_captions(first.get_languages()[0])
captions_2 = second.get_captions(first.get_languages()[0])
def get_text_for_caption(caption):
text = caption.get_text()
text = re.sub(u'\\s+', u' ', text)
return text
text_1 = [get_text_for_caption(caption) for caption in captions_1]
text_2 = [get_text_for_caption(caption) for caption in captions_2]
self.assertEqual(text_1, text_2)
def close_enough(ts1, ts2):
return abs(ts1 - ts2) < tolerance_microseconds
start_differences = [
(caption_1.start, caption_2.start)
for caption_1, caption_2 in zip(captions_1, captions_2)
if not close_enough(caption_1.start, caption_2.start)
]
self.assertEqual(start_differences, [])
end_differences = [
(caption_1.end, caption_2.end)
for caption_1, caption_2 in zip(captions_1, captions_2)
if not close_enough(caption_1.end, caption_2.end)
]
self.assertEqual(end_differences, [])
class DFXPTestingMixIn(object):
"""
Provide specialized test case capabilities for asserting on DFXP content.
"""
def _remove_styling(self, soup):
for style in soup(u'styling'):
style.clear()
for paragraph in soup(u'p'):
if u'style' in paragraph.attrs:
del paragraph.attrs[u'style']
def _remove_spans(self, soup):
for span in soup(u'span'):
span.unwrap()
def _trim_text(self, soup):
for paragraph in soup(u'p'):
paragraph.string = paragraph.text.strip()
def assertDFXPEquals(self, first, second,
ignore_styling=False,
ignore_spans=False):
first_soup = BeautifulSoup(first, 'lxml')
second_soup = BeautifulSoup(second, 'lxml')
if ignore_styling:
self._remove_styling(first_soup)
self._remove_styling(second_soup)
if ignore_spans:
self._remove_spans(first_soup)
self._remove_spans(second_soup)
self._trim_text(first_soup)
self._trim_text(second_soup)
self.assertEqual(first_soup, second_soup)
class SAMITestingMixIn(object):
"""
Provide specialized test case capabilities for asserting on SAMI content.
"""
def _extract_sami_captions(self, soup):
return tuple(
(caption.attrs[u'start'], caption.p.text.strip())
for caption in soup.select(u'sync'))
def assertSAMIEquals(self, first, second):
first_soup = BeautifulSoup(first, 'lxml')
second_soup = BeautifulSoup(second, 'lxml')
first_items = self._extract_sami_captions(first_soup)
second_items = self._extract_sami_captions(second_soup)
self.assertEqual(first_items, second_items)
|
import lxml.etree as etree
from pathlib import Path
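# Validate the source document against its XSD schema, then apply an XSLT
# stylesheet and wrap the result in a minimal HTML page (comments added for
# clarity; the file names below are specific to this example project).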
source = etree.parse("crocodile.example.xml")
xsd_doc = etree.parse("coc.creature.xsd")
# xsds_doc = etree.parse("../common/xsd/rich_text.xsd")
xsd_schema = etree.XMLSchema(xsd_doc)
xsd_schema.assert_(source)
xslt_dom = etree.parse("coc.creature.smallblock.xslt")
transform = etree.XSLT(xslt_dom)
html = transform(source)
html.write('test.html', pretty_print=True, method='html')
with open('example.html', 'wb') as outf:
outf.write('<?xml version="1.0" encoding="utf-8"?><html><head></head><body>'.encode('utf-8'))
html.write(outf, pretty_print=True, method='html', encoding='utf-8')
outf.write('</body></html>'.encode('utf-8'))
|
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from ceilometer.agent import manager
from ceilometer.i18n import _LW
from ceilometer.openstack.common import service as os_service
from ceilometer import service
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class MultiChoicesOpt(cfg.Opt):
def __init__(self, name, choices=None, **kwargs):
super(MultiChoicesOpt, self).__init__(name,
type=DeduplicatedCfgList(),
**kwargs)
self.choices = choices
def _get_argparse_kwargs(self, group, **kwargs):
"""Extends the base argparse keyword dict for multi choices options."""
kwargs = super(MultiChoicesOpt, self)._get_argparse_kwargs(group)
kwargs['nargs'] = '+'
choices = kwargs.get('choices', self.choices)
if choices:
kwargs['choices'] = choices
return kwargs
class DeduplicatedCfgList(cfg.types.List):
def __call__(self, *args, **kwargs):
result = super(DeduplicatedCfgList, self).__call__(*args, **kwargs)
if len(result) != len(set(result)):
LOG.warning(_LW("Duplicated values: %s found in CLI options, "
"auto de-duplidated"), result)
result = list(set(result))
return result
CLI_OPTS = [
MultiChoicesOpt('polling-namespaces',
default=['compute', 'central'],
choices=['compute', 'central', 'ipmi'],
dest='polling_namespaces',
help='Polling namespace(s) to be used while '
'resource polling'),
MultiChoicesOpt('pollster-list',
default=[],
dest='pollster_list',
help='List of pollsters (or wildcard templates) to be '
'used while polling'),
]
CONF.register_cli_opts(CLI_OPTS)
def main():
service.prepare_service()
os_service.launch(manager.AgentManager(CONF.polling_namespaces,
CONF.pollster_list)).wait()
# todo(dbelova): remove it someday. Needed for backward compatibility
def main_compute():
service.prepare_service()
os_service.launch(manager.AgentManager(['compute'])).wait()
# todo(dbelova): remove it someday. Needed for backward compatibility
def main_central():
service.prepare_service()
os_service.launch(manager.AgentManager(['central'])).wait()
def main_ipmi():
service.prepare_service()
os_service.launch(manager.AgentManager(['ipmi'])).wait()
|
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.003
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../data/',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data/',
train=False,
transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Bidirectional recurrent neural network (many-to-one)
class BiRNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(BiRNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)
self.fc = nn.Linear(hidden_size * 2, num_classes) # 2 for bidirection
def forward(self, x):
# Set initial states
h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(device) # 2 for bidirection
c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(device)
# Forward propagate LSTM
out, _ = self.lstm(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)
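        # Note (added): with a bidirectional LSTM, out[:, -1, :] concatenates
        # the forward direction's hidden state at the final time step with the
        # backward direction's hidden state at that same step (i.e. after it
        # has read only the last input in reverse).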
# Decode the hidden state of the last time step
out = self.fc(out[:, -1, :])
return out
model = BiRNN(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.reshape(-1, sequence_length, input_size).to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
# Test the model
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.reshape(-1, sequence_length, input_size).to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
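# To reuse the trained weights later (sketch): rebuild the model with the same
# hyper-parameters and call model.load_state_dict(torch.load('model.ckpt')).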
|
"""
Django settings for importe_33537 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'importe_33537.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'importe_33537.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
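# S3 media storage is enabled only when all four AWS settings above are set;
# otherwise Django's default local file storage and whitenoise static file
# handling remain in effect.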
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
|