from ajenti.ui import *
from ajenti.ui.binder import Binder
from api import *
@plugin
class LinuxBasicNetworkConfigSet (NetworkConfigBit):
cls = 'linux-basic'
title = 'Basic'
def init(self):
self.append(self.ui.inflate('network:bit-linux-basic'))
self.binder = Binder(None, self)
def refresh(self):
self.binder.setup(self.iface).populate()
def apply(self):
self.binder.update()
|
import base64
import json
import logging
import time
import threading
import requests
class OpenHabRestInterface(threading.Thread):
prev_state = {} # stores item states
update = False
def __init__(self, host, port, user, pwd, group, queue):
self.host = host
self.port = port
self.user = user
self.pwd = pwd
self.auth = base64.encodestring("%s:%s" % (self.user,
self.pwd)).replace("\n", "")
self.basic_header = {
"Authorization": "Basic %s" % self.auth,
"Content-type": "text/plain"
} # Header for basic connections (returns only text)
self.polling_header = {
"Authorization": "Basic %s" % self.auth,
"Accept": "application/json"
} # Header for polling (returns json object)
self.add_header = {
"Authorization": "Basic %s" % self.auth,
"Accept": "application/json",
"Content-Type": "application/json"
} # Header for adding items
# NULL Logger if none is set
self.logger = logging.getLogger("NULL")
self.logger.addHandler(logging.NullHandler())
self.logger.setLevel(logging.NOTSET)
self.args = (group, queue)
threading.Thread.__init__(
self, target=self.poll_status, args=self.args)
# If you want logging you can set the logger here
def set_logger(self, logger_name):
self.logger = logging.getLogger(logger_name)
# Returns the state of the specified item
def get_item_state(self, item):
retval = requests.get("http://" + self.host + ":" + str(self.port) +
"/rest/items/" + item + "/state")
if retval.status_code != requests.codes.ok:
self.logger.error("GET returned: %s" % retval.status_code)
return None
else:
value = retval.text
self.prev_state[item] = value
self.logger.info(item + ": " + str(value))
return value
# Updates the state of the specified item
def update_item_state(self, item, state, no_update=False):
openhab_url = "http://%s:%s/rest/items/%s/state" % (self.host,
self.port, item)
retval = requests.put(openhab_url,
data=str(state),
headers=self.basic_header)
if retval.status_code != requests.codes.accepted:
self.logger.error("PUT returned : %s for item: %s" %
(retval.status_code, item))
return False
# Add to prev_state to prevent endless loops
if not no_update:
self.prev_state[item] = state
return True
# Polls all Members of a Group and queues new values
def poll_status(self, group, queue):
self.update = True
url = "http://%s:%s/rest/items/%s" % (self.host, self.port, group)
param = {"type": "json"}
while self.update:
queue.join() # Wait until queue is empty
retval = requests.get(url,
params=param,
headers=self.polling_header)
if retval.status_code != requests.codes.ok:
self.logger.error("GET returned: %s for Group:%s" %
(retval.status_code, group))
time.sleep(0.5)
continue
# Get all items in the group and check for new values
for member in retval.json()["members"]:
item = member["name"]
state = member["state"]
if item in self.prev_state:
if state != self.prev_state[item]:
self.logger.debug("New State of %s: %s" %
(item, state))
queue.put({item: state})
else:
queue.put({item: state})
self.prev_state[item] = state
time.sleep(0.5)
# Add a new item to openHab
def add_item(self, name, item_type, label, category, group):
# Construct the new Item
item = {
"name": name,
"type": item_type,
"label": label,
"category": category,
"groupNames": [group]
}
item_json = json.dumps(item) # create json
# Push new Item and return success/failure
url = "http://%s:%s/rest/items/%s" % (self.host, self.port, name)
for i in range(3): # Retry up to 3 times on failure
retval = requests.put(url, data=item_json, headers=self.add_header)
if retval.status_code != requests.codes.ok:
if i == 2:
self.logger.error("PUT returned: %s" % retval.status_code)
return False
else:
break
return True
# Delete Item from openHab
def delete_item(self, name):
url = "http://%s:%s/rest/items/%s" % (self.host, self.port, name)
retval = requests.delete(url)
if retval.status_code != requests.codes.ok:
self.logger.error("DELETE returned: %s" % retval.status_code)
return False
return True
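# Minimal usage sketch (illustrative host, credentials, group and item names;
# assumes a reachable openHAB server). `Queue` is the Python 2 module name.
if __name__ == "__main__":
    import Queue
    updates = Queue.Queue()
    iface = OpenHabRestInterface("localhost", 8080, "admin", "secret",
                                 "gMonitored", updates)
    iface.set_logger("openhab")                     # optional: attach a real logger
    iface.start()                                   # runs poll_status() in a background thread
    print(iface.get_item_state("Light_Kitchen"))    # read one item's state
    iface.update_item_state("Light_Kitchen", "ON")  # push a new state
    changed = updates.get()                         # poll_status() queues {item: state} dicts
    updates.task_done()                             # let the poll loop's queue.join() proceed
    iface.update = False                            # ask the polling loop to stop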
|
from gwt.ui.ListBox import (
DOM,
Event,
Factory,
FocusWidget,
ListBox,
)
|
""" Some standard utiltilies for defining functions.
These are developer functions that are not covered by API guarantees.
"""
import pandas as pd
from ..column import DataFrame, Column, build_col_broadcast, build_col_fun, build_col_literal
from ..proto import types_pb2
from ..proto import structured_transform_pb2 as st_pb2
from ..proto import std_pb2  # used below for StructuredReduce and (Local)StructuredTransform
from .base import *
from .error import *
def check_df(df, name_hint=None):
""" Checks if the input is a dataframe, or turns it into a dataframe
if necessary (if it is a column).
"""
if isinstance(df, DataFrame):
return df
if isinstance(df, Column):
return df.as_dataframe(name_hint=name_hint)
raise CreationError("Trying to cast %s as a dataframe, which is of type %s" % (df, type(df)))
def check_type_number(dt):
p = dt.to_proto
if p.HasField("basic_type") and p.basic_type in [types_pb2.SQLType.DOUBLE, types_pb2.SQLType.INT]:
return dt
raise CreationError("Expected type to be a type number: %s" % dt)
def make_aggregator_sql(sqlname, typefun, pdfun=None, spfun=None):
"""
sqlname: the name of the sql function
typefun: datatype -> datatype
Returns a function of type (df-like, name: string) -> observable
"""
def function_karps(df, name):
df = check_df(df)
type_out = typefun(df.type)
# the proto that defines the aggregation.
p = std_pb2.StructuredReduce(agg_op=st_pb2.Aggregation(
op=st_pb2.AggregationFunction(
function_name=sqlname,
inputs=[st_pb2.ColumnExtraction(path=[])]
),
field_name=name))
return build_observable("org.spark.StructuredReduce", type_out,
parents=[df],
op_extra=p,
name_hint=sqlname,
path_extra=name)
def function_pandas(df):
# Takes the input (assumed to be a pandas dataframe or series) and
# performs the pandas operation on it.
s = _convert_pd_series(df)
return pdfun(s)
def function(df, name=None):
if isinstance(df, (DataFrame, Column)):
return function_karps(df, name)
# TODO: check for Spark
# Assume this is a python object, pass it to python:
return function_pandas(df)
# Useful for visualization of the function
function.__name__ = sqlname
return function
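# Illustrative sketch of how make_aggregator_sql is used. The function names,
# the numeric type check, and the pandas reductions below are assumptions for
# demonstration, not definitions from this module.
sum_agg = make_aggregator_sql("sum", check_type_number, pdfun=lambda s: s.sum())
max_agg = make_aggregator_sql("max", lambda dt: dt, pdfun=lambda s: s.max())
# sum_agg(karps_df, name="total")  -> an observable node in the computation graph
# sum_agg(pd.Series([1, 2, 3]))    -> 6, evaluated eagerly through pandas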
def make_transform_sql1(sqlname, typefun, pyfun=None, spfun=None):
""" Makes a sql transformer that accepts only one argument.
sqlname: the name of the sql function.
typefun: a function that accepts one datatype and returns a datatype.
Returns a function of type (input: col-like, name: string) -> col-like, with the following rules:
- observable -> observable
- column -> column
- dataframe -> dataframe
- python object -> python object
"""
def function_karps(obj1, name):
type_in = obj1.type
type_out = typefun(type_in)
# Get the column input for the data.
if isinstance(obj1, Column):
return Column(
ref = obj1.reference,
type_p = type_out,
function_name=sqlname,
function_deps=[obj1])
elif isinstance(obj1, (DataFrame, Observable)):
proto_in = st_pb2.Column(
extraction=st_pb2.ColumnExtraction(path=[]))
proto_out = st_pb2.Column(
function=st_pb2.ColumnFunction(
function_name=sqlname,
inputs=[proto_in]),
field_name="%s()" % sqlname) # TODO: fix the name
else:
raise CreationError("Does not understand object of type %s" % (type(obj1)))
if isinstance(obj1, DataFrame):
p = std_pb2.StructuredTransform(
col_op=proto_out)
return build_dataframe(
op_name="org.spark.StructuredTransform",
type_p=type_out,
op_extra=p,
parents=[obj1],
name_hint=sqlname,
path_extra=name)
if isinstance(obj1, Observable):
p = std_pb2.LocalStructuredTransform(
col_op=proto_out)
return build_observable(
op_name="org.spark.LocalStructuredTransform",
type_p=type_out,
op_extra=p,
parents=[obj1],
name_hint=sqlname,
path_extra=name)
def function(df, name=None):
if isinstance(df, (DataFrame, Column, Observable)):
return function_karps(df, name)
# TODO: check for Spark
# Assume this is a python object, pass it to python:
return pyfun(df)
# Useful for visualization of the function
function.__name__ = sqlname
return function
def make_transform_sql2(sqlname, typefun, pyfun=None):
""" Makes a sql transformer that accepts two arguments.
sqlname: the name of the sql function.
typefun: a function that accepts a list of datatypes and returns a datatype.
numargs: the number of arguments accepted by this transformer.
pyfun: a python function that can perform the equivalent operation (if possible).
spfun: a spark function that does the equivalent operation.
Returns a function of type (input: col-like, name: string) -> col-like, with the following rules:
- observable -> observable
- column -> column
- dataframe -> dataframe
- python object -> python object
"""
return make_transform_sql(sqlname, typefun, numArgs=2, pyfun=pyfun)
def make_transform_sql(sqlname, typefun,
numArgs=None, pyfun=None, spfun=None):
""" Makes a sql transformer that accepts a fixed number of arguments (greater than one).
sqlname: the name of the sql function.
typefun: a function that accepts a list of datatypes and returns a datatype.
numargs: the number of arguments accepted by this transformer.
pyfun: a python function that can perform the equivalent operation (if possible).
spfun: a spark function that does the equivalent operation.
Returns a function of type (input: col-like, name: string) -> col-like, with the following rules:
- observable -> observable
- column -> column
- dataframe -> dataframe
- python object -> python object
"""
if numArgs is not None and numArgs == 0:
raise CreationError("Only for sql functions that accept arguments")
# Implementation for a list of columns.
# The result is a column with the same reference.
def function_karps_col(cols, name):
types_in = [col.type for col in cols]
type_out = typefun(*types_in)
return build_col_fun(cols[0].reference, type_out, sqlname, cols)
def function_karps_obs(obss, name):
types_in = [obs.type for obs in obss]
type_out = typefun(*types_in)
def f(idx):
return st_pb2.Column(
broadcast=st_pb2.ColumnBroadcastObservable(
observable_index=idx),
field_name="_%s"%str(idx))
return build_observable(
"org.spark.LocalStructuredTransform", type_out,
op_extra=std_pb2.LocalStructuredTransform(
col_op=st_pb2.Column(
function=st_pb2.ColumnFunction(
function_name=sqlname,
inputs=[f(idx) for idx in range(len(obss))]))),
parents=obss,
name_hint=sqlname,
path_extra=name)
# def function(*objs, name=None): # TODO: this is python3 only
def function(*objs, **kwargs):
name = kwargs['name'] if 'name' in kwargs else None
if len(objs) == 0 or (numArgs is not None and len(objs) != numArgs):
raise CreationError("%s needs %s argument(s)" % (sqlname, str(numArgs)))
# We accept a couple of cases for Karps:
# - == 1 dataframe, >= 0 columns, >= 0 observables >= other -> dataframe
# - >= 1 columns, >= 0 observables, >= other -> columns
# - >= 1 observables, >= other -> obs
# - >= 1 other -> python call
dfs = [obj for obj in objs if isinstance(obj, DataFrame)]
cols = [obj for obj in objs if isinstance(obj, Column)]
obss = [obj for obj in objs if isinstance(obj, Observable)]
comps = [obj for obj in objs if is_compatible_karps(obj)]
num_df = len(dfs)
num_col = len(cols)
num_obs = len(obss)
num_comps = len(comps)
# We cannot mix and match things for now.
if not dfs and not cols and not obss:
# No karps objects: fall back to the python implementation.
return pyfun(*objs)
if num_obs + num_col + num_df + num_comps != len(objs):
raise CreationError("Mixing karps objects with non karps objects")
# Same origin
bc_obj_ids = set([id(x) for x in (dfs + [col.reference for col in cols])])
if len(bc_obj_ids) > 1:
raise CreationError("More than one dataframes are being refered in this transform: dataframes: {} columns: {}".format(dfs, cols))
# If we are dealing with observables only, take a separate path, there is no reference in this case.
if not dfs and not cols:
# Using observables. All the values are translated as observables.
def convert(obs):
from ..column import observable
if isinstance(obs, Observable):
return obs
if is_compatible_karps(obs):
return observable(obs)
assert False, (type(obs), obs)
obss2 = [convert(o) for o in objs]
return function_karps_obs(obss2, name)
# Dealing with columns or dataframes.
assert dfs or cols
# Find the unique reference
if dfs:
ref = dfs[0]
else:
ref = cols[0].reference
def make_col(obj):
if isinstance(obj, DataFrame):
return obj.as_column()
if isinstance(obj, Column):
return obj
if isinstance(obj, Observable):
return build_col_broadcast(ref, obj.type, obj)
if is_compatible_karps(obj):
return build_col_literal(ref, obj)
assert False, obj
all_cols = [make_col(obj) for obj in objs]
col = function_karps_col(all_cols, name)
if dfs:
# If dataframes are mixed in, return a dataframe
return col.as_dataframe()
else:
return col
# Useful for visualization of the function
function.__name__ = sqlname
return function
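# Illustrative sketch of the transform factories above. The "plus" name and the
# trivial type function are assumptions for demonstration only.
def _first_type(*dts):
    # result type is the type of the first argument (illustrative only)
    return dts[0]

plus = make_transform_sql2("plus", _first_type, pyfun=lambda a, b: a + b)
# plus(col_a, col_b)  -> a Column attached to the same dataframe reference
# plus(obs_a, obs_b)  -> an Observable
# plus(2, 3)          -> 5 (pure python arguments fall back to pyfun)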
def is_compatible_karps(pyobj):
# True if the object can be turned into an observable.
# For now, it is limited to primitives.
# We lack type info with null objects.
if pyobj is None:
return False
return isinstance(pyobj, (float, str, bool, int))
def _convert_pd_series(obj):
if isinstance(obj, pd.Series):
return obj
if isinstance(obj, pd.DataFrame):
df = obj
cols = list(df.columns)
if len(cols) > 1:
raise ValueError("Expected one column but the following column: {}".format(cols))
return df[cols[0]]
|
import mock
from neutron_lib import constants
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.common import constants as n_const
from neutron.objects.logapi import logging_resource as log_object
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
as ovs_consts
from neutron.services.logapi.common import exceptions as log_exc
from neutron.services.logapi.drivers.openvswitch \
import ovs_firewall_log as ovsfw_log
from neutron.services.logapi.rpc import agent as agent_rpc
from neutron.tests import base
from neutron.tests import tools
COOKIE_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
PROJECT_ID = uuidutils.generate_uuid()
ACTION = tools.get_random_security_event()
LOG_ID = uuidutils.generate_uuid()
SG_ID = uuidutils.generate_uuid()
REMOTE_SG_ID = uuidutils.generate_uuid()
FakeSGLogInfo = [
{
'id': LOG_ID,
'ports_log': [{'port_id': PORT_ID,
'security_group_rules': [
{'ethertype': constants.IPv4,
'protocol': constants.PROTO_NAME_TCP,
'direction': constants.INGRESS_DIRECTION,
'port_range_min': 123,
'port_range_max': 123,
'security_group_id': SG_ID},
{'ethertype': constants.IPv4,
'protocol': constants.PROTO_NAME_UDP,
'direction': constants.EGRESS_DIRECTION,
'security_group_id': SG_ID},
{'ethertype': constants.IPv6,
'protocol': constants.PROTO_NAME_TCP,
'remote_group_id': REMOTE_SG_ID,
'direction': constants.EGRESS_DIRECTION,
'security_group_id': SG_ID}
]}],
'event': 'ALL',
'project_id': PROJECT_ID,
}
]
def set_log_driver_config(ctrl_rate_limit, ctrl_burst_limit):
cfg.CONF.set_override('rate_limit', ctrl_rate_limit, group='network_log')
cfg.CONF.set_override('burst_limit', ctrl_burst_limit, group='network_log')
class TestCookie(base.BaseTestCase):
def setUp(self):
super(TestCookie, self).setUp()
self.cookie = ovsfw_log.Cookie(COOKIE_ID, PORT_ID, ACTION, PROJECT_ID)
self.cookie.log_object_refs = set([LOG_ID])
def test_add_log_object_refs(self):
new_log_id = uuidutils.generate_uuid()
expected = set([LOG_ID, new_log_id])
self.cookie.add_log_obj_ref(new_log_id)
self.assertEqual(expected, self.cookie.log_object_refs)
def test_removed_log_object_ref(self):
expected = set()
self.cookie.remove_log_obj_ref(LOG_ID)
self.assertEqual(expected, self.cookie.log_object_refs)
def test_is_empty(self):
self.cookie.remove_log_obj_ref(LOG_ID)
result = self.cookie.is_empty
self.assertTrue(result)
class FakeOVSPort(object):
def __init__(self, name, port, mac):
self.port_name = name
self.ofport = port
self.vif_mac = mac
class TestOVSFirewallLoggingDriver(base.BaseTestCase):
def setUp(self):
super(TestOVSFirewallLoggingDriver, self).setUp()
self.log_driver = ovsfw_log.OVSFirewallLoggingDriver(mock.Mock())
resource_rpc_mock = mock.patch.object(
agent_rpc, 'LoggingApiStub', autospec=True).start()
self.log_driver.start_logapp = mock.Mock()
self.log_driver.initialize(resource_rpc_mock)
self.log_driver.SUPPORTED_LOGGING_TYPES = ['security_group']
self.mock_bridge = self.log_driver.int_br
self.mock_bridge.reset_mock()
self.fake_ovs_port = FakeOVSPort('port', 1, '00:00:00:00:00:00')
self.mock_bridge.br.get_vif_port_by_id.return_value = \
self.fake_ovs_port
log_data = {
'context': None,
'name': 'test1',
'id': LOG_ID,
'project_id': PROJECT_ID,
'event': 'ALL',
'resource_type': 'security_group'
}
self.log_resource = log_object.Log(**log_data)
@property
def port_ofport(self):
return self.mock_bridge.br.get_vif_port_by_id.return_value.ofport
@property
def port_mac(self):
return self.mock_bridge.br.get_vif_port_by_id.return_value.vif_mac
def test_initialize_bridge(self):
br = self.log_driver.initialize_bridge(self.mock_bridge)
self.assertEqual(self.mock_bridge.deferred.return_value, br)
def test_set_controller_rate_limit(self):
set_log_driver_config(100, 25)
self.log_driver.initialize_bridge(self.mock_bridge)
expected_calls = [mock.call.set_controller_rate_limit(100),
mock.call.set_controller_burst_limit(25)]
self.mock_bridge.assert_has_calls(expected_calls)
def test_generate_cookie(self):
cookie_id = self.log_driver.generate_cookie(
PORT_ID, ACTION, LOG_ID, PROJECT_ID)
cookie = self.log_driver._get_cookie_by_id(cookie_id)
self.assertIn(cookie, self.log_driver.cookies_table)
def test__get_cookie_by_id_not_found(self):
cookie_id = uuidutils.generate_uuid()
cookie = ovsfw_log.Cookie(cookie_id=uuidutils.generate_uuid(),
port=PORT_ID, action=ACTION,
project=PROJECT_ID)
self.log_driver.cookies_table = set([cookie])
self.assertRaises(log_exc.CookieNotFound,
self.log_driver._get_cookie_by_id,
cookie_id)
def test_start_log_with_update_or_create_log_event(self):
context = mock.Mock()
log_data = {'log_resources': [self.log_resource]}
self.log_driver.resource_rpc.get_sg_log_info_for_log_resources.\
return_value = FakeSGLogInfo
self.log_driver.start_logging(context, **log_data)
accept_cookie = self.log_driver._get_cookie(PORT_ID, 'ACCEPT')
drop_cookie = self.log_driver._get_cookie(PORT_ID, 'DROP')
conj_id = self.log_driver.conj_id_map.get_conj_id(
SG_ID, REMOTE_SG_ID, constants.EGRESS_DIRECTION, constants.IPv6)
add_rules = [
# log ingress tcp port=123
mock.call(
actions='controller',
cookie=accept_cookie.id,
reg5=self.port_ofport,
dl_type="0x{:04x}".format(n_const.ETHERTYPE_IP),
nw_proto=constants.PROTO_NUM_TCP,
priority=77,
table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
tcp_dst='0x007b'),
# log egress tcp6
mock.call(
actions='controller',
cookie=accept_cookie.id,
reg5=self.port_ofport,
dl_type="0x{:04x}".format(n_const.ETHERTYPE_IPV6),
priority=70,
reg7=conj_id + 1,
table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE),
# log egress udp
mock.call(
actions='controller',
cookie=accept_cookie.id,
reg5=self.port_ofport,
dl_type="0x{:04x}".format(n_const.ETHERTYPE_IP),
nw_proto=constants.PROTO_NUM_UDP,
priority=77,
table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
),
# log drop
mock.call(
actions='controller',
cookie=drop_cookie.id,
priority=53,
reg5=self.port_ofport,
table=ovs_consts.DROPPED_TRAFFIC_TABLE,
)
]
self.mock_bridge.br.add_flow.assert_has_calls(
add_rules, any_order=True)
def test_stop_log_with_delete_log_event(self):
context = mock.Mock()
log_data = {'log_resources': [self.log_resource]}
self.log_driver.resource_rpc.get_sg_log_info_for_log_resources.\
return_value = FakeSGLogInfo
self.log_driver.start_logging(context, **log_data)
accept_cookie = self.log_driver._get_cookie(PORT_ID, 'ACCEPT')
drop_cookie = self.log_driver._get_cookie(PORT_ID, 'DROP')
self.mock_bridge.reset_mock()
self.log_driver.stop_logging(context, **log_data)
delete_rules = [
# delete drop flow
mock.call(
table=ovs_consts.DROPPED_TRAFFIC_TABLE,
cookie=drop_cookie.id
),
# delete accept flows
mock.call(
table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
cookie=accept_cookie.id
),
mock.call(
table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
cookie=accept_cookie.id
)
]
self.mock_bridge.br.delete_flows.assert_has_calls(
delete_rules, any_order=True)
def test_start_log_with_add_port_event(self):
context = mock.Mock()
log_data = {'port_id': PORT_ID}
self.log_driver.resource_rpc.get_sg_log_info_for_port.return_value = \
[
{
'id': uuidutils.generate_uuid(),
'ports_log': [{'port_id': PORT_ID,
'security_group_rules': [
{'ethertype': constants.IPv4,
'protocol': constants.PROTO_NAME_TCP,
'direction':
constants.INGRESS_DIRECTION,
'port_range_min': 123,
'port_range_max': 123,
'security_group_id': 456}]}],
'event': 'ACCEPT',
'project_id': PROJECT_ID,
}
]
self.log_driver.start_logging(context, **log_data)
accept_cookie = self.log_driver._get_cookie(PORT_ID, 'ACCEPT')
add_rules = [
# log ingress tcp port=123
mock.call(
actions='controller',
cookie=accept_cookie.id,
reg5=self.port_ofport,
dl_type="0x{:04x}".format(n_const.ETHERTYPE_IP),
nw_proto=constants.PROTO_NUM_TCP,
priority=77,
table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
tcp_dst='0x007b')
]
self.mock_bridge.br.add_flow.assert_has_calls(
add_rules, any_order=True)
def test_stop_log_with_delete_port_event(self):
context = mock.Mock()
log_data = {'port_id': PORT_ID}
# add port
self.log_driver.resource_rpc.get_sg_log_info_for_port.return_value = \
FakeSGLogInfo
self.log_driver.start_logging(context, **log_data)
accept_cookie = self.log_driver._get_cookie(PORT_ID, 'ACCEPT')
drop_cookie = self.log_driver._get_cookie(PORT_ID, 'DROP')
self.mock_bridge.reset_mock()
# delete port
self.log_driver.stop_logging(
context, port_id=PORT_ID)
delete_rules = [
# delete accept flows
mock.call(
table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
cookie=accept_cookie.id
),
mock.call(
table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
cookie=accept_cookie.id
),
# delete drop flow
mock.call(
table=ovs_consts.DROPPED_TRAFFIC_TABLE,
cookie=drop_cookie.id
),
]
self.mock_bridge.br.delete_flows.assert_has_calls(
delete_rules, any_order=True)
|
import doctest
from insights.parsers import cloud_init_log
from insights.tests import context_wrap
CLOUD_INIT_LOG = """
2019-08-07 14:33:27,269 - util.py[DEBUG]: Reading from /etc/cloud/cloud.cfg.d/99-datasource.cfg (quiet=False)
2019-08-07 14:33:27,269 - util.py[DEBUG]: Read 59 bytes from /etc/cloud/cloud.cfg.d/99-datasource.cfg
2019-08-07 14:33:27,269 - util.py[DEBUG]: Attempting to load yaml from string of length 59 with allowed root types (<type 'dict'>,)
2019-08-07 14:33:27,270 - util.py[WARNING]: Failed loading yaml blob. Invalid format at line 1 column 1: "while parsing a block mapping
""".strip()
def test_cloud_init_log():
log = cloud_init_log.CloudInitLog(context_wrap(CLOUD_INIT_LOG))
assert "Reading from /etc/cloud/cloud.cfg.d/99-datasource.cfg" in log
assert len(log.get('DEBUG')) == 3
def test_documentation():
failed_count, tests = doctest.testmod(
cloud_init_log,
globs={'log': cloud_init_log.CloudInitLog(context_wrap(CLOUD_INIT_LOG))}
)
assert failed_count == 0
|
import logging
from marvin.cloudstackAPI import *
from marvin.cloudstackTestCase import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.lib.utils import *
from nose.plugins.attrib import attr
class TestPublicIpAcl(cloudstackTestCase):
attributes = {
'template_name': 'tiny linux kvm',
'account': {
'email': 'e.cartman@southpark.com',
'firstname': 'Eric',
'lastname': 'Cartman',
'username': 'e.cartman',
'password': 'southpark'
},
'default_offerings': {
'vpc': 'Default VPC offering',
'redundant_vpc': 'Redundant VPC offering',
'network': 'DefaultIsolatedNetworkOfferingForVpcNetworks',
'virtual_machine': 'Small Instance'
},
'vpcs': {
'vpc1': {
'name': 'vpc1',
'displaytext': 'vpc1',
'cidr': '10.1.0.0/16'
}
},
'networks': {
'network1': {
'name': 'network1',
'displaytext': 'network1',
'gateway': '10.1.1.1',
'netmask': '255.255.255.0'
}
},
'vms': {
'vm1': {
'name': 'vm1',
'displayname': 'vm1'
}
},
'nat_rule': {
'protocol': 'TCP',
'publicport': 22,
'privateport': 22
},
'acls': {
'acl1': {
'name': 'acl1',
'description': 'acl1',
'entries': {
'entry1': {
'protocol': 'TCP',
'action': 'Allow',
'traffictype': 'Ingress',
'startport': 22,
'endport': 22
}
}
},
'acl2': {
'name': 'acl2',
'description': 'acl2',
'entries': {
'entry2': {
'protocol': 'TCP',
'action': 'Deny',
'traffictype': 'Ingress',
'startport': 22,
'endport': 22
}
}
}
}
}
@classmethod
def setUpClass(cls):
cls.test_client = super(TestPublicIpAcl, cls).getClsTestClient()
cls.api_client = cls.test_client.getApiClient()
cls.class_cleanup = []
cls.logger = logging.getLogger('TestPublicIpAcl')
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(logging.StreamHandler())
@classmethod
def setup_infra(cls, redundant=False):
if len(cls.class_cleanup) > 0:
cleanup_resources(cls.api_client, cls.class_cleanup, cls.logger)
cls.class_cleanup = []
cls.zone = get_zone(cls.api_client, cls.test_client.getZoneForTests())
cls.logger.debug("[TEST] Zone '%s' selected" % cls.zone.name)
cls.domain = get_domain(cls.api_client)
cls.logger.debug("[TEST] Domain '%s' selected" % cls.domain.name)
cls.template = get_template(
cls.api_client,
cls.zone.id,
template_name=cls.attributes['template_name'])
cls.logger.debug("[TEST] Template '%s' selected" % cls.template.name)
cls.account = Account.create(
cls.api_client,
cls.attributes['account'],
admin=True,
domainid=cls.domain.id)
cls.class_cleanup += [cls.account]
cls.logger.debug("[TEST] Account '%s' created", cls.account.name)
cls.vpc_offering = cls.get_default_redundant_vpc_offering() if redundant else cls.get_default_vpc_offering()
cls.logger.debug("[TEST] VPC Offering '%s' selected", cls.vpc_offering.name)
cls.network_offering = cls.get_default_network_offering()
cls.logger.debug("[TEST] Network Offering '%s' selected", cls.network_offering.name)
cls.virtual_machine_offering = cls.get_default_virtual_machine_offering()
cls.logger.debug("[TEST] Virtual Machine Offering '%s' selected", cls.virtual_machine_offering.name)
cls.default_allow_acl = cls.get_default_acl('default_allow')
cls.logger.debug("[TEST] ACL '%s' selected", cls.default_allow_acl.name)
cls.default_deny_acl = cls.get_default_acl('default_deny')
cls.logger.debug("[TEST] ACL '%s' selected", cls.default_deny_acl.name)
cls.vpc1 = VPC.create(cls.api_client,
cls.attributes['vpcs']['vpc1'],
vpcofferingid=cls.vpc_offering.id,
zoneid=cls.zone.id,
domainid=cls.domain.id,
account=cls.account.name)
cls.logger.debug("[TEST] VPC '%s' created, CIDR: %s", cls.vpc1.name, cls.vpc1.cidr)
cls.network1 = Network.create(cls.api_client,
cls.attributes['networks']['network1'],
networkofferingid=cls.network_offering.id,
aclid=cls.default_allow_acl.id,
vpcid=cls.vpc1.id,
zoneid=cls.zone.id,
domainid=cls.domain.id,
accountid=cls.account.name)
cls.logger.debug("[TEST] Network '%s' created, CIDR: %s, Gateway: %s", cls.network1.name, cls.network1.cidr, cls.network1.gateway)
cls.vm1 = VirtualMachine.create(cls.api_client,
cls.attributes['vms']['vm1'],
templateid=cls.template.id,
serviceofferingid=cls.virtual_machine_offering.id,
networkids=[cls.network1.id],
zoneid=cls.zone.id,
domainid=cls.domain.id,
accountid=cls.account.name)
cls.logger.debug("[TEST] VM '%s' created, Network: %s, IP %s", cls.vm1.name, cls.network1.name, cls.vm1.nic[0].ipaddress)
cls.public_ip1 = PublicIPAddress.create(cls.api_client,
zoneid=cls.zone.id,
domainid=cls.account.domainid,
accountid=cls.account.name,
vpcid=cls.vpc1.id,
networkid=cls.network1.id)
cls.logger.debug("[TEST] Public IP '%s' acquired, VPC: %s, Network: %s", cls.public_ip1.ipaddress.ipaddress, cls.vpc1.name, cls.network1.name)
cls.nat_rule1 = NATRule.create(cls.api_client,
cls.vm1,
cls.attributes['nat_rule'],
vpcid=cls.vpc1.id,
networkid=cls.network1.id,
ipaddressid=cls.public_ip1.ipaddress.id)
cls.logger.debug("[TEST] Port Forwarding Rule '%s (%s) %s => %s' created",
cls.nat_rule1.ipaddress,
cls.nat_rule1.protocol,
cls.nat_rule1.publicport,
cls.nat_rule1.privateport)
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls.class_cleanup, cls.logger)
except Exception as e:
raise Exception("Exception: %s" % e)
def setUp(self):
self.method_cleanup = []
def tearDown(self):
try:
cleanup_resources(self.api_client, self.method_cleanup, self.logger)
except Exception as e:
raise Exception("Exception: %s" % e)
def test_acls(self, first_time_retries=2):
self.define_acl(self.default_allow_acl)
self.test_connectivity(retries=first_time_retries)
self.define_acl(self.default_deny_acl)
self.test_no_connectivity()
self.define_custom_acl('acl1', 'entry1')
self.test_connectivity()
self.define_custom_acl('acl2', 'entry2')
self.test_no_connectivity()
self.define_acl(self.default_allow_acl)
self.test_connectivity()
@attr(tags=['advanced'], required_hardware='true')
def test_01(self):
self.setup_infra(redundant=False)
self.test_acls(first_time_retries=10)
@attr(tags=['advanced'], required_hardware='true')
def test_02(self):
self.cleanup_vpc()
self.test_acls()
@attr(tags=['advanced'], required_hardware='true')
def test_03(self):
self.setup_infra(redundant=True)
self.test_acls(first_time_retries=10)
@attr(tags=['advanced'], required_hardware='true')
def test_04(self):
self.cleanup_vpc()
self.test_acls()
@attr(tags=['advanced'], required_hardware='true')
def test_05(self):
self.stop_master_router(self.vpc1)
self.test_acls()
def test_connectivity(self, retries=2):
try:
self.vm1.get_ssh_client(ipaddress=self.public_ip1.ipaddress.ipaddress, reconnect=True, retries=retries)
self.logger.debug('[TEST] Ensure connectivity: OK')
except Exception as e:
raise Exception("Exception: %s" % e)
def test_no_connectivity(self):
failed = False
try:
self.vm1.get_ssh_client(ipaddress=self.public_ip1.ipaddress.ipaddress, reconnect=True, retries=2)
except Exception as e:
self.logger.debug('[TEST] Ensure no connectivity: OK')
failed = True
self.assertTrue(failed)
def cleanup_vpc(self):
self.logger.debug("[TEST] Restarting VPC '%s' with 'cleanup=True'", self.vpc1.name)
self.vpc1.restart(self.api_client, True)
self.logger.debug("[TEST] VPC '%s' restarted", self.vpc1.name)
def define_acl(self, acl):
try:
command = replaceNetworkACLList.replaceNetworkACLListCmd()
command.aclid = acl.id
command.publicipid = self.public_ip1.ipaddress.id
response = self.api_client.replaceNetworkACLList(command)
except Exception as e:
raise Exception("Exception: %s" % e)
self.assertTrue(response.success)
self.logger.debug("[TEST] Public IP '%s' ACL replaced with '%s'", self.public_ip1.ipaddress.ipaddress, acl.name)
def define_custom_acl(self, acl_config, acl_entry_config):
acl = NetworkACLList.create(self.api_client,
self.attributes['acls'][acl_config],
vpcid=self.vpc1.id)
NetworkACL.create(self.api_client,
self.attributes['acls'][acl_config]['entries'][acl_entry_config],
networkid=self.network1.id,
aclid=acl.id)
self.define_acl(acl)
def stop_master_router(self, vpc):
self.logger.debug("[TEST] Stopping Master Router of VPC '%s'...", vpc.name)
routers = list_routers(self.api_client, domainid=self.domain.id, account=self.account.name, vpcid=vpc.id)
for router in routers:
if router.redundantstate == 'MASTER':
cmd = stopRouter.stopRouterCmd()
cmd.id = router.id
cmd.forced = 'true'
self.api_client.stopRouter(cmd)
break
for router in routers:
if router.state == 'Running':
hosts = list_hosts(self.api_client, zoneid=router.zoneid, type='Routing', state='Up', id=router.hostid)
self.assertTrue(isinstance(hosts, list))
host = next(iter(hosts or []), None)
try:
host.user, host.passwd = get_host_credentials(self.config, host.ipaddress)
get_process_status(host.ipaddress, 22, host.user, host.passwd, router.linklocalip, "sh /opt/cloud/bin/checkrouter.sh ")
except KeyError as e:
raise Exception("Exception: %s" % e)
self.logger.debug("[TEST] Master Router of VPC '%s' stopped", vpc.name)
@classmethod
def get_default_vpc_offering(cls):
offerings = list_vpc_offerings(cls.api_client)
offerings = [offering for offering in offerings if offering.name == cls.attributes['default_offerings']['vpc']]
return next(iter(offerings or []), None)
@classmethod
def get_default_redundant_vpc_offering(cls):
offerings = list_vpc_offerings(cls.api_client)
offerings = [offering for offering in offerings if offering.name == cls.attributes['default_offerings']['redundant_vpc']]
return next(iter(offerings or []), None)
@classmethod
def get_default_network_offering(cls):
offerings = list_network_offerings(cls.api_client)
offerings = [offering for offering in offerings if offering.name == cls.attributes['default_offerings']['network']]
return next(iter(offerings or []), None)
@classmethod
def get_default_virtual_machine_offering(cls):
offerings = list_service_offering(cls.api_client)
offerings = [offering for offering in offerings if offering.name == cls.attributes['default_offerings']['virtual_machine']]
return next(iter(offerings or []), None)
@classmethod
def get_default_acl(cls, name):
acls = NetworkACLList.list(cls.api_client)
acls = [acl for acl in acls if acl.name == name]
return next(iter(acls or []), None)
@classmethod
def get_default_allow_vpc_acl(cls, vpc): # check if it's better to get the ACL from the VPC
acls = NetworkACLList.list(cls.api_client, vpcid=vpc.id)
acls = [acl for acl in acls if acl.name == 'default_allow']
return next(iter(acls or []), None)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, dyndep, test_util
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/warpctc:ctc_ops')
workspace.GlobalInit(["python"])
def softmax(w):
maxes = np.amax(w, axis=-1, keepdims=True)
e = np.exp(w - maxes)
dist = e / np.sum(e, axis=-1, keepdims=True)
return dist
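# Quick standalone check of softmax() (assumes only numpy): each row sums to 1
# and the argmax of every row is preserved.
_probe = np.asarray([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
assert np.allclose(softmax(_probe).sum(axis=-1), 1.0)
assert (softmax(_probe).argmax(axis=-1) == _probe.argmax(axis=-1)).all()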
class CTCOpsTest(test_util.TestCase):
def verify_cost(self, device_option, is_test):
alphabet_size = 5
N = 1
T = 2
inputs = np.asarray(
[
[[0.1, 0.6, 0.1, 0.1, 0.1]],
[[0.1, 0.1, 0.6, 0.1, 0.1]],
]
).reshape(T, N, alphabet_size).astype(np.float32)
labels = np.asarray([1, 2]).astype(np.int32).reshape(T)
label_lengths = np.asarray([2]).astype(np.int32).reshape(N)
input_lengths = np.asarray([T]).astype(np.int32)
net = core.Net("test-net")
output_blobs = ["costs", "workspace"] if is_test \
else ["inputs_grad_to_be_copied", "costs", "workspace"]
net.CTC(["inputs", "labels", "label_lengths", "input_lengths"],
output_blobs,
is_test=is_test,
device_option=device_option)
if not is_test:
net.AddGradientOperators(["costs"])
self.ws.create_blob("inputs").feed(inputs, device_option=device_option)
self.ws.create_blob("labels").feed(labels)
self.ws.create_blob("label_lengths").feed(label_lengths)
self.ws.create_blob("input_lengths").feed(input_lengths)
self.ws.run(net)
probs = softmax(inputs)
expected = probs[0, 0, 1] * probs[1, 0, 2]
self.assertEqual(self.ws.blobs["costs"].fetch().shape, (N,))
self.assertEqual(self.ws.blobs["costs"].fetch().dtype, np.float32)
cost = self.ws.blobs["costs"].fetch()[0]
print(cost)
self.assertAlmostEqual(np.exp(-cost), expected)
if not is_test:
# Make sure inputs_grad was added by AddGradientOperators and
# it is equal to the inputs_grad_to_be_copied blob returned by CTCop
assert np.array_equal(
self.ws.blobs["inputs_grad"].fetch(),
self.ws.blobs["inputs_grad_to_be_copied"].fetch()
)
def test_ctc_cost_cpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU),
is_test=False)
def test_ctc_cost_gpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
cuda_gpu_id=0),
is_test=False)
def test_ctc_forward_only_cpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU),
is_test=True)
def test_ctc_forward_only_gpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
cuda_gpu_id=0),
is_test=True)
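# Standalone sketch of the expected cost asserted in verify_cost(): because the
# label length equals T, the only valid CTC alignment is the label itself (there
# is no room for blanks), so the loss reduces to the negative log of the product
# of the per-step label probabilities. Uses only numpy and softmax() from above.
def _expected_ctc_cost():
    inputs = np.asarray([[[0.1, 0.6, 0.1, 0.1, 0.1]],
                         [[0.1, 0.1, 0.6, 0.1, 0.1]]], dtype=np.float32)
    probs = softmax(inputs)
    return -np.log(probs[0, 0, 1] * probs[1, 0, 2])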
|
"""Model Card Data Class.
The Model Card (MC) is the document designed for transparent reporting of AI
model provenance, usage, and ethics-informed evaluation. The model card can be
presented by different formats (e.g. HTML, PDF, Markdown). The properties of
the Model Card (MC) are defined by a json schema. The ModelCard class in the
ModelCardsToolkit serves as an API to read and write MC properties by the users.
"""
import dataclasses
import json as json_lib
from typing import Any, Dict, List, Optional, Union
from model_card_toolkit.base_model_card_field import BaseModelCardField
from model_card_toolkit.proto import model_card_pb2
from model_card_toolkit.utils import validation
@dataclasses.dataclass
class Owner(BaseModelCardField):
"""The information about owners of a model.
Attributes:
name: The name of the model owner.
contact: The contact information for the model owner or owners. These could
be individual email addresses, a team mailing list expressly created for
this purpose, or a monitored feedback form.
"""
name: Optional[str] = None
contact: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.Owner)] = model_card_pb2.Owner
@dataclasses.dataclass
class Version(BaseModelCardField):
"""The information about verions of a model.
If there are multiple versions of the model, or there may be in the future,
it’s useful for your audience to know which version of the model is
discussed
in the Model Card. If there are previous versions of this model, briefly
describe how this version is different. If no more than one version of the
model will be released, this field may be omitted.
Attributes:
name: The name of the version.
date: The date this version was released.
diff: The changes from the previous version.
"""
name: Optional[str] = None
date: Optional[str] = None
diff: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.Version)] = model_card_pb2.Version
@dataclasses.dataclass
class License(BaseModelCardField):
"""The license information for a model.
Attributes:
identifier: A standard SPDX license identifier (https://spdx.org/licenses/),
or "proprietary" for an unlicensed Module.
custom_text: The text of a custom license.
"""
identifier: Optional[str] = None
custom_text: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.License)] = model_card_pb2.License
@dataclasses.dataclass
class Reference(BaseModelCardField):
"""Reference for a model.
Attributes:
reference: A reference to a resource.
"""
reference: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.Reference)] = model_card_pb2.Reference
@dataclasses.dataclass
class Citation(BaseModelCardField):
"""A citation for a model.
Attributes:
style: The citation style, such as MLA, APA, Chicago, or IEEE.
citation: the citation.
"""
style: Optional[str] = None
citation: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.Citation)] = model_card_pb2.Citation
@dataclasses.dataclass
class ModelDetails(BaseModelCardField):
"""This section provides a general, high-level description of the model.
Attributes:
name: The name of the model.
overview: A description of the model card.
documentation: A more thorough description of the model and its usage.
owners: The individuals or teams who own the model.
version: The version of the model.
licenses: The license information for the model. If the model is licensed
for use by others, include the license type. If the model is not licensed
for future use, you may state that here as well.
references: Provide any additional links the reader may need. You can link
to foundational research, technical documentation, or other materials that
may be useful to your audience.
citations: How should the model be cited? If the model is based on published
academic research, cite the research.
path: The path where the model is stored.
"""
name: Optional[str] = None
overview: Optional[str] = None
documentation: Optional[str] = None
owners: List[Owner] = dataclasses.field(default_factory=list)
version: Optional[Version] = dataclasses.field(default_factory=Version)
licenses: List[License] = dataclasses.field(default_factory=list)
references: List[Reference] = dataclasses.field(default_factory=list)
citations: List[Citation] = dataclasses.field(default_factory=list)
path: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.ModelDetails)] = model_card_pb2.ModelDetails
@dataclasses.dataclass
class Graphic(BaseModelCardField):
"""A named inline plot.
Attributes:
name: The name of the graphic.
image: The image string encoded as a base64 string.
"""
name: Optional[str] = None
image: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.Graphic)] = model_card_pb2.Graphic
@dataclasses.dataclass
class GraphicsCollection(BaseModelCardField):
"""A collection of graphics.
Each ```graphic``` in the ```collection``` field has both a ```name``` and
an ```image```. For instance, you might want to display a graph showing the
number of examples belonging to each class in your training dataset:
```python
model_card.model_parameters.data.train.graphics.collection = [
{'name': 'Training Set Size', 'image': training_set_size_barchart},
]
```
Then, provide a description of the graph:
```python
model_card.model_parameters.data.train.graphics.description = (
'This graph displays the number of examples belonging to each class ',
'in the training dataset. ')
```
Attributes:
description: The description of graphics.
collection: A collection of graphics.
"""
description: Optional[str] = None
collection: List[Graphic] = dataclasses.field(default_factory=list)
_proto_type: dataclasses.InitVar[type(
model_card_pb2.GraphicsCollection)] = model_card_pb2.GraphicsCollection
@dataclasses.dataclass
class SensitiveData(BaseModelCardField):
"""Sensitive data, such as PII (personally-identifiable information).
Attributes:
sensitive_data: A description of any sensitive data that may be present in a
dataset. Be sure to note PII information such as names, addresses, phone
numbers, etc. Preferably, such info should be scrubbed from a dataset if
possible. Note that even non-identifying information, such as zip code,
age, race, and gender, can be used to identify individuals when
aggregated. Please describe any such fields here.
"""
sensitive_data: List[str] = dataclasses.field(default_factory=list)
_proto_type: dataclasses.InitVar[type(
model_card_pb2.SensitiveData)] = model_card_pb2.SensitiveData
@dataclasses.dataclass
class Dataset(BaseModelCardField):
"""Provide some information about a dataset used to generate a model.
Attributes:
name: The name of the dataset.
description: The description of dataset.
link: A link to the dataset.
sensitive: Does this dataset contain human or other sensitive data?
graphics: Visualizations of the dataset.
"""
name: Optional[str] = None
description: Optional[str] = None
link: Optional[str] = None
sensitive: Optional[SensitiveData] = dataclasses.field(
default_factory=SensitiveData)
graphics: GraphicsCollection = dataclasses.field(
default_factory=GraphicsCollection)
_proto_type: dataclasses.InitVar[type(
model_card_pb2.Dataset)] = model_card_pb2.Dataset
@dataclasses.dataclass
class KeyVal(BaseModelCardField):
"""A generic key-value pair.
Attributes:
key: The key of the key-value pair.
value: The value of the key-value pair.
"""
key: Optional[str] = None
value: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.KeyVal)] = model_card_pb2.KeyVal
@dataclasses.dataclass
class ModelParameters(BaseModelCardField):
"""Parameters for construction of the model.
Attributes:
model_architecture: specifies the architecture of your model.
data: specifies the datasets used to train and evaluate your model.
input_format: describes the data format for inputs to your model.
input_format_map: describes the data format for inputs to your model, in
key-value format.
output_format: describes the data format for outputs from your model.
output_format_map: describes the data format for outputs to your model, in
key-value format
"""
model_architecture: Optional[str] = None
data: List[Dataset] = dataclasses.field(default_factory=list)
input_format: Optional[str] = None
input_format_map: List[KeyVal] = dataclasses.field(default_factory=list)
output_format: Optional[str] = None
output_format_map: List[KeyVal] = dataclasses.field(default_factory=list)
_proto_type: dataclasses.InitVar[type(
model_card_pb2.ModelParameters)] = model_card_pb2.ModelParameters
@dataclasses.dataclass
class ConfidenceInterval(BaseModelCardField):
"""The confidence interval of the metric.
Attributes:
lower_bound: The lower bound of the performance metric.
upper_bound: The upper bound of the performance metric.
"""
lower_bound: Optional[str] = None
upper_bound: Optional[str] = None
_proto_type: dataclasses.InitVar[BaseModelCardField._get_type(
model_card_pb2.ConfidenceInterval)] = model_card_pb2.ConfidenceInterval
@dataclasses.dataclass
class PerformanceMetric(BaseModelCardField):
"""The details of the performance metric.
Attributes:
type: What performance metric are you reporting on?
value: What is the value of this performance metric?
slice: What slice of your data was this metric computed on?
confidence_interval: The confidence interval of the metric.
"""
type: Optional[str] = None
value: Optional[str] = None
slice: Optional[str] = None
confidence_interval: ConfidenceInterval = dataclasses.field(
default_factory=ConfidenceInterval)
_proto_type: dataclasses.InitVar[BaseModelCardField._get_type(
model_card_pb2.PerformanceMetric)] = model_card_pb2.PerformanceMetric
@dataclasses.dataclass
class QuantitativeAnalysis(BaseModelCardField):
"""The quantitative analysis of a model.
Identify relevant performance metrics and display values. Let’s say you’re
interested in displaying the accuracy and false positive rate (FPR) of a
cat vs. dog classification model. Assuming you have already computed both
metrics, both overall and per-class, you can specify metrics like so:
```python
model_card.quantitative_analysis.performance_metrics = [
{'type': 'accuracy', 'value': computed_accuracy},
{'type': 'accuracy', 'value': cat_accuracy, 'slice': 'cat'},
{'type': 'accuracy', 'value': dog_accuracy, 'slice': 'dog'},
{'type': 'fpr', 'value': computed_fpr},
{'type': 'fpr', 'value': cat_fpr, 'slice': 'cat'},
{'type': 'fpr', 'value': dog_fpr, 'slice': 'dog'},
]
```
Attributes:
performance_metrics: The performance metrics being reported.
graphics: A collection of visualizations of model performance.
"""
performance_metrics: List[PerformanceMetric] = dataclasses.field(
default_factory=list)
graphics: GraphicsCollection = dataclasses.field(
default_factory=GraphicsCollection)
_proto_type: dataclasses.InitVar[type(model_card_pb2.QuantitativeAnalysis
)] = model_card_pb2.QuantitativeAnalysis
@dataclasses.dataclass
class User(BaseModelCardField):
"""A type of user for a model.
Attributes:
description: A description of a user.
"""
description: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.User)] = model_card_pb2.User
@dataclasses.dataclass
class UseCase(BaseModelCardField):
"""A type of use case for a model.
Attributes:
description: A description of a use case.
"""
description: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.UseCase)] = model_card_pb2.UseCase
@dataclasses.dataclass
class Limitation(BaseModelCardField):
"""A limitation a model.
Attributes:
description: A description of the limitation.
"""
description: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.Limitation)] = model_card_pb2.Limitation
@dataclasses.dataclass
class Tradeoff(BaseModelCardField):
"""A tradeoff for a model.
Attributes:
description: A description of the tradeoff.
"""
description: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.Tradeoff)] = model_card_pb2.Tradeoff
@dataclasses.dataclass
class Risk(BaseModelCardField):
"""Information about risks involved when using the model.
Attributes:
name: The name of the risk.
mitigation_strategy: A mitigation strategy that you've implemented, or one
that you suggest to users.
"""
name: Optional[str] = None
mitigation_strategy: Optional[str] = None
_proto_type: dataclasses.InitVar[type(
model_card_pb2.Risk)] = model_card_pb2.Risk
@dataclasses.dataclass
class Considerations(BaseModelCardField):
"""Considerations related to model construction, training, and application.
The considerations section includes qualitative information about your model,
including some analysis of its risks and limitations. As such, this section
usually requires careful consideration, and conversations with many relevant
stakeholders, including other model developers, dataset producers, and
downstream users likely to interact with your model, or be affected by its
outputs.
Attributes:
users: Who are the intended users of the model? This may include
researchers, developers, and/or clients. You might also include
information about the downstream users you expect to interact with your
model.
use_cases: What are the intended use cases of the model? What use cases are
out-of-scope?
limitations: What are the known limitations of the model? This may include
technical limitations, or conditions that may degrade model performance.
tradeoffs: What are the known accuracy/performance tradeoffs for the model?
ethical_considerations: What are the ethical risks involved in application
of this model? For each risk, you may also provide a mitigation strategy
that you've implemented, or one that you suggest to users.
"""
users: List[User] = dataclasses.field(default_factory=list)
use_cases: List[UseCase] = dataclasses.field(default_factory=list)
limitations: List[Limitation] = dataclasses.field(default_factory=list)
tradeoffs: List[Tradeoff] = dataclasses.field(default_factory=list)
ethical_considerations: List[Risk] = dataclasses.field(default_factory=list)
_proto_type: dataclasses.InitVar[type(
model_card_pb2.Considerations)] = model_card_pb2.Considerations
@dataclasses.dataclass
class ModelCard(BaseModelCardField):
"""Fields used to generate the Model Card.
Attributes:
model_details: Descriptive metadata for the model.
model_parameters: Technical metadata for the model.
quantitative_analysis: Quantitative analysis of model performance.
considerations: Any considerations related to model construction, training,
and application.
"""
model_details: ModelDetails = dataclasses.field(default_factory=ModelDetails)
model_parameters: ModelParameters = dataclasses.field(
default_factory=ModelParameters)
quantitative_analysis: QuantitativeAnalysis = dataclasses.field(
default_factory=QuantitativeAnalysis)
considerations: Considerations = dataclasses.field(
default_factory=Considerations)
_proto_type: dataclasses.InitVar[type(
model_card_pb2.ModelCard)] = model_card_pb2.ModelCard
def to_json(self) -> str:
"""Write ModelCard to JSON."""
model_card_dict = self.to_dict()
model_card_dict[
validation
.SCHEMA_VERSION_STRING] = validation.get_latest_schema_version()
return json_lib.dumps(model_card_dict, indent=2)
def from_json(self, json_dict: Dict[str, Any]) -> None:
"""Reads ModelCard from JSON.
This function will overwrite all existing ModelCard fields.
Args:
json_dict: A JSON dict from which to populate fields in the model card
schema.
Raises:
JSONDecodeError: If `json_dict` is not a valid JSON string.
ValidationError: If `json_dict` does not follow the model card JSON
schema.
ValueError: If `json_dict` contains a value not in the class or schema
definition.
"""
validation.validate_json_schema(json_dict)
self.clear()
self._from_json(json_dict, self)
def merge_from_json(self, json: Union[Dict[str, Any], str]) -> None:
"""Reads ModelCard from JSON.
This function will only overwrite ModelCard fields specified in the JSON.
Args:
json: A JSON object from which to populate fields in the model card. This
can be provided as either a dictionary or a string.
Raises:
JSONDecodeError: If `json` is not a valid JSON string.
ValidationError: If `json` does not follow the model card JSON
schema.
ValueError: If `json` contains a value not in the class or schema
definition.
"""
if isinstance(json, str):
json = json_lib.loads(json)
validation.validate_json_schema(json)
self._from_json(json, self)
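# Minimal usage sketch of the ModelCard API above. The field values are
# illustrative assumptions, not real model metadata.
def _example_model_card() -> str:
    card = ModelCard()
    card.model_details.name = "demo_classifier"
    card.model_details.owners = [Owner(name="Jane Doe", contact="jane@example.com")]
    card.model_details.version = Version(name="0.1", date="2021-01-01")
    # to_json() attaches the latest schema version and returns an indented JSON
    # string; merge_from_json() would update only the fields present in the JSON.
    return card.to_json()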
|
"""Generate a channel token for GAE's channels API.
This is called via an Ajax request.
"""
__author__ = \
'jeffy@google.com (Jeff Posnick) and jjinux@google.com (JJ Behrens)'
from google.appengine.api import channel
from playlistpicker.handlers.basehandler import BaseHandler
from playlistpicker.utils import channels as channelutils
from playlistpicker.utils import memcache as memcacheutils
from playlistpicker.utils import web as webutils
class GenerateChannelTokenHandler(BaseHandler):
@BaseHandler.oauth2_decorator.oauth_required
@BaseHandler.authorize_playlist
def post(self, playlist_id):
"""Return a channel token."""
channel_id = channelutils.create_channel_id()
memcacheutils.update_memcache(channel_id, playlist_id,
self.current_user_id,
self.current_display_name, self.people)
webutils.render_json_to_response(
self, dict(channelToken=channel.create_channel(channel_id)))
|
NetworkOfferingId = '8d648af4-ceb9-4640-bded-7e2ce3a7a699'
ZoneId = 'b9a0dc27-cea5-4602-9bdc-eef68412df46'
VMServiceOfferingId = '12e34e93-e786-48f6-a66c-8b3f9aa3e0ff'
TemplateId = '7f233e45-c9de-40d7-900e-496758ee3c50'
DisplayText = 'Just a test.'
|
"""
/***********************************
Copyright 2020 Ravishankar Mathur
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
***********************************/
"""
import math
import numpy as np
import OFInterfaces.PyOF as PyOF
def dcmToQuat(dcm):
# check for largest term
q_sq = np.zeros(4)
q_sq[0] = (1 + 2*dcm[0][0] - np.trace(dcm))/4
q_sq[1] = (1 + 2*dcm[1][1] - np.trace(dcm))/4
q_sq[2] = (1 + 2*dcm[2][2] - np.trace(dcm))/4
q_sq[3] = (1 + np.trace(dcm))/4
idx = np.argmax(q_sq)
q = np.zeros(4)
if idx == 0:
q[0] = math.sqrt(q_sq[0])
q[1] = (dcm[0][1] + dcm[1][0])/(4 * q[0])
q[2] = (dcm[2][0] + dcm[0][2])/(4 * q[0])
q[3] = (dcm[1][2] - dcm[2][1])/(4 * q[0])
elif idx == 1:
q[1] = math.sqrt(q_sq[1])
q[0] = (dcm[0][1] + dcm[1][0])/(4 * q[1])
q[2] = (dcm[1][2] + dcm[2][1])/(4 * q[1])
q[3] = (dcm[2][0] - dcm[0][2])/(4 * q[1])
elif idx == 2:
q[2] = math.sqrt(q_sq[2])
q[0] = (dcm[2][0] + dcm[0][2])/(4 * q[2])
q[1] = (dcm[1][2] + dcm[2][1])/(4 * q[2])
q[3] = (dcm[0][1] - dcm[1][0])/(4 * q[2])
else:
q[3] = math.sqrt(q_sq[3])
q[0] = (dcm[1][2] - dcm[2][1])/(4 * q[3])
q[1] = (dcm[2][0] - dcm[0][2])/(4 * q[3])
q[2] = (dcm[0][1] - dcm[1][0])/(4 * q[3])
# Enforce norm
q /= np.linalg.norm(q)
return q
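# Quick sanity check of dcmToQuat (uses only numpy/math from above): the DCM for
# a quarter-turn about Z should map to a quaternion (0, 0, +/-sin 45deg, cos 45deg).
_dcm_z90 = np.array([[0.0, 1.0, 0.0],
                     [-1.0, 0.0, 0.0],
                     [0.0, 0.0, 1.0]])
assert np.allclose(np.abs(dcmToQuat(_dcm_z90)),
                   [0.0, 0.0, math.sin(math.pi/4), math.cos(math.pi/4)])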
def getAttitudeQuat(eye, center, up):
eye = np.array([PyOF.getOsgVec3d(eye, i) for i in range(3)])
center = np.array([PyOF.getOsgVec3d(center, i) for i in range(3)])
up = np.array([PyOF.getOsgVec3d(up, i) for i in range(3)])
f = center - eye
f /= np.linalg.norm(f)
s = np.cross(f, up)
s /= np.linalg.norm(s)
u = np.cross(s, f)
u /= np.linalg.norm(u)
mat = np.column_stack((s, u, -f));
q = dcmToQuat(mat)
# get inverse
q = PyOF.osgQuat(-q[0], -q[1], -q[2], q[3])
return q
myWindow = PyOF.WindowProxy(30, 30, 1280, 720, 1, 1, False, False);
root = PyOF.ReferenceFrame("Root");
view = PyOF.View(root, root);
myWindow.getGridPosition(0, 0).addView(view);
view.setDefaultViewDistance(15.0);
view.resetView();
customCone = PyOF.PolyhedralCone("Custom Cone");
customCone.setConeColor(0.5, 0.5, 0.5, 0.5);
customCone.setConeLength(5.0);
root.addChild(customCone);
view = PyOF.View(root, customCone);
myWindow.getGridPosition(0, 0).addView(view);
view.setDefaultViewParameters(PyOF.osgVec3d(0, 0, 5.0), PyOF.osgVec3d(0,0,0), PyOF.osgVec3d(0, 1.0, 0));
view.resetView();
clockAngles = [10.0, 30.0, 90.0, 180.0, 270.0]
clockAngles = PyOF.AngleArray([angle * math.pi/180 for angle in clockAngles])
coneAngles = [10.0, 30.0, 40.0, 60.0, 30.0]
coneAngles = PyOF.AngleArray([angle * math.pi/180 for angle in coneAngles])
customCone.setVertexAngles(clockAngles, coneAngles);
origin = PyOF.osgVec3d(-10, 0, 0); # Cone apex location
direction = PyOF.osgVec3d(0, 0, 1); # Cone boresight direction
up = PyOF.osgVec3d(1, 0, 0); # Cone +Y axis
customCone.setPosition(origin);
q = getAttitudeQuat(PyOF.osgVec3d(0, 0, 0), direction, up)
customCone.setAttitude(q);
ellipticCone = PyOF.EllipticCone("Elliptic Cone");
ellipticCone.setConeColor(0.1, 0.5, 0.6, 0.5);
ellipticCone.setConeLength(5.0);
ellipticCone.setPrimaryAngles(45.0 * math.pi/180, 20.0 * math.pi/180);
root.addChild(ellipticCone);
view = PyOF.View(root, ellipticCone);
myWindow.getGridPosition(0, 0).addView(view);
view.setDefaultViewParameters(PyOF.osgVec3d(0, 0, 5.0), PyOF.osgVec3d(0,0,0), PyOF.osgVec3d(0, 1.0, 0));
view.resetView();
origin = PyOF.osgVec3d(10, 0, 0); # Cone apex location
direction = PyOF.osgVec3d(0, 1, 0); # Cone boresight direction
up = PyOF.osgVec3d(1, 0, 1); # Cone +Y axis
ellipticCone.setPosition(origin);
q = getAttitudeQuat(PyOF.osgVec3d(0, 0, 0), direction, up)
ellipticCone.setAttitude(q);
rectangularCone = PyOF.RectangularCone("Rectangular Cone");
rectangularCone.setPosition(0, 0, 10.0);
rectangularCone.setConeColor(0.1, 0.5, 0.6, 0.5);
rectangularCone.setConeLength(5.0);
rectangularCone.setPrimaryAngles(45.0 * math.pi/180, 20.0 * math.pi/180);
root.addChild(rectangularCone);
fm = PyOF.FrameManager();
fm.setFrame(root);
myWindow.setScene(fm, 0, 0);
myWindow.startThread(); # Start window animation
myWindow.join(); # Wait for window animation to finish
|
"""Tests for the profile page."""
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import user_services
from core.tests import test_utils
import feconf
import utils
class SignupTest(test_utils.GenericTestBase):
def test_signup_page_does_not_have_top_right_menu(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
        # 'Sign in' can't appear inside an HTML tag, but may appear in JS code.
response.mustcontain(no=['Logout', '>Sign in'])
self.logout()
def test_going_somewhere_else_while_signing_in_logs_user_out(self):
exp_services.load_demo('0')
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response = self.testapp.get('/create/0')
self.assertEqual(response.status_int, 302)
self.assertIn('Logout', response.headers['location'])
self.assertIn('create', response.headers['location'])
self.logout()
def test_accepting_terms_is_handled_correctly(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL, {'agreed_to_terms': False},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('you will need to accept', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': 'Hasta la vista!'},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('you will need to accept', response_dict['error'])
self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': True, 'username': 'myusername'},
csrf_token=csrf_token)
self.logout()
def test_username_is_handled_correctly(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL, {'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('Empty username supplied', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': '', 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('Empty username supplied', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': '!a!', 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': self.UNICODE_TEST_STRING, 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abcde', 'agreed_to_terms': True},
csrf_token=csrf_token)
self.logout()
class UsernameCheckHandlerTests(test_utils.GenericTestBase):
def test_username_check(self):
self.signup('abc@example.com', username='abc')
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': 'abc'},
csrf_token=csrf_token)
self.assertEqual(response_dict, {
'username_is_taken': True
})
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': 'def'},
csrf_token=csrf_token)
self.assertEqual(response_dict, {
'username_is_taken': False
})
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': '!!!INVALID!!!'},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL,
{'username': self.UNICODE_TEST_STRING},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
self.logout()
class EmailPreferencesTests(test_utils.GenericTestBase):
def test_user_not_setting_email_prefs_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True},
csrf_token=csrf_token)
# The email update preference should be whatever the setting in feconf
# is.
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
self.assertEqual(
user_services.get_email_preferences(editor_id),
{
'can_receive_email_updates': True,
'can_receive_editor_role_email': (
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE),
'can_receive_feedback_message_email': (
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
})
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
self.assertEqual(
user_services.get_email_preferences(editor_id),
{
'can_receive_email_updates': False,
'can_receive_editor_role_email': (
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE),
'can_receive_feedback_message_email': (
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
})
def test_user_allowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': True},
csrf_token=csrf_token)
# The email update preference should be True in all cases.
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
self.assertEqual(
user_services.get_email_preferences(editor_id),
{
'can_receive_email_updates': True,
'can_receive_editor_role_email': (
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE),
'can_receive_feedback_message_email': (
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
})
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
self.assertEqual(
user_services.get_email_preferences(editor_id),
{
'can_receive_email_updates': True,
'can_receive_editor_role_email': (
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE),
'can_receive_feedback_message_email': (
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
})
def test_user_disallowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': False},
csrf_token=csrf_token)
# The email update preference should be False in all cases.
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
self.assertEqual(
user_services.get_email_preferences(editor_id),
{
'can_receive_email_updates': False,
'can_receive_editor_role_email': (
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE),
'can_receive_feedback_message_email': (
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
})
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
self.assertEqual(
user_services.get_email_preferences(editor_id),
{
'can_receive_email_updates': False,
'can_receive_editor_role_email': (
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE),
'can_receive_feedback_message_email': (
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
})
class ProfileLinkTests(test_utils.GenericTestBase):
USERNAME = 'abc123'
EMAIL = 'abc123@gmail.com'
PROFILE_PIC_URL = '/preferenceshandler/profile_picture_by_username/'
def test_get_profile_picture_invalid_username(self):
response = self.testapp.get(
'%s%s' % (self.PROFILE_PIC_URL, self.USERNAME), expect_errors=True
)
self.assertEqual(response.status_int, 404)
def test_get_profile_picture_valid_username(self):
self.signup(self.EMAIL, self.USERNAME)
response_dict = self.get_json(
'%s%s' % (self.PROFILE_PIC_URL, self.USERNAME)
)
# Every user must have a profile picture.
self.assertEqual(
response_dict['profile_picture_data_url_for_username'],
user_services.DEFAULT_IDENTICON_DATA_URL)
class ProfileDataHandlerTests(test_utils.GenericTestBase):
def test_preference_page_updates(self):
self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/preferences')
csrf_token = self.get_csrf_token_from_response(response)
original_preferences = self.get_json('/preferenceshandler/data')
self.assertEqual(
['en'], original_preferences['preferred_language_codes'])
self.assertIsNone(original_preferences['preferred_site_language_code'])
self.put_json(
'/preferenceshandler/data',
{'update_type': 'preferred_site_language_code', 'data': 'en'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'preferred_language_codes', 'data': ['de']},
csrf_token=csrf_token)
new_preferences = self.get_json('/preferenceshandler/data')
self.assertEqual(new_preferences['preferred_language_codes'], ['de'])
self.assertEqual(new_preferences['preferred_site_language_code'], 'en')
def test_profile_data_is_independent_of_currently_logged_in_user(self):
self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/preferences')
csrf_token = self.get_csrf_token_from_response(response)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'user_bio', 'data': 'My new editor bio'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'subject_interests', 'data': ['editor', 'editing']},
csrf_token=csrf_token)
self.logout()
self.signup(self.VIEWER_EMAIL, username=self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
response = self.testapp.get('/preferences')
csrf_token = self.get_csrf_token_from_response(response)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'user_bio', 'data': 'My new viewer bio'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'subject_interests', 'data': ['viewer', 'viewing']},
csrf_token=csrf_token)
self.logout()
# Viewer looks at editor's profile page.
self.login(self.VIEWER_EMAIL)
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.assertEqual(response['subject_interests'], ['editor', 'editing'])
self.logout()
# Editor looks at their own profile page.
self.login(self.EDITOR_EMAIL)
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.assertEqual(response['subject_interests'], ['editor', 'editing'])
self.logout()
        # Logged-out user looks at editor's profile page.
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.assertEqual(response['subject_interests'], ['editor', 'editing'])
class FirstContributionDateTests(test_utils.GenericTestBase):
USERNAME = 'abc123'
EMAIL = 'abc123@gmail.com'
def test_contribution_msec(self):
# Test the contribution time shows up correctly as None.
self.signup(self.EMAIL, self.USERNAME)
self.login(self.EMAIL)
user_id = self.get_user_id_from_email(self.EMAIL)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertIsNone(response_dict['first_contribution_msec'])
# Update the first_contribution_msec to the current time in
# milliseconds.
first_time_in_msecs = utils.get_current_time_in_millisecs()
user_services.update_first_contribution_msec_if_not_set(
user_id, first_time_in_msecs)
        # Test that the contribution date now equals first_time_in_msecs.
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertEqual(
response_dict['first_contribution_msec'],
first_time_in_msecs)
# Test that the contribution date is not changed after the first time it
# is set.
second_time_in_msecs = utils.get_current_time_in_millisecs()
user_services.update_first_contribution_msec_if_not_set(
user_id, second_time_in_msecs)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertEqual(
response_dict['first_contribution_msec'],
first_time_in_msecs)
class UserContributionsTests(test_utils.GenericTestBase):
USERNAME_A = 'a'
EMAIL_A = 'a@example.com'
USERNAME_B = 'b'
EMAIL_B = 'b@example.com'
EXP_ID_1 = 'exp_id_1'
def test_null_case(self):
# Check that the profile page for a user with no contributions shows
# that they have 0 created/edited explorations.
self.signup(self.EMAIL_A, self.USERNAME_A)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME_A)
self.assertEqual(
response_dict['created_exp_summary_dicts'], [])
self.assertEqual(
response_dict['edited_exp_summary_dicts'], [])
def test_created(self):
# Check that the profile page for a user who has created
# a single exploration shows 1 created and 1 edited exploration.
self.signup(self.EMAIL_A, self.USERNAME_A)
user_a_id = self.get_user_id_from_email(self.EMAIL_A)
self.save_new_valid_exploration(
self.EXP_ID_1, user_a_id, end_state_name='End')
rights_manager.publish_exploration(user_a_id, self.EXP_ID_1)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME_A)
self.assertEqual(len(
response_dict['created_exp_summary_dicts']), 1)
self.assertEqual(len(
response_dict['edited_exp_summary_dicts']), 1)
self.assertEqual(
response_dict['created_exp_summary_dicts'][0]['id'],
self.EXP_ID_1)
self.assertEqual(
response_dict['edited_exp_summary_dicts'][0]['id'],
self.EXP_ID_1)
def test_edited(self):
        # Check that the profile page for a user who has edited (but not
        # created) an exploration shows 0 created and 1 edited exploration.
self.signup(self.EMAIL_A, self.USERNAME_A)
user_a_id = self.get_user_id_from_email(self.EMAIL_A)
self.signup(self.EMAIL_B, self.USERNAME_B)
user_b_id = self.get_user_id_from_email(self.EMAIL_B)
self.save_new_valid_exploration(
self.EXP_ID_1, user_a_id, end_state_name='End')
rights_manager.publish_exploration(user_a_id, self.EXP_ID_1)
exp_services.update_exploration(user_b_id, self.EXP_ID_1, [{
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
}], 'Test edit')
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME_B)
self.assertEqual(len(
response_dict['created_exp_summary_dicts']), 0)
self.assertEqual(len(
response_dict['edited_exp_summary_dicts']), 1)
self.assertEqual(
response_dict['edited_exp_summary_dicts'][0]['id'],
self.EXP_ID_1)
self.assertEqual(
response_dict['edited_exp_summary_dicts'][0]['objective'],
'the objective')
class SiteLanguageHandlerTests(test_utils.GenericTestBase):
def test_save_site_language_handler(self):
"""Test the language is saved in the preferences when handler is called.
"""
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
language_code = 'es'
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/preferences')
self.assertEqual(response.status_int, 200)
csrf_token = self.get_csrf_token_from_response(response)
self.put_json('/preferenceshandler/data', {
'update_type': 'preferred_site_language_code',
'data': language_code,
}, csrf_token)
preferences = self.get_json('/preferenceshandler/data')
self.assertIsNotNone(preferences)
self.assertEqual(
preferences['preferred_site_language_code'], language_code)
self.logout()
def test_save_site_language_no_user(self):
"""The SiteLanguageHandler handler can be called without a user."""
response = self.testapp.get(feconf.SPLASH_URL)
self.assertEqual(response.status_int, 200)
csrf_token = self.get_csrf_token_from_response(
response, token_type=feconf.CSRF_PAGE_NAME_I18N)
self.put_json(feconf.SITE_LANGUAGE_DATA_URL, {
'site_language_code': 'es',
}, csrf_token)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from pants.util.contextutil import temporary_file
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class TestOptionsQuietIntegration(PantsRunIntegrationTest):
def test_pants_default_quietness(self):
pants_run = self.run_pants(['export'])
self.assert_success(pants_run)
json.loads(pants_run.stdout_data)
def test_pants_no_quiet_cli(self):
pants_run = self.run_pants(['--no-quiet', 'export'])
self.assert_success(pants_run)
    # Since pants progress will show up in stdout, JSON parsing should fail.
with self.assertRaises(ValueError):
json.loads(pants_run.stdout_data)
def test_pants_no_quiet_env(self):
pants_run = self.run_pants(['export'], extra_env={'PANTS_QUIET': 'FALSE'})
self.assert_success(pants_run)
    # Since pants progress will show up in stdout, JSON parsing should fail.
with self.assertRaises(ValueError):
json.loads(pants_run.stdout_data)
def test_pants_no_quiet_output_file(self):
with temporary_file() as f:
pants_run = self.run_pants(['--no-quiet', 'export', '--output-file={}'.format(f.name)])
self.assert_success(pants_run)
json_string = f.read()
      # Make sure the JSON read from the file is valid.
json.loads(json_string)
# Make sure json string does not appear in stdout.
self.assertNotIn(json_string, pants_run.stdout_data)
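  # A hypothetical companion test (a sketch, not part of the original file):
  # explicitly setting the PANTS_QUIET env var to TRUE should keep the export
  # goal's stdout machine-parseable, mirroring test_pants_default_quietness.
  def test_pants_quiet_env(self):
    pants_run = self.run_pants(['export'], extra_env={'PANTS_QUIET': 'TRUE'})
    self.assert_success(pants_run)
    # With quiet enabled, stdout should be pure JSON from the export goal.
    json.loads(pants_run.stdout_data)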
|
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.appengine_admin_v1.services.instances import InstancesAsyncClient
from google.cloud.appengine_admin_v1.services.instances import InstancesClient
from google.cloud.appengine_admin_v1.services.instances import pagers
from google.cloud.appengine_admin_v1.services.instances import transports
from google.cloud.appengine_admin_v1.types import appengine
from google.cloud.appengine_admin_v1.types import instance
from google.cloud.appengine_admin_v1.types import operation as ga_operation
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert InstancesClient._get_default_mtls_endpoint(None) is None
assert InstancesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
InstancesClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
InstancesClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
InstancesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert InstancesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [InstancesClient, InstancesAsyncClient,])
def test_instances_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "appengine.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.InstancesGrpcTransport, "grpc"),
(transports.InstancesGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_instances_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [InstancesClient, InstancesAsyncClient,])
def test_instances_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "appengine.googleapis.com:443"
def test_instances_client_get_transport_class():
transport = InstancesClient.get_transport_class()
available_transports = [
transports.InstancesGrpcTransport,
]
assert transport in available_transports
transport = InstancesClient.get_transport_class("grpc")
assert transport == transports.InstancesGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(InstancesClient, transports.InstancesGrpcTransport, "grpc"),
(
InstancesAsyncClient,
transports.InstancesGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
InstancesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstancesClient)
)
@mock.patch.object(
InstancesAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(InstancesAsyncClient),
)
def test_instances_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(InstancesClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(InstancesClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(InstancesClient, transports.InstancesGrpcTransport, "grpc", "true"),
(
InstancesAsyncClient,
transports.InstancesGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(InstancesClient, transports.InstancesGrpcTransport, "grpc", "false"),
(
InstancesAsyncClient,
transports.InstancesGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
InstancesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstancesClient)
)
@mock.patch.object(
InstancesAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(InstancesAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_instances_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [InstancesClient, InstancesAsyncClient])
@mock.patch.object(
InstancesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstancesClient)
)
@mock.patch.object(
InstancesAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(InstancesAsyncClient),
)
def test_instances_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(InstancesClient, transports.InstancesGrpcTransport, "grpc"),
(
InstancesAsyncClient,
transports.InstancesGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_instances_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(InstancesClient, transports.InstancesGrpcTransport, "grpc", grpc_helpers),
(
InstancesAsyncClient,
transports.InstancesGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_instances_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_instances_client_client_options_from_dict():
with mock.patch(
"google.cloud.appengine_admin_v1.services.instances.transports.InstancesGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = InstancesClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(InstancesClient, transports.InstancesGrpcTransport, "grpc", grpc_helpers),
(
InstancesAsyncClient,
transports.InstancesGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_instances_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"appengine.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
scopes=None,
default_host="appengine.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [appengine.ListInstancesRequest, dict,])
def test_list_instances(request_type, transport: str = "grpc"):
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = appengine.ListInstancesResponse(
next_page_token="next_page_token_value",
)
response = client.list_instances(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.ListInstancesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListInstancesPager)
assert response.next_page_token == "next_page_token_value"
def test_list_instances_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
client.list_instances()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.ListInstancesRequest()
@pytest.mark.asyncio
async def test_list_instances_async(
transport: str = "grpc_asyncio", request_type=appengine.ListInstancesRequest
):
client = InstancesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
appengine.ListInstancesResponse(next_page_token="next_page_token_value",)
)
response = await client.list_instances(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.ListInstancesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListInstancesAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_instances_async_from_dict():
await test_list_instances_async(request_type=dict)
def test_list_instances_field_headers():
client = InstancesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = appengine.ListInstancesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
call.return_value = appengine.ListInstancesResponse()
client.list_instances(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_instances_field_headers_async():
client = InstancesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = appengine.ListInstancesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
appengine.ListInstancesResponse()
)
await client.list_instances(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_instances_pager(transport_name: str = "grpc"):
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
appengine.ListInstancesResponse(
instances=[
instance.Instance(),
instance.Instance(),
instance.Instance(),
],
next_page_token="abc",
),
appengine.ListInstancesResponse(instances=[], next_page_token="def",),
appengine.ListInstancesResponse(
instances=[instance.Instance(),], next_page_token="ghi",
),
appengine.ListInstancesResponse(
instances=[instance.Instance(), instance.Instance(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_instances(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, instance.Instance) for i in results)
def test_list_instances_pages(transport_name: str = "grpc"):
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
appengine.ListInstancesResponse(
instances=[
instance.Instance(),
instance.Instance(),
instance.Instance(),
],
next_page_token="abc",
),
appengine.ListInstancesResponse(instances=[], next_page_token="def",),
appengine.ListInstancesResponse(
instances=[instance.Instance(),], next_page_token="ghi",
),
appengine.ListInstancesResponse(
instances=[instance.Instance(), instance.Instance(),],
),
RuntimeError,
)
pages = list(client.list_instances(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_instances_async_pager():
client = InstancesAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instances), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
appengine.ListInstancesResponse(
instances=[
instance.Instance(),
instance.Instance(),
instance.Instance(),
],
next_page_token="abc",
),
appengine.ListInstancesResponse(instances=[], next_page_token="def",),
appengine.ListInstancesResponse(
instances=[instance.Instance(),], next_page_token="ghi",
),
appengine.ListInstancesResponse(
instances=[instance.Instance(), instance.Instance(),],
),
RuntimeError,
)
async_pager = await client.list_instances(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, instance.Instance) for i in responses)
@pytest.mark.asyncio
async def test_list_instances_async_pages():
client = InstancesAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_instances), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
appengine.ListInstancesResponse(
instances=[
instance.Instance(),
instance.Instance(),
instance.Instance(),
],
next_page_token="abc",
),
appengine.ListInstancesResponse(instances=[], next_page_token="def",),
appengine.ListInstancesResponse(
instances=[instance.Instance(),], next_page_token="ghi",
),
appengine.ListInstancesResponse(
instances=[instance.Instance(), instance.Instance(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_instances(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [appengine.GetInstanceRequest, dict,])
def test_get_instance(request_type, transport: str = "grpc"):
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = instance.Instance(
name="name_value",
id="id_value",
app_engine_release="app_engine_release_value",
availability=instance.Instance.Availability.RESIDENT,
vm_name="vm_name_value",
vm_zone_name="vm_zone_name_value",
vm_id="vm_id_value",
requests=892,
errors=669,
qps=0.34,
average_latency=1578,
memory_usage=1293,
vm_status="vm_status_value",
vm_debug_enabled=True,
vm_ip="vm_ip_value",
vm_liveness=instance.Instance.Liveness.LivenessState.UNKNOWN,
)
response = client.get_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.GetInstanceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, instance.Instance)
assert response.name == "name_value"
assert response.id == "id_value"
assert response.app_engine_release == "app_engine_release_value"
assert response.availability == instance.Instance.Availability.RESIDENT
assert response.vm_name == "vm_name_value"
assert response.vm_zone_name == "vm_zone_name_value"
assert response.vm_id == "vm_id_value"
assert response.requests == 892
assert response.errors == 669
assert math.isclose(response.qps, 0.34, rel_tol=1e-6)
assert response.average_latency == 1578
assert response.memory_usage == 1293
assert response.vm_status == "vm_status_value"
assert response.vm_debug_enabled is True
assert response.vm_ip == "vm_ip_value"
assert response.vm_liveness == instance.Instance.Liveness.LivenessState.UNKNOWN
def test_get_instance_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
client.get_instance()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.GetInstanceRequest()
@pytest.mark.asyncio
async def test_get_instance_async(
transport: str = "grpc_asyncio", request_type=appengine.GetInstanceRequest
):
client = InstancesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
instance.Instance(
name="name_value",
id="id_value",
app_engine_release="app_engine_release_value",
availability=instance.Instance.Availability.RESIDENT,
vm_name="vm_name_value",
vm_zone_name="vm_zone_name_value",
vm_id="vm_id_value",
requests=892,
errors=669,
qps=0.34,
average_latency=1578,
memory_usage=1293,
vm_status="vm_status_value",
vm_debug_enabled=True,
vm_ip="vm_ip_value",
vm_liveness=instance.Instance.Liveness.LivenessState.UNKNOWN,
)
)
response = await client.get_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.GetInstanceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, instance.Instance)
assert response.name == "name_value"
assert response.id == "id_value"
assert response.app_engine_release == "app_engine_release_value"
assert response.availability == instance.Instance.Availability.RESIDENT
assert response.vm_name == "vm_name_value"
assert response.vm_zone_name == "vm_zone_name_value"
assert response.vm_id == "vm_id_value"
assert response.requests == 892
assert response.errors == 669
assert math.isclose(response.qps, 0.34, rel_tol=1e-6)
assert response.average_latency == 1578
assert response.memory_usage == 1293
assert response.vm_status == "vm_status_value"
assert response.vm_debug_enabled is True
assert response.vm_ip == "vm_ip_value"
assert response.vm_liveness == instance.Instance.Liveness.LivenessState.UNKNOWN
@pytest.mark.asyncio
async def test_get_instance_async_from_dict():
await test_get_instance_async(request_type=dict)
def test_get_instance_field_headers():
client = InstancesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = appengine.GetInstanceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
call.return_value = instance.Instance()
client.get_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_instance_field_headers_async():
client = InstancesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = appengine.GetInstanceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance())
await client.get_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [appengine.DeleteInstanceRequest, dict,])
def test_delete_instance(request_type, transport: str = "grpc"):
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.DeleteInstanceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_instance_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
client.delete_instance()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.DeleteInstanceRequest()
@pytest.mark.asyncio
async def test_delete_instance_async(
transport: str = "grpc_asyncio", request_type=appengine.DeleteInstanceRequest
):
client = InstancesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.DeleteInstanceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_instance_async_from_dict():
await test_delete_instance_async(request_type=dict)
def test_delete_instance_field_headers():
client = InstancesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = appengine.DeleteInstanceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_instance_field_headers_async():
client = InstancesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = appengine.DeleteInstanceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [appengine.DebugInstanceRequest, dict,])
def test_debug_instance(request_type, transport: str = "grpc"):
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.debug_instance), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.debug_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.DebugInstanceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_debug_instance_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.debug_instance), "__call__") as call:
client.debug_instance()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.DebugInstanceRequest()
@pytest.mark.asyncio
async def test_debug_instance_async(
transport: str = "grpc_asyncio", request_type=appengine.DebugInstanceRequest
):
client = InstancesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.debug_instance), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.debug_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.DebugInstanceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_debug_instance_async_from_dict():
await test_debug_instance_async(request_type=dict)
def test_debug_instance_field_headers():
client = InstancesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = appengine.DebugInstanceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.debug_instance), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.debug_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_debug_instance_field_headers_async():
client = InstancesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = appengine.DebugInstanceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.debug_instance), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.debug_instance(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.InstancesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.InstancesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = InstancesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.InstancesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = InstancesClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = InstancesClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.InstancesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = InstancesClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.InstancesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = InstancesClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.InstancesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.InstancesGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.InstancesGrpcTransport, transports.InstancesGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = InstancesClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.InstancesGrpcTransport,)
def test_instances_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.InstancesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_instances_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.appengine_admin_v1.services.instances.transports.InstancesTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.InstancesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_instances",
"get_instance",
"delete_instance",
"debug_instance",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_instances_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.appengine_admin_v1.services.instances.transports.InstancesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.InstancesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
quota_project_id="octopus",
)
def test_instances_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.appengine_admin_v1.services.instances.transports.InstancesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.InstancesTransport()
adc.assert_called_once()
def test_instances_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
InstancesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.InstancesGrpcTransport, transports.InstancesGrpcAsyncIOTransport,],
)
def test_instances_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.InstancesGrpcTransport, grpc_helpers),
(transports.InstancesGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_instances_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"appengine.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
scopes=["1", "2"],
default_host="appengine.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.InstancesGrpcTransport, transports.InstancesGrpcAsyncIOTransport],
)
def test_instances_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_instances_host_no_port():
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="appengine.googleapis.com"
),
)
assert client.transport._host == "appengine.googleapis.com:443"
def test_instances_host_with_port():
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="appengine.googleapis.com:8000"
),
)
assert client.transport._host == "appengine.googleapis.com:8000"
def test_instances_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.InstancesGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_instances_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.InstancesGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize(
"transport_class",
[transports.InstancesGrpcTransport, transports.InstancesGrpcAsyncIOTransport],
)
def test_instances_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
"transport_class",
[transports.InstancesGrpcTransport, transports.InstancesGrpcAsyncIOTransport],
)
def test_instances_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_instances_grpc_lro_client():
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_instances_grpc_lro_async_client():
client = InstancesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_instance_path():
app = "squid"
service = "clam"
version = "whelk"
instance = "octopus"
expected = "apps/{app}/services/{service}/versions/{version}/instances/{instance}".format(
app=app, service=service, version=version, instance=instance,
)
actual = InstancesClient.instance_path(app, service, version, instance)
assert expected == actual
def test_parse_instance_path():
expected = {
"app": "oyster",
"service": "nudibranch",
"version": "cuttlefish",
"instance": "mussel",
}
path = InstancesClient.instance_path(**expected)
# Check that the path construction is reversible.
actual = InstancesClient.parse_instance_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = InstancesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = InstancesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = InstancesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = InstancesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = InstancesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = InstancesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = InstancesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = InstancesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = InstancesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = InstancesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = InstancesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = InstancesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = InstancesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = InstancesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = InstancesClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.InstancesTransport, "_prep_wrapped_messages"
) as prep:
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.InstancesTransport, "_prep_wrapped_messages"
) as prep:
transport_class = InstancesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = InstancesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = InstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(InstancesClient, transports.InstancesGrpcTransport),
(InstancesAsyncClient, transports.InstancesGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
"""
Simple flask-based API to access FreeLing functionalities.
"""
__author__ = "Víctor Peinado"
__email__ = "vitojph@gmail.com"
__date__ = "27/06/2013"
import freeling
from flask import Flask, Response, request
from flask.ext.restful import Api, Resource
import json
PUNCTUATION = u""".,;:!? """
FREELINGDIR = "/usr/local/"
DATA = FREELINGDIR + "share/freeling/"
LANG = "en"
freeling.util_init_locale("default")
op = freeling.maco_options(LANG)
op.set_active_modules(0,1,1,1,1,1,1,1,1,1,0)
op.set_data_files("",
DATA + LANG + "/locucions.dat",
DATA + LANG + "/quantities.dat",
DATA + LANG + "/afixos.dat",
DATA + LANG + "/probabilitats.dat",
DATA + LANG + "/dicc.src",
DATA + LANG + "/np.dat",
DATA + "common/punct.dat",
DATA + LANG + "/corrector/corrector.dat")
tk = freeling.tokenizer(DATA + LANG + "/tokenizer.dat")
sp = freeling.splitter(DATA + LANG + "/splitter.dat")
mf = freeling.maco(op)
tg = freeling.hmm_tagger(LANG, DATA + LANG + "/tagger.dat", 1, 2)
sen = freeling.senses(DATA+LANG+"/senses.dat")
parser = freeling.chart_parser(DATA + LANG + "/chunker/grammar-chunk.dat")
dep = freeling.dep_txala(DATA + LANG + "/dep/dependences.dat", parser.get_start_symbol())
app = Flask(__name__)
api = Api(app)
def handleParsedTreeAsFL(tree, depth, output):
"""Handles a parsed tree"""
node = tree.get_info()
nch = tree.num_children()
# if node is head and has no children
if nch == 0:
if node.is_head():
w = node.get_word()
output.append("+(%s %s %s)" % (w.get_form(), w.get_lemma(), w.get_tag()))
else:
# if node is head and has children
if node.is_head():
output.append("+%s_[" % (node.get_label()))
else:
# if node has children but isn't head
output.append("%s_[" % (node.get_label()))
    # for each child, repeat the process
for i in range(nch):
child = tree.nth_child_ref(i)
handleParsedTreeAsFL(child, depth+1, output)
# close node
output.append("]")
return output
def handleParsedTreeAsString(tree, depth, output):
"""Handles a parsed tree"""
node = tree.get_info()
nch = tree.num_children()
parent = tree.get_parent()
# if node is head and has no children
if nch == 0:
if node.is_head():
w = node.get_word()
output.append(u"%s/%s/%s" % (w.get_form(), w.get_lemma(), w.get_tag()))
else:
if depth > 0:
output.append(u"%s(" % node.get_label())
    # for each child, repeat the process
for i in range(nch):
child = tree.nth_child_ref(i)
handleParsedTreeAsString(child, depth+1, output)
if depth > 0:
output.append(u")")
return output
def handleParsedTreeAsJSON(tree, depth, output):
"""Handles a parsed tree"""
node = tree.get_info()
nch = tree.num_children()
parent = tree.get_parent()
# if node is head and has no children
if nch == 0:
if node.is_head():
w = node.get_word()
output.append(dict(text=w.get_form(), lemma=w.get_lemma(), tag=w.get_tag(), parent=parent.get_info().get_label(), level=depth))
else:
if depth > 0:
output.append(dict(tag=node.get_label(), parent=parent.get_info().get_label(), level=depth))
    # for each child, repeat the process
for i in range(nch):
child = tree.nth_child_ref(i)
handleParsedTreeAsJSON(child, depth+1, output)
return output
def handleDepTree(tree, depth, output):
"""Handles a parsed tree"""
node = tree.get_info()
link = node.get_link()
linfo = link.get_info()
parentLabel = None
if depth > 0:
parentLabel = tree.get_parent().get_info().get_label()
w = tree.get_info().get_word()
output.append(dict(parent=parentLabel, rel=node.get_label(), label=link.get_info().get_label(), text=w.get_form(), lemma=w.get_lemma(), tag=w.get_tag()))
nch = tree.num_children()
if nch > 0:
for i in range(nch):
d = tree.nth_child_ref(i)
if not d.get_info().is_chunk():
handleDepTree(d, depth+1, output)
ch = {}
for i in range(nch):
d = tree.nth_child_ref(i)
if d.get_info().is_chunk():
ch[d.get_info().get_chunk_ord()] = d
for i in sorted(ch.keys()):
handleDepTree(ch[i], depth+1, output)
return output
class Splitter(Resource):
"""Splits an input text into sentences."""
def post(self):
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
# output list of sentences
outputSentences = []
for sentence in sentences:
outputTokens = []
for w in sentence.get_words():
outputTokens.append(w.get_form())
outputSentences.append(dict(oracion=" ".join(outputTokens)))
return Response(json.dumps(outputSentences), mimetype="application/json")
class TokenizerSplitter(Resource):
"""Splits an input text into tokenized sentences."""
def post(self):
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
# output list of sentences
outputSentences = []
for sentence in sentences:
outputTokens = []
for w in sentence.get_words():
outputTokens.append(w.get_form())
outputSentences.append(dict(oracion=outputTokens))
return Response(json.dumps(outputSentences), mimetype="application/json")
class NERecognizer(Resource):
"""Recognizes Named Entities from an input text."""
def post(self):
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
output = []
for sentence in sentences:
words = sentence.get_words()
for word in words:
# Person (NP00SP0), Geographical location (NP00G00), Organization (NP00O00), and Others (NP00V00)
                if word.get_tag() in "NP00SP0 NP00G00 NP00O00 NP00V00".split():
entities = []
entities.append(dict(lema=word.get_lemma(), categoria=word.get_tag()))
output.append(dict(palabra=word.get_form(), entidades=entities))
return Response(json.dumps(output), mimetype="application/json")
class DatesQuantitiesRecognizer(Resource):
    """Recognizes dates, currencies, and quantities from an input text."""
    def post(self):
        text = request.json["texto"]
        if text[-1] not in PUNCTUATION:
            text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
output = []
for sentence in sentences:
words = sentence.get_words()
for word in words:
# dates
tag = word.get_tag()
if tag[0] in "W Z".split():
expression = []
if tag == "W":
expression.append(dict(lema=word.get_lemma(), categoria="temporal"))
else:
if tag == "Z":
category = "numero"
elif tag == "Zd":
category = "partitivo"
elif tag == "Zm":
category = "moneda"
elif tag == "Zp":
category = "porcentaje"
elif tag == "Zu":
category = "magnitud"
else:
category = "numero"
expression.append(dict(lema=word.get_lemma(), categoria=category))
output.append(dict(expresion=word.get_form(), entidades=expression))
return Response(json.dumps(output), mimetype="application/json")
class Tagger(Resource):
"""Performs POS tagging from an input text."""
def post(self):
"""docstring for post"""
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
output = []
for sentence in sentences:
words = sentence.get_words()
for word in words:
lemmas = []
lemmas.append(dict(lema=word.get_lemma(), categoria=word.get_tag()))
output.append(dict(palabra=word.get_form(), lemas=lemmas))
return Response(json.dumps(output), mimetype="application/json")
class WSDTagger(Resource):
"""Performs POS tagging and WSD from an input text."""
def post(self):
"""docstring for post"""
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
sentences = sen.analyze(sentences)
output = []
for sentence in sentences:
words = sentence.get_words()
for word in words:
lemmas = []
lemmas.append(dict(lema=word.get_lemma(), categoria=word.get_tag()))
# split the senses and get just the synset ID
                synsets = [synsetID.split(":")[0] for synsetID in word.get_senses_string().split("/")]
output.append(dict(palabra=word.get_form(), lemas=lemmas, synsets=synsets))
return Response(json.dumps(output), mimetype="application/json")
class Parser(Resource):
"""FreeLing parser with three output formats: freeling-like, stanford-like and jsonified"""
def post(self):
"""docstring for post"""
text = request.json["texto"]
try:
format = request.json["format"]
except KeyError:
format = "json"
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
sentences = sen.analyze(sentences)
sentences = parser.analyze(sentences)
# set up the output format
parsedtree = []
for sentence in sentences:
tree = sentence.get_parse_tree()
if format == "fl":
parsedtree = handleParsedTreeAsFL(tree.begin(), 0, parsedtree)
elif format == "string":
# add the S(entence) tag
parsedtree.append("S(")
parsedtree = handleParsedTreeAsString(tree.begin(), 0, parsedtree)
# close the (S)entence
parsedtree.append(")")
elif format == "json":
# add the S tag with parent ROOT
parsedtree.append(dict(tag="S", parent="ROOT", level=0))
parsedtree = handleParsedTreeAsJSON(tree.begin(), 0, parsedtree)
# format the output accordingly
if format == "fl" or format == "string":
return Response(json.dumps(dict(tree=" ".join(parsedtree))), mimetype="application/json")
elif format == "json":
return Response(json.dumps(parsedtree), mimetype="application/json")
class DependencyParser(Resource):
"""FreeLing Dependency Parser"""
def post(self):
"""docstring for post"""
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
sentences = sen.analyze(sentences)
sentences = parser.analyze(sentences)
sentences = dep.analyze(sentences)
# set up the output format
deptree = []
for sentence in sentences:
tree = sentence.get_dep_tree()
deptree = handleDepTree(tree.begin(), 0, deptree)
return Response(json.dumps(deptree), mimetype="application/json")
api.add_resource(Splitter, "/splitter")
api.add_resource(TokenizerSplitter, "/tokenizersplitter")
api.add_resource(Tagger, "/tagger")
api.add_resource(WSDTagger, "/wsdtagger")
api.add_resource(NERecognizer, "/ner")
api.add_resource(DatesQuantitiesRecognizer, "/datesquantities")
api.add_resource(Parser, "/parser")
api.add_resource(DependencyParser, "/dep")
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0", port=9999)
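# Example client call (a minimal sketch, not part of the service itself): it
# assumes the server above is running locally on port 9999 and that the
# `requests` package is available. All endpoints expect a JSON body with a
# "texto" key; /parser additionally accepts an optional "format" key
# ("fl", "string" or "json").
#
#   import requests
#   resp = requests.post("http://localhost:9999/tagger",
#                        json={"texto": "This is a test"})
#   print(resp.json())  # -> [{"palabra": ..., "lemas": [...]}, ...]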
|
import logging
import re
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.port import chromium
from webkitpy.layout_tests.port import win
from webkitpy.layout_tests.port import config
_log = logging.getLogger(__name__)
class LinuxPort(chromium.ChromiumPort):
port_name = 'linux'
SUPPORTED_VERSIONS = ('x86', 'x86_64')
FALLBACK_PATHS = { 'x86_64': [ 'linux' ] + win.WinPort.latest_platform_fallback_path() }
FALLBACK_PATHS['x86'] = ['linux-x86'] + FALLBACK_PATHS['x86_64']
DEFAULT_BUILD_DIRECTORIES = ('sconsbuild', 'out')
@classmethod
def _determine_driver_path_statically(cls, host, options):
config_object = config.Config(host.executive, host.filesystem)
build_directory = getattr(options, 'build_directory', None)
webkit_base = WebKitFinder(host.filesystem).webkit_base()
chromium_base = cls._chromium_base_dir(host.filesystem)
driver_name = getattr(options, 'driver_name', None)
if driver_name is None:
driver_name = cls.CONTENT_SHELL_NAME
if hasattr(options, 'configuration') and options.configuration:
configuration = options.configuration
else:
configuration = config_object.default_configuration()
return cls._static_build_path(host.filesystem, build_directory, chromium_base, webkit_base, configuration, [driver_name])
@staticmethod
def _determine_architecture(filesystem, executive, driver_path):
file_output = ''
if filesystem.exists(driver_path):
# The --dereference flag tells file to follow symlinks
file_output = executive.run_command(['file', '--brief', '--dereference', driver_path], return_stderr=True)
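        # Typical `file --brief --dereference` output (examples only): an
        # "ELF 32-bit LSB executable, Intel 80386, ..." line maps to x86,
        # while "ELF 64-bit LSB executable, x86-64, ..." maps to x86_64.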
if re.match(r'ELF 32-bit LSB\s+executable', file_output):
return 'x86'
if re.match(r'ELF 64-bit LSB\s+executable', file_output):
return 'x86_64'
if file_output:
_log.warning('Could not determine architecture from "file" output: %s' % file_output)
        # We don't know what the architecture is; default to 'x86_64' because
# maybe we're rebaselining and the binary doesn't actually exist,
# or something else weird is going on. It's okay to do this because
# if we actually try to use the binary, check_build() should fail.
return 'x86_64'
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name.endswith('linux'):
return port_name + '-' + cls._determine_architecture(host.filesystem, host.executive, cls._determine_driver_path_statically(host, options))
return port_name
def __init__(self, host, port_name, **kwargs):
chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
(base, arch) = port_name.rsplit('-', 1)
assert base == 'linux'
assert arch in self.SUPPORTED_VERSIONS
assert port_name in ('linux', 'linux-x86', 'linux-x86_64')
self._version = 'lucid' # We only support lucid right now.
self._architecture = arch
def additional_drt_flag(self):
flags = super(LinuxPort, self).additional_drt_flag()
# FIXME: Temporarily disable the sandbox on Linux until we can get
# stacktraces via breakpad. http://crbug.com/247431
flags += ['--no-sandbox']
return flags
def default_baseline_search_path(self):
port_names = self.FALLBACK_PATHS[self._architecture]
return map(self._webkit_baseline_path, port_names)
def _modules_to_search_for_symbols(self):
return [self._build_path('libffmpegsumo.so')]
def check_build(self, needs_http):
result = chromium.ChromiumPort.check_build(self, needs_http)
if not result:
_log.error('For complete Linux build requirements, please see:')
_log.error('')
_log.error(' http://code.google.com/p/chromium/wiki/LinuxBuildInstructions')
return result
def operating_system(self):
return 'linux'
#
# PROTECTED METHODS
#
def _check_apache_install(self):
result = self._check_file_exists(self._path_to_apache(), "apache2")
result = self._check_file_exists(self._path_to_apache_config_file(), "apache2 config file") and result
if not result:
_log.error(' Please install using: "sudo apt-get install apache2 libapache2-mod-php5"')
_log.error('')
return result
def _check_lighttpd_install(self):
result = self._check_file_exists(
self._path_to_lighttpd(), "LigHTTPd executable")
result = self._check_file_exists(self._path_to_lighttpd_php(), "PHP CGI executable") and result
result = self._check_file_exists(self._path_to_lighttpd_modules(), "LigHTTPd modules") and result
if not result:
_log.error(' Please install using: "sudo apt-get install lighttpd php5-cgi"')
_log.error('')
return result
def _wdiff_missing_message(self):
return 'wdiff is not installed; please install using "sudo apt-get install wdiff"'
def _path_to_apache(self):
# The Apache binary path can vary depending on OS and distribution
# See http://wiki.apache.org/httpd/DistrosDefaultLayout
for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]:
if self._filesystem.exists(path):
return path
_log.error("Could not find apache. Not installed or unknown path.")
return None
def _path_to_lighttpd(self):
return "/usr/sbin/lighttpd"
def _path_to_lighttpd_modules(self):
return "/usr/lib/lighttpd"
def _path_to_lighttpd_php(self):
return "/usr/bin/php-cgi"
def _path_to_driver(self, configuration=None):
binary_name = self.driver_name()
return self._build_path_with_configuration(configuration, binary_name)
def _path_to_helper(self):
return None
|
"""Layer serialization/deserialization functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.keras.engine.base_layer import TensorFlowOpLayer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.layers.advanced_activations import *
from tensorflow.python.keras.layers.convolutional import *
from tensorflow.python.keras.layers.convolutional_recurrent import *
from tensorflow.python.keras.layers.core import *
from tensorflow.python.keras.layers.cudnn_recurrent import *
from tensorflow.python.keras.layers.embeddings import *
from tensorflow.python.keras.layers.local import *
from tensorflow.python.keras.layers.merge import *
from tensorflow.python.keras.layers.noise import *
from tensorflow.python.keras.layers.normalization import *
from tensorflow.python.keras.layers.pooling import *
from tensorflow.python.keras.layers.recurrent import *
from tensorflow.python.keras.layers.wrappers import *
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.util.tf_export import keras_export
if tf2.enabled():
from tensorflow.python.keras.layers.normalization_v2 import * # pylint: disable=g-import-not-at-top
from tensorflow.python.keras.layers.recurrent_v2 import * # pylint: disable=g-import-not-at-top
_DESERIALIZATION_TABLE = {
'BatchNormalizationV1': 'BatchNormalization',
'BatchNormalizationV2': 'BatchNormalization',
}
@keras_export('keras.layers.serialize')
def serialize(layer):
return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
@keras_export('keras.layers.deserialize')
def deserialize(config, custom_objects=None):
"""Instantiates a layer from a config dictionary.
Arguments:
config: dict of the form {'class_name': str, 'config': dict}
custom_objects: dict mapping class names (or function names)
of custom (non-Keras) objects to class/functions
Returns:
Layer instance (may be Model, Sequential, Network, Layer...)
"""
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
globs = globals() # All layers.
globs['Network'] = models.Network
globs['Model'] = models.Model
globs['Sequential'] = models.Sequential
layer_class_name = config['class_name']
if layer_class_name in _DESERIALIZATION_TABLE:
config['class_name'] = _DESERIALIZATION_TABLE[layer_class_name]
return deserialize_keras_object(
config,
module_objects=globs,
custom_objects=custom_objects,
printable_module_name='layer')
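# Example round trip (a minimal sketch): serialize() produces the
# {'class_name': ..., 'config': ...} dict that deserialize() accepts. Dense is
# assumed to be available through the wildcard import of
# tensorflow.python.keras.layers.core above.
#
#   config = serialize(Dense(10))
#   layer = deserialize(config)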
|
import errno
import gym
import logging
import math
import numpy as np
import os
import tree # pip install dm_tree
from typing import Dict, List, Optional, Tuple, Union, TYPE_CHECKING
import ray
import ray.experimental.tf_utils
from ray.util.debug import log_once
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.rnn_sequencing import pad_batch_to_sequences_of_same_size
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils import force_list
from ray.rllib.utils.annotations import DeveloperAPI, override
from ray.rllib.utils.debug import summarize
from ray.rllib.utils.deprecation import Deprecated, deprecation_warning
from ray.rllib.utils.framework import try_import_tf, get_variable
from ray.rllib.utils.metrics import NUM_AGENT_STEPS_TRAINED
from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY
from ray.rllib.utils.schedules import PiecewiseSchedule
from ray.rllib.utils.spaces.space_utils import normalize_action
from ray.rllib.utils.tf_utils import get_gpu_devices
from ray.rllib.utils.tf_run_builder import TFRunBuilder
from ray.rllib.utils.typing import (
LocalOptimizer,
ModelGradients,
TensorType,
TrainerConfigDict,
)
if TYPE_CHECKING:
from ray.rllib.evaluation import Episode
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
@DeveloperAPI
class TFPolicy(Policy):
"""An agent policy and loss implemented in TensorFlow.
Do not sub-class this class directly (neither should you sub-class
DynamicTFPolicy), but rather use
rllib.policy.tf_policy_template.build_tf_policy
to generate your custom tf (graph-mode or eager) Policy classes.
Extending this class enables RLlib to perform TensorFlow specific
optimizations on the policy, e.g., parallelization across gpus or
fusing multiple graphs together in the multi-agent setting.
Input tensors are typically shaped like [BATCH_SIZE, ...].
Examples:
>>> policy = TFPolicySubclass(
sess, obs_input, sampled_action, loss, loss_inputs)
>>> print(policy.compute_actions([1, 0, 2]))
(array([0, 1, 1]), [], {})
>>> print(policy.postprocess_trajectory(SampleBatch({...})))
SampleBatch({"action": ..., "advantages": ..., ...})
"""
@DeveloperAPI
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
sess: "tf1.Session",
obs_input: TensorType,
sampled_action: TensorType,
loss: Union[TensorType, List[TensorType]],
loss_inputs: List[Tuple[str, TensorType]],
model: Optional[ModelV2] = None,
sampled_action_logp: Optional[TensorType] = None,
action_input: Optional[TensorType] = None,
log_likelihood: Optional[TensorType] = None,
dist_inputs: Optional[TensorType] = None,
dist_class: Optional[type] = None,
state_inputs: Optional[List[TensorType]] = None,
state_outputs: Optional[List[TensorType]] = None,
prev_action_input: Optional[TensorType] = None,
prev_reward_input: Optional[TensorType] = None,
seq_lens: Optional[TensorType] = None,
max_seq_len: int = 20,
batch_divisibility_req: int = 1,
update_ops: List[TensorType] = None,
explore: Optional[TensorType] = None,
timestep: Optional[TensorType] = None,
):
"""Initializes a Policy object.
Args:
observation_space: Observation space of the policy.
action_space: Action space of the policy.
config: Policy-specific configuration data.
sess: The TensorFlow session to use.
obs_input: Input placeholder for observations, of shape
[BATCH_SIZE, obs...].
sampled_action: Tensor for sampling an action, of shape
[BATCH_SIZE, action...]
loss: Scalar policy loss output tensor or a list thereof
(in case there is more than one loss).
loss_inputs: A (name, placeholder) tuple for each loss input
argument. Each placeholder name must
correspond to a SampleBatch column key returned by
postprocess_trajectory(), and has shape [BATCH_SIZE, data...].
These keys will be read from postprocessed sample batches and
fed into the specified placeholders during loss computation.
model: The optional ModelV2 to use for calculating actions and
losses. If not None, TFPolicy will provide functionality for
getting variables, calling the model's custom loss (if
provided), and importing weights into the model.
sampled_action_logp: log probability of the sampled action.
action_input: Input placeholder for actions for
logp/log-likelihood calculations.
log_likelihood: Tensor to calculate the log_likelihood (given
action_input and obs_input).
dist_class: An optional ActionDistribution class to use for
generating a dist object from distribution inputs.
dist_inputs: Tensor to calculate the distribution
inputs/parameters.
state_inputs: List of RNN state input Tensors.
state_outputs: List of RNN state output Tensors.
prev_action_input: placeholder for previous actions.
prev_reward_input: placeholder for previous rewards.
seq_lens: Placeholder for RNN sequence lengths, of shape
[NUM_SEQUENCES].
Note that NUM_SEQUENCES << BATCH_SIZE. See
policy/rnn_sequencing.py for more information.
max_seq_len: Max sequence length for LSTM training.
batch_divisibility_req: pad all agent experiences batches to
multiples of this value. This only has an effect if not using
a LSTM model.
update_ops: override the batchnorm update ops
to run when applying gradients. Otherwise we run all update
ops found in the current variable scope.
explore: Placeholder for `explore` parameter into call to
Exploration.get_exploration_action. Explicitly set this to
False for not creating any Exploration component.
timestep: Placeholder for the global sampling timestep.
"""
self.framework = "tf"
super().__init__(observation_space, action_space, config)
# Get devices to build the graph on.
worker_idx = self.config.get("worker_index", 0)
if not config["_fake_gpus"] and ray.worker._mode() == ray.worker.LOCAL_MODE:
num_gpus = 0
elif worker_idx == 0:
num_gpus = config["num_gpus"]
else:
num_gpus = config["num_gpus_per_worker"]
gpu_ids = get_gpu_devices()
# Place on one or more CPU(s) when either:
# - Fake GPU mode.
# - num_gpus=0 (either set by user or we are in local_mode=True).
# - no GPUs available.
if config["_fake_gpus"] or num_gpus == 0 or not gpu_ids:
logger.info(
"TFPolicy (worker={}) running on {}.".format(
worker_idx if worker_idx > 0 else "local",
f"{num_gpus} fake-GPUs" if config["_fake_gpus"] else "CPU",
)
)
self.devices = ["/cpu:0" for _ in range(int(math.ceil(num_gpus)) or 1)]
# Place on one or more actual GPU(s), when:
# - num_gpus > 0 (set by user) AND
# - local_mode=False AND
# - actual GPUs available AND
# - non-fake GPU mode.
else:
logger.info(
"TFPolicy (worker={}) running on {} GPU(s).".format(
worker_idx if worker_idx > 0 else "local", num_gpus
)
)
# We are a remote worker (WORKER_MODE=1):
# GPUs should be assigned to us by ray.
if ray.worker._mode() == ray.worker.WORKER_MODE:
gpu_ids = ray.get_gpu_ids()
if len(gpu_ids) < num_gpus:
raise ValueError(
"TFPolicy was not able to find enough GPU IDs! Found "
f"{gpu_ids}, but num_gpus={num_gpus}."
)
self.devices = [f"/gpu:{i}" for i, _ in enumerate(gpu_ids) if i < num_gpus]
# Disable env-info placeholder.
if SampleBatch.INFOS in self.view_requirements:
self.view_requirements[SampleBatch.INFOS].used_for_training = False
self.view_requirements[SampleBatch.INFOS].used_for_compute_actions = False
assert model is None or isinstance(model, (ModelV2, tf.keras.Model)), (
"Model classes for TFPolicy other than `ModelV2|tf.keras.Model` "
"not allowed! You passed in {}.".format(model)
)
self.model = model
# Auto-update model's inference view requirements, if recurrent.
if self.model is not None:
self._update_model_view_requirements_from_init_state()
# If `explore` is explicitly set to False, don't create an exploration
# component.
self.exploration = self._create_exploration() if explore is not False else None
self._sess = sess
self._obs_input = obs_input
self._prev_action_input = prev_action_input
self._prev_reward_input = prev_reward_input
self._sampled_action = sampled_action
self._is_training = self._get_is_training_placeholder()
self._is_exploring = (
explore
if explore is not None
else tf1.placeholder_with_default(True, (), name="is_exploring")
)
self._sampled_action_logp = sampled_action_logp
self._sampled_action_prob = (
tf.math.exp(self._sampled_action_logp)
if self._sampled_action_logp is not None
else None
)
self._action_input = action_input # For logp calculations.
self._dist_inputs = dist_inputs
self.dist_class = dist_class
self._state_inputs = state_inputs or []
self._state_outputs = state_outputs or []
self._seq_lens = seq_lens
self._max_seq_len = max_seq_len
if self._state_inputs and self._seq_lens is None:
raise ValueError(
"seq_lens tensor must be given if state inputs are defined"
)
self._batch_divisibility_req = batch_divisibility_req
self._update_ops = update_ops
self._apply_op = None
self._stats_fetches = {}
self._timestep = (
timestep
if timestep is not None
else tf1.placeholder_with_default(
tf.zeros((), dtype=tf.int64), (), name="timestep"
)
)
self._optimizers: List[LocalOptimizer] = []
# Backward compatibility and for some code shared with tf-eager Policy.
self._optimizer = None
self._grads_and_vars: Union[ModelGradients, List[ModelGradients]] = []
self._grads: Union[ModelGradients, List[ModelGradients]] = []
# Policy tf-variables (weights), whose values to get/set via
# get_weights/set_weights.
self._variables = None
# Local optimizer(s)' tf-variables (e.g. state vars for Adam).
# Will be stored alongside `self._variables` when checkpointing.
self._optimizer_variables: Optional[
ray.experimental.tf_utils.TensorFlowVariables
] = None
# The loss tf-op(s). Number of losses must match number of optimizers.
self._losses = []
# Backward compatibility (in case custom child TFPolicies access this
# property).
self._loss = None
# A batch dict passed into loss function as input.
self._loss_input_dict = {}
losses = force_list(loss)
if len(losses) > 0:
self._initialize_loss(losses, loss_inputs)
# The log-likelihood calculator op.
self._log_likelihood = log_likelihood
if (
self._log_likelihood is None
and self._dist_inputs is not None
and self.dist_class is not None
):
self._log_likelihood = self.dist_class(self._dist_inputs, self.model).logp(
self._action_input
)
@override(Policy)
def compute_actions_from_input_dict(
self,
input_dict: Union[SampleBatch, Dict[str, TensorType]],
explore: bool = None,
timestep: Optional[int] = None,
episodes: Optional[List["Episode"]] = None,
**kwargs,
) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
explore = explore if explore is not None else self.config["explore"]
timestep = timestep if timestep is not None else self.global_timestep
# Switch off is_training flag in our batch.
if isinstance(input_dict, SampleBatch):
input_dict.set_training(False)
else:
# Deprecated dict input.
input_dict["is_training"] = False
builder = TFRunBuilder(self.get_session(), "compute_actions_from_input_dict")
obs_batch = input_dict[SampleBatch.OBS]
to_fetch = self._build_compute_actions(
builder, input_dict=input_dict, explore=explore, timestep=timestep
)
# Execute session run to get action (and other fetches).
fetched = builder.get(to_fetch)
# Update our global timestep by the batch size.
self.global_timestep += (
len(obs_batch)
if isinstance(obs_batch, list)
else len(input_dict)
if isinstance(input_dict, SampleBatch)
else obs_batch.shape[0]
)
return fetched
@override(Policy)
def compute_actions(
self,
obs_batch: Union[List[TensorType], TensorType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Union[List[TensorType], TensorType] = None,
prev_reward_batch: Union[List[TensorType], TensorType] = None,
info_batch: Optional[Dict[str, list]] = None,
episodes: Optional[List["Episode"]] = None,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
**kwargs,
):
explore = explore if explore is not None else self.config["explore"]
timestep = timestep if timestep is not None else self.global_timestep
builder = TFRunBuilder(self.get_session(), "compute_actions")
input_dict = {SampleBatch.OBS: obs_batch, "is_training": False}
if state_batches:
for i, s in enumerate(state_batches):
input_dict[f"state_in_{i}"] = s
if prev_action_batch is not None:
input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch
if prev_reward_batch is not None:
input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch
to_fetch = self._build_compute_actions(
builder, input_dict=input_dict, explore=explore, timestep=timestep
)
# Execute session run to get action (and other fetches).
fetched = builder.get(to_fetch)
# Update our global timestep by the batch size.
self.global_timestep += (
len(obs_batch)
if isinstance(obs_batch, list)
else tree.flatten(obs_batch)[0].shape[0]
)
return fetched
@override(Policy)
def compute_log_likelihoods(
self,
actions: Union[List[TensorType], TensorType],
obs_batch: Union[List[TensorType], TensorType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Optional[Union[List[TensorType], TensorType]] = None,
prev_reward_batch: Optional[Union[List[TensorType], TensorType]] = None,
actions_normalized: bool = True,
) -> TensorType:
if self._log_likelihood is None:
raise ValueError(
"Cannot compute log-prob/likelihood w/o a " "self._log_likelihood op!"
)
# Exploration hook before each forward pass.
self.exploration.before_compute_actions(
explore=False, tf_sess=self.get_session()
)
builder = TFRunBuilder(self.get_session(), "compute_log_likelihoods")
# Normalize actions if necessary.
if actions_normalized is False and self.config["normalize_actions"]:
actions = normalize_action(actions, self.action_space_struct)
# Feed actions (for which we want logp values) into graph.
builder.add_feed_dict({self._action_input: actions})
# Feed observations.
builder.add_feed_dict({self._obs_input: obs_batch})
# Internal states.
state_batches = state_batches or []
if len(self._state_inputs) != len(state_batches):
raise ValueError(
"Must pass in RNN state batches for placeholders {}, got {}".format(
self._state_inputs, state_batches
)
)
builder.add_feed_dict({k: v for k, v in zip(self._state_inputs, state_batches)})
if state_batches:
builder.add_feed_dict({self._seq_lens: np.ones(len(obs_batch))})
# Prev-a and r.
if self._prev_action_input is not None and prev_action_batch is not None:
builder.add_feed_dict({self._prev_action_input: prev_action_batch})
if self._prev_reward_input is not None and prev_reward_batch is not None:
builder.add_feed_dict({self._prev_reward_input: prev_reward_batch})
# Fetch the log_likelihoods output and return.
fetches = builder.add_fetches([self._log_likelihood])
return builder.get(fetches)[0]
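# Illustrative usage sketch (not part of the original source): querying the
# log-likelihood of already-collected actions under the current policy. The
# names `policy` and `sample_batch` are assumed for the example.
#
#     logp = policy.compute_log_likelihoods(
#         actions=sample_batch[SampleBatch.ACTIONS],
#         obs_batch=sample_batch[SampleBatch.OBS],
#         prev_action_batch=sample_batch.get(SampleBatch.PREV_ACTIONS),
#         prev_reward_batch=sample_batch.get(SampleBatch.PREV_REWARDS),
#         actions_normalized=True,
#     )
#     # `logp` holds one log-likelihood value per batch row.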
@override(Policy)
@DeveloperAPI
def learn_on_batch(self, postprocessed_batch: SampleBatch) -> Dict[str, TensorType]:
assert self.loss_initialized()
# Switch on is_training flag in our batch.
postprocessed_batch.set_training(True)
builder = TFRunBuilder(self.get_session(), "learn_on_batch")
# Callback handling.
learn_stats = {}
self.callbacks.on_learn_on_batch(
policy=self, train_batch=postprocessed_batch, result=learn_stats
)
fetches = self._build_learn_on_batch(builder, postprocessed_batch)
stats = builder.get(fetches)
stats.update(
{
"custom_metrics": learn_stats,
NUM_AGENT_STEPS_TRAINED: postprocessed_batch.count,
}
)
return stats
@override(Policy)
@DeveloperAPI
def compute_gradients(
self, postprocessed_batch: SampleBatch
) -> Tuple[ModelGradients, Dict[str, TensorType]]:
assert self.loss_initialized()
# Switch on is_training flag in our batch.
postprocessed_batch.set_training(True)
builder = TFRunBuilder(self.get_session(), "compute_gradients")
fetches = self._build_compute_gradients(builder, postprocessed_batch)
return builder.get(fetches)
@override(Policy)
@DeveloperAPI
def apply_gradients(self, gradients: ModelGradients) -> None:
assert self.loss_initialized()
builder = TFRunBuilder(self.get_session(), "apply_gradients")
fetches = self._build_apply_gradients(builder, gradients)
builder.get(fetches)
@override(Policy)
@DeveloperAPI
def get_weights(self) -> Union[Dict[str, TensorType], List[TensorType]]:
return self._variables.get_weights()
@override(Policy)
@DeveloperAPI
def set_weights(self, weights) -> None:
return self._variables.set_weights(weights)
@override(Policy)
@DeveloperAPI
def get_exploration_state(self) -> Dict[str, TensorType]:
return self.exploration.get_state(sess=self.get_session())
@Deprecated(new="get_exploration_state", error=False)
def get_exploration_info(self) -> Dict[str, TensorType]:
return self.get_exploration_state()
@override(Policy)
@DeveloperAPI
def is_recurrent(self) -> bool:
return len(self._state_inputs) > 0
@override(Policy)
@DeveloperAPI
def num_state_tensors(self) -> int:
return len(self._state_inputs)
@override(Policy)
@DeveloperAPI
def get_state(self) -> Union[Dict[str, TensorType], List[TensorType]]:
# For tf Policies, return Policy weights and optimizer var values.
state = super().get_state()
if len(self._optimizer_variables.variables) > 0:
state["_optimizer_variables"] = self.get_session().run(
self._optimizer_variables.variables
)
# Add exploration state.
state["_exploration_state"] = self.exploration.get_state(self.get_session())
return state
@override(Policy)
@DeveloperAPI
def set_state(self, state: dict) -> None:
# Set optimizer vars first.
optimizer_vars = state.get("_optimizer_variables", None)
if optimizer_vars is not None:
self._optimizer_variables.set_weights(optimizer_vars)
# Set exploration's state.
if hasattr(self, "exploration") and "_exploration_state" in state:
self.exploration.set_state(
state=state["_exploration_state"], sess=self.get_session()
)
# Set the Policy's (NN) weights.
super().set_state(state)
@override(Policy)
@DeveloperAPI
def export_checkpoint(
self, export_dir: str, filename_prefix: str = "model"
) -> None:
"""Export tensorflow checkpoint to export_dir."""
try:
os.makedirs(export_dir)
except OSError as e:
# ignore error if export dir already exists
if e.errno != errno.EEXIST:
raise
save_path = os.path.join(export_dir, filename_prefix)
with self.get_session().graph.as_default():
saver = tf1.train.Saver()
saver.save(self.get_session(), save_path)
@override(Policy)
@DeveloperAPI
def export_model(self, export_dir: str, onnx: Optional[int] = None) -> None:
"""Export tensorflow graph to export_dir for serving."""
if onnx:
try:
import tf2onnx
except ImportError as e:
raise RuntimeError(
"Converting a TensorFlow model to ONNX requires "
"`tf2onnx` to be installed. Install with "
"`pip install tf2onnx`."
) from e
with self.get_session().graph.as_default():
signature_def_map = self._build_signature_def()
sd = signature_def_map[
tf1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY # noqa: E501
]
inputs = [v.name for k, v in sd.inputs.items()]
outputs = [v.name for k, v in sd.outputs.items()]
from tf2onnx import tf_loader
frozen_graph_def = tf_loader.freeze_session(
self._sess, input_names=inputs, output_names=outputs
)
with tf1.Session(graph=tf.Graph()) as session:
tf.import_graph_def(frozen_graph_def, name="")
g = tf2onnx.tfonnx.process_tf_graph(
session.graph,
input_names=inputs,
output_names=outputs,
inputs_as_nchw=inputs,
)
model_proto = g.make_model("onnx_model")
tf2onnx.utils.save_onnx_model(
export_dir, "saved_model", feed_dict={}, model_proto=model_proto
)
else:
with self.get_session().graph.as_default():
signature_def_map = self._build_signature_def()
builder = tf1.saved_model.builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
self.get_session(),
[tf1.saved_model.tag_constants.SERVING],
signature_def_map=signature_def_map,
saver=tf1.summary.FileWriter(export_dir).add_graph(
graph=self.get_session().graph
),
)
builder.save()
@override(Policy)
@DeveloperAPI
def import_model_from_h5(self, import_file: str) -> None:
"""Imports weights into tf model."""
if self.model is None:
raise NotImplementedError("No `self.model` to import into!")
# Make sure the session is the right one (see issue #7046).
with self.get_session().graph.as_default():
with self.get_session().as_default():
return self.model.import_from_h5(import_file)
@override(Policy)
def get_session(self) -> Optional["tf1.Session"]:
"""Returns a reference to the TF session for this policy."""
return self._sess
def variables(self):
"""Return the list of all savable variables for this policy."""
if self.model is None:
raise NotImplementedError("No `self.model` to get variables for!")
elif isinstance(self.model, tf.keras.Model):
return self.model.variables
else:
return self.model.variables()
def get_placeholder(self, name) -> "tf1.placeholder":
"""Returns the given action or loss input placeholder by name.
If the loss has not been initialized and a loss input placeholder is
requested, an error is raised.
Args:
name (str): The name of the placeholder to return. One of
SampleBatch.CUR_OBS, SampleBatch.PREV_ACTIONS,
SampleBatch.PREV_REWARDS, or a valid key from
`self._loss_input_dict`.
Returns:
tf1.placeholder: The placeholder under the given str key.
"""
if name == SampleBatch.CUR_OBS:
return self._obs_input
elif name == SampleBatch.PREV_ACTIONS:
return self._prev_action_input
elif name == SampleBatch.PREV_REWARDS:
return self._prev_reward_input
assert self._loss_input_dict, (
"You need to populate `self._loss_input_dict` before "
"`get_placeholder()` can be called"
)
return self._loss_input_dict[name]
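# Illustrative usage sketch (not part of the original source): fetching the
# observation placeholder to run a small, user-defined op on top of the
# policy graph. The names `policy` and `observations` are assumed here.
#
#     obs_ph = policy.get_placeholder(SampleBatch.CUR_OBS)
#     extra_op = tf.reduce_mean(tf.cast(obs_ph, tf.float32))
#     value = policy.get_session().run(
#         extra_op, feed_dict={obs_ph: observations}
#     )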
def loss_initialized(self) -> bool:
"""Returns whether the loss term(s) have been initialized."""
return len(self._losses) > 0
def _initialize_loss(
self, losses: List[TensorType], loss_inputs: List[Tuple[str, TensorType]]
) -> None:
"""Initializes the loss op from given loss tensor and placeholders.
Args:
losses (List[TensorType]): The list of loss ops returned by some
loss function.
loss_inputs (List[Tuple[str, TensorType]]): The list of tuples
(name, tf1.placeholder) needed for calculating the loss.
"""
self._loss_input_dict = dict(loss_inputs)
self._loss_input_dict_no_rnn = {
k: v
for k, v in self._loss_input_dict.items()
if (v not in self._state_inputs and v != self._seq_lens)
}
for i, ph in enumerate(self._state_inputs):
self._loss_input_dict["state_in_{}".format(i)] = ph
if self.model and not isinstance(self.model, tf.keras.Model):
self._losses = force_list(
self.model.custom_loss(losses, self._loss_input_dict)
)
self._stats_fetches.update({"model": self.model.metrics()})
else:
self._losses = losses
# Backward compatibility.
self._loss = self._losses[0] if self._losses is not None else None
if not self._optimizers:
self._optimizers = force_list(self.optimizer())
# Backward compatibility.
self._optimizer = self._optimizers[0] if self._optimizers else None
# Supporting more than one loss/optimizer.
if self.config["_tf_policy_handles_more_than_one_loss"]:
self._grads_and_vars = []
self._grads = []
for group in self.gradients(self._optimizers, self._losses):
g_and_v = [(g, v) for (g, v) in group if g is not None]
self._grads_and_vars.append(g_and_v)
self._grads.append([g for (g, _) in g_and_v])
# Only one optimizer and loss term.
else:
self._grads_and_vars = [
(g, v)
for (g, v) in self.gradients(self._optimizer, self._loss)
if g is not None
]
self._grads = [g for (g, _) in self._grads_and_vars]
if self.model:
self._variables = ray.experimental.tf_utils.TensorFlowVariables(
[], self.get_session(), self.variables()
)
# Gather update ops for any batch norm layers.
if len(self.devices) <= 1:
if not self._update_ops:
self._update_ops = tf1.get_collection(
tf1.GraphKeys.UPDATE_OPS, scope=tf1.get_variable_scope().name
)
if self._update_ops:
logger.info(
"Update ops to run on apply gradient: {}".format(self._update_ops)
)
with tf1.control_dependencies(self._update_ops):
self._apply_op = self.build_apply_op(
optimizer=self._optimizers
if self.config["_tf_policy_handles_more_than_one_loss"]
else self._optimizer,
grads_and_vars=self._grads_and_vars,
)
if log_once("loss_used"):
logger.debug(
"These tensors were used in the loss functions:"
f"\n{summarize(self._loss_input_dict)}\n"
)
self.get_session().run(tf1.global_variables_initializer())
# TensorFlowVariables holding a flat list of all our optimizers'
# variables.
self._optimizer_variables = ray.experimental.tf_utils.TensorFlowVariables(
[v for o in self._optimizers for v in o.variables()], self.get_session()
)
@DeveloperAPI
def copy(self, existing_inputs: List[Tuple[str, "tf1.placeholder"]]) -> "TFPolicy":
"""Creates a copy of self using existing input placeholders.
Optional: Only required to work with the multi-GPU optimizer.
Args:
existing_inputs (List[Tuple[str, tf1.placeholder]]): List of
tuples mapping names (str) to tf1.placeholders to re-use
(share) with the returned copy of self.
Returns:
TFPolicy: A copy of self.
"""
raise NotImplementedError
@DeveloperAPI
def extra_compute_action_feed_dict(self) -> Dict[TensorType, TensorType]:
"""Extra dict to pass to the compute actions session run.
Returns:
Dict[TensorType, TensorType]: A feed dict to be added to the
feed_dict passed to the compute_actions session.run() call.
"""
return {}
@DeveloperAPI
def extra_compute_action_fetches(self) -> Dict[str, TensorType]:
"""Extra values to fetch and return from compute_actions().
By default we return action probability/log-likelihood info
and action distribution inputs (if present).
Returns:
Dict[str, TensorType]: An extra fetch-dict to be passed to and
returned from the compute_actions() call.
"""
extra_fetches = {}
# Action-logp and action-prob.
if self._sampled_action_logp is not None:
extra_fetches[SampleBatch.ACTION_PROB] = self._sampled_action_prob
extra_fetches[SampleBatch.ACTION_LOGP] = self._sampled_action_logp
# Action-dist inputs.
if self._dist_inputs is not None:
extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = self._dist_inputs
return extra_fetches
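# Illustrative override sketch (not part of the original source): a subclass
# can extend the default fetch dict, e.g. to also return a value-function
# output. `MyTFPolicy` and `self._value_out` are hypothetical names here.
#
#     class MyTFPolicy(TFPolicy):
#         def extra_compute_action_fetches(self):
#             fetches = super().extra_compute_action_fetches()
#             fetches[SampleBatch.VF_PREDS] = self._value_out
#             return fetches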
@DeveloperAPI
def extra_compute_grad_feed_dict(self) -> Dict[TensorType, TensorType]:
"""Extra dict to pass to the compute gradients session run.
Returns:
Dict[TensorType, TensorType]: Extra feed_dict to be passed to the
compute_gradients Session.run() call.
"""
return {}  # e.g., kl_coeff
@DeveloperAPI
def extra_compute_grad_fetches(self) -> Dict[str, any]:
"""Extra values to fetch and return from compute_gradients().
Returns:
Dict[str, any]: Extra fetch dict to be added to the fetch dict
of the compute_gradients Session.run() call.
"""
return {LEARNER_STATS_KEY: {}}  # e.g., stats, td error, etc.
@DeveloperAPI
def optimizer(self) -> "tf.keras.optimizers.Optimizer":
"""TF optimizer to use for policy optimization.
Returns:
tf.keras.optimizers.Optimizer: The local optimizer to use for this
Policy's Model.
"""
if hasattr(self, "config") and "lr" in self.config:
return tf1.train.AdamOptimizer(learning_rate=self.config["lr"])
else:
return tf1.train.AdamOptimizer()
@DeveloperAPI
def gradients(
self,
optimizer: Union[LocalOptimizer, List[LocalOptimizer]],
loss: Union[TensorType, List[TensorType]],
) -> Union[List[ModelGradients], List[List[ModelGradients]]]:
"""Override this for a custom gradient computation behavior.
Args:
optimizer (Union[LocalOptimizer, List[LocalOptimizer]]): A single
LocalOptimizer or a list thereof to use for gradient
calculations. If more than one optimizer given, the number of
optimizers must match the number of losses provided.
loss (Union[TensorType, List[TensorType]]): A single loss term
or a list thereof to use for gradient calculations.
If more than one loss given, the number of loss terms must
match the number of optimizers provided.
Returns:
Union[List[ModelGradients], List[List[ModelGradients]]]: List of
ModelGradients (grads and vars OR just grads) OR List of List
of ModelGradients in case we have more than one
optimizer/loss.
"""
optimizers = force_list(optimizer)
losses = force_list(loss)
# We have more than one optimizer and loss term.
if self.config["_tf_policy_handles_more_than_one_loss"]:
grads = []
for optim, loss_ in zip(optimizers, losses):
grads.append(optim.compute_gradients(loss_))
return grads
# We have only one optimizer and one loss term.
else:
return optimizers[0].compute_gradients(losses[0])
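# Illustrative override sketch (not part of the original source): custom
# gradient computation with global-norm clipping for the single-optimizer
# case. `MyTFPolicy` and the `grad_clip` config key are assumed names.
#
#     class MyTFPolicy(TFPolicy):
#         def gradients(self, optimizer, loss):
#             grads_and_vars = optimizer.compute_gradients(loss)
#             grads = [g for g, _ in grads_and_vars if g is not None]
#             vars_ = [v for g, v in grads_and_vars if g is not None]
#             clipped, _ = tf.clip_by_global_norm(
#                 grads, self.config.get("grad_clip", 40.0)
#             )
#             return list(zip(clipped, vars_))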
@DeveloperAPI
def build_apply_op(
self,
optimizer: Union[LocalOptimizer, List[LocalOptimizer]],
grads_and_vars: Union[ModelGradients, List[ModelGradients]],
) -> "tf.Operation":
"""Override this for a custom gradient apply computation behavior.
Args:
optimizer (Union[LocalOptimizer, List[LocalOptimizer]]): The local
tf optimizer to use for applying the grads and vars.
grads_and_vars (Union[ModelGradients, List[ModelGradients]]): List
of tuples with grad values and the grad-value's corresponding
tf.variable in it.
Returns:
tf.Operation: The tf op that applies all computed gradients
(`grads_and_vars`) to the model(s) via the given optimizer(s).
"""
optimizers = force_list(optimizer)
# We have more than one optimizer and loss term.
if self.config["_tf_policy_handles_more_than_one_loss"]:
ops = []
for i, optim in enumerate(optimizers):
# Specify global_step (e.g. for TD3 which needs to count the
# num updates that have happened).
ops.append(
optim.apply_gradients(
grads_and_vars[i],
global_step=tf1.train.get_or_create_global_step(),
)
)
return tf.group(ops)
# We have only one optimizer and one loss term.
else:
return optimizers[0].apply_gradients(
grads_and_vars, global_step=tf1.train.get_or_create_global_step()
)
def _get_is_training_placeholder(self):
"""Get the placeholder for _is_training, i.e., for batch norm layers.
This can be called safely before __init__ has run.
"""
if not hasattr(self, "_is_training"):
self._is_training = tf1.placeholder_with_default(
False, (), name="is_training"
)
return self._is_training
def _debug_vars(self):
if log_once("grad_vars"):
if self.config["_tf_policy_handles_more_than_one_loss"]:
for group in self._grads_and_vars:
for _, v in group:
logger.info("Optimizing variable {}".format(v))
else:
for _, v in self._grads_and_vars:
logger.info("Optimizing variable {}".format(v))
def _extra_input_signature_def(self):
"""Extra input signatures to add when exporting tf model.
Inferred from extra_compute_action_feed_dict()
"""
feed_dict = self.extra_compute_action_feed_dict()
return {
k.name: tf1.saved_model.utils.build_tensor_info(k) for k in feed_dict.keys()
}
def _extra_output_signature_def(self):
"""Extra output signatures to add when exporting tf model.
Inferred from extra_compute_action_fetches()
"""
fetches = self.extra_compute_action_fetches()
return {
k: tf1.saved_model.utils.build_tensor_info(fetches[k])
for k in fetches.keys()
}
def _build_signature_def(self):
"""Build signature def map for tensorflow SavedModelBuilder."""
# build input signatures
input_signature = self._extra_input_signature_def()
input_signature["observations"] = tf1.saved_model.utils.build_tensor_info(
self._obs_input
)
if self._seq_lens is not None:
input_signature[
SampleBatch.SEQ_LENS
] = tf1.saved_model.utils.build_tensor_info(self._seq_lens)
if self._prev_action_input is not None:
input_signature["prev_action"] = tf1.saved_model.utils.build_tensor_info(
self._prev_action_input
)
if self._prev_reward_input is not None:
input_signature["prev_reward"] = tf1.saved_model.utils.build_tensor_info(
self._prev_reward_input
)
input_signature["is_training"] = tf1.saved_model.utils.build_tensor_info(
self._is_training
)
if self._timestep is not None:
input_signature["timestep"] = tf1.saved_model.utils.build_tensor_info(
self._timestep
)
for state_input in self._state_inputs:
input_signature[state_input.name] = tf1.saved_model.utils.build_tensor_info(
state_input
)
# build output signatures
output_signature = self._extra_output_signature_def()
for i, a in enumerate(tf.nest.flatten(self._sampled_action)):
output_signature[
"actions_{}".format(i)
] = tf1.saved_model.utils.build_tensor_info(a)
for state_output in self._state_outputs:
output_signature[
state_output.name
] = tf1.saved_model.utils.build_tensor_info(state_output)
signature_def = tf1.saved_model.signature_def_utils.build_signature_def(
input_signature,
output_signature,
tf1.saved_model.signature_constants.PREDICT_METHOD_NAME,
)
signature_def_key = (
tf1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
)
signature_def_map = {signature_def_key: signature_def}
return signature_def_map
def _build_compute_actions(
self,
builder,
*,
input_dict=None,
obs_batch=None,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
explore=None,
timestep=None,
):
explore = explore if explore is not None else self.config["explore"]
timestep = timestep if timestep is not None else self.global_timestep
# Call the exploration before_compute_actions hook.
self.exploration.before_compute_actions(
timestep=timestep, explore=explore, tf_sess=self.get_session()
)
builder.add_feed_dict(self.extra_compute_action_feed_dict())
# `input_dict` given: Simply build what's in that dict.
if input_dict is not None:
if hasattr(self, "_input_dict"):
for key, value in input_dict.items():
if key in self._input_dict:
# Handle complex/nested spaces as well.
tree.map_structure(
lambda k, v: builder.add_feed_dict({k: v}),
self._input_dict[key],
value,
)
# For policies that inherit directly from TFPolicy.
else:
builder.add_feed_dict({self._obs_input: input_dict[SampleBatch.OBS]})
if SampleBatch.PREV_ACTIONS in input_dict:
builder.add_feed_dict(
{self._prev_action_input: input_dict[SampleBatch.PREV_ACTIONS]}
)
if SampleBatch.PREV_REWARDS in input_dict:
builder.add_feed_dict(
{self._prev_reward_input: input_dict[SampleBatch.PREV_REWARDS]}
)
state_batches = []
i = 0
while "state_in_{}".format(i) in input_dict:
state_batches.append(input_dict["state_in_{}".format(i)])
i += 1
builder.add_feed_dict(dict(zip(self._state_inputs, state_batches)))
if "state_in_0" in input_dict and SampleBatch.SEQ_LENS not in input_dict:
builder.add_feed_dict(
{self._seq_lens: np.ones(len(input_dict["state_in_0"]))}
)
# Hardcoded old way: Build fixed fields, if provided.
# TODO: (sven) This can be deprecated after trajectory view API flag is
# removed and always True.
else:
if log_once("_build_compute_actions_input_dict"):
deprecation_warning(
old="_build_compute_actions(.., obs_batch=.., ..)",
new="_build_compute_actions(.., input_dict=..)",
error=False,
)
state_batches = state_batches or []
if len(self._state_inputs) != len(state_batches):
raise ValueError(
"Must pass in RNN state batches for placeholders {}, "
"got {}".format(self._state_inputs, state_batches)
)
tree.map_structure(
lambda k, v: builder.add_feed_dict({k: v}),
self._obs_input,
obs_batch,
)
if state_batches:
builder.add_feed_dict({self._seq_lens: np.ones(len(obs_batch))})
if self._prev_action_input is not None and prev_action_batch is not None:
builder.add_feed_dict({self._prev_action_input: prev_action_batch})
if self._prev_reward_input is not None and prev_reward_batch is not None:
builder.add_feed_dict({self._prev_reward_input: prev_reward_batch})
builder.add_feed_dict(dict(zip(self._state_inputs, state_batches)))
builder.add_feed_dict({self._is_training: False})
builder.add_feed_dict({self._is_exploring: explore})
if timestep is not None:
builder.add_feed_dict({self._timestep: timestep})
# Determine, what exactly to fetch from the graph.
to_fetch = (
[self._sampled_action]
+ self._state_outputs
+ [self.extra_compute_action_fetches()]
)
# Perform the session call.
fetches = builder.add_fetches(to_fetch)
return fetches[0], fetches[1:-1], fetches[-1]
def _build_compute_gradients(self, builder, postprocessed_batch):
self._debug_vars()
builder.add_feed_dict(self.extra_compute_grad_feed_dict())
builder.add_feed_dict(
self._get_loss_inputs_dict(postprocessed_batch, shuffle=False)
)
fetches = builder.add_fetches([self._grads, self._get_grad_and_stats_fetches()])
return fetches[0], fetches[1]
def _build_apply_gradients(self, builder, gradients):
if len(gradients) != len(self._grads):
raise ValueError(
"Unexpected number of gradients to apply, got {} for {}".format(
gradients, self._grads
)
)
builder.add_feed_dict({self._is_training: True})
builder.add_feed_dict(dict(zip(self._grads, gradients)))
fetches = builder.add_fetches([self._apply_op])
return fetches[0]
def _build_learn_on_batch(self, builder, postprocessed_batch):
self._debug_vars()
builder.add_feed_dict(self.extra_compute_grad_feed_dict())
builder.add_feed_dict(
self._get_loss_inputs_dict(postprocessed_batch, shuffle=False)
)
fetches = builder.add_fetches(
[
self._apply_op,
self._get_grad_and_stats_fetches(),
]
)
return fetches[1]
def _get_grad_and_stats_fetches(self):
fetches = self.extra_compute_grad_fetches()
if LEARNER_STATS_KEY not in fetches:
raise ValueError("Grad fetches should contain 'stats': {...} entry")
if self._stats_fetches:
fetches[LEARNER_STATS_KEY] = dict(
self._stats_fetches, **fetches[LEARNER_STATS_KEY]
)
return fetches
def _get_loss_inputs_dict(self, train_batch: SampleBatch, shuffle: bool):
"""Return a feed dict from a batch.
Args:
train_batch (SampleBatch): batch of data to derive inputs from.
shuffle (bool): whether to shuffle batch sequences. Shuffle may
be done in-place. This only makes sense if you're further
applying minibatch SGD after getting the outputs.
Returns:
Feed dict of data.
"""
# Get batch ready for RNNs, if applicable.
if not isinstance(train_batch, SampleBatch) or not train_batch.zero_padded:
pad_batch_to_sequences_of_same_size(
train_batch,
max_seq_len=self._max_seq_len,
shuffle=shuffle,
batch_divisibility_req=self._batch_divisibility_req,
feature_keys=list(self._loss_input_dict_no_rnn.keys()),
view_requirements=self.view_requirements,
)
# Mark the batch as "is_training" so the Model can use this
# information.
train_batch.set_training(True)
# Build the feed dict from the batch.
feed_dict = {}
for key, placeholders in self._loss_input_dict.items():
tree.map_structure(
lambda ph, v: feed_dict.__setitem__(ph, v),
placeholders,
train_batch[key],
)
state_keys = ["state_in_{}".format(i) for i in range(len(self._state_inputs))]
for key in state_keys:
feed_dict[self._loss_input_dict[key]] = train_batch[key]
if state_keys:
feed_dict[self._seq_lens] = train_batch[SampleBatch.SEQ_LENS]
return feed_dict
@DeveloperAPI
class LearningRateSchedule:
"""Mixin for TFPolicy that adds a learning rate schedule."""
@DeveloperAPI
def __init__(self, lr, lr_schedule):
self._lr_schedule = None
if lr_schedule is None:
self.cur_lr = tf1.get_variable("lr", initializer=lr, trainable=False)
else:
self._lr_schedule = PiecewiseSchedule(
lr_schedule, outside_value=lr_schedule[-1][-1], framework=None
)
self.cur_lr = tf1.get_variable(
"lr", initializer=self._lr_schedule.value(0), trainable=False
)
if self.framework == "tf":
self._lr_placeholder = tf1.placeholder(dtype=tf.float32, name="lr")
self._lr_update = self.cur_lr.assign(
self._lr_placeholder, read_value=False
)
@override(Policy)
def on_global_var_update(self, global_vars):
super(LearningRateSchedule, self).on_global_var_update(global_vars)
if self._lr_schedule is not None:
new_val = self._lr_schedule.value(global_vars["timestep"])
if self.framework == "tf":
self.get_session().run(
self._lr_update, feed_dict={self._lr_placeholder: new_val}
)
else:
self.cur_lr.assign(new_val, read_value=False)
# This property (self._optimizer) is (still) accessible for
# both TFPolicy and any TFPolicy_eager.
self._optimizer.learning_rate.assign(self.cur_lr)
@override(TFPolicy)
def optimizer(self):
return tf1.train.AdamOptimizer(learning_rate=self.cur_lr)
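# Illustrative config sketch (not part of the original source): the
# `lr_schedule` argument is a list of [timestep, lr_value] endpoints that
# PiecewiseSchedule interpolates between; past the last timestep the final
# value is kept. Example values are arbitrary:
#
#     lr_schedule = [
#         [0, 5e-4],          # start at 5e-4
#         [1_000_000, 1e-4],  # anneal to 1e-4 by 1M timesteps
#     ]
#     # Typically passed alongside a base "lr" value when building the policy.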
@DeveloperAPI
class EntropyCoeffSchedule:
"""Mixin for TFPolicy that adds entropy coeff decay."""
@DeveloperAPI
def __init__(self, entropy_coeff, entropy_coeff_schedule):
self._entropy_coeff_schedule = None
if entropy_coeff_schedule is None:
self.entropy_coeff = get_variable(
entropy_coeff, framework="tf", tf_name="entropy_coeff", trainable=False
)
else:
# Allows for custom schedule similar to lr_schedule format
if isinstance(entropy_coeff_schedule, list):
self._entropy_coeff_schedule = PiecewiseSchedule(
entropy_coeff_schedule,
outside_value=entropy_coeff_schedule[-1][-1],
framework=None,
)
else:
# Implements previous version but enforces outside_value
self._entropy_coeff_schedule = PiecewiseSchedule(
[[0, entropy_coeff], [entropy_coeff_schedule, 0.0]],
outside_value=0.0,
framework=None,
)
self.entropy_coeff = get_variable(
self._entropy_coeff_schedule.value(0),
framework="tf",
tf_name="entropy_coeff",
trainable=False,
)
if self.framework == "tf":
self._entropy_coeff_placeholder = tf1.placeholder(
dtype=tf.float32, name="entropy_coeff"
)
self._entropy_coeff_update = self.entropy_coeff.assign(
self._entropy_coeff_placeholder, read_value=False
)
@override(Policy)
def on_global_var_update(self, global_vars):
super(EntropyCoeffSchedule, self).on_global_var_update(global_vars)
if self._entropy_coeff_schedule is not None:
new_val = self._entropy_coeff_schedule.value(global_vars["timestep"])
if self.framework == "tf":
self.get_session().run(
self._entropy_coeff_update,
feed_dict={self._entropy_coeff_placeholder: new_val},
)
else:
self.entropy_coeff.assign(new_val, read_value=False)
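# Illustrative config sketch (not part of the original source): the
# `entropy_coeff_schedule` argument accepts either a list of
# [timestep, coeff] endpoints (custom schedule) or a single timestep at
# which the coefficient has linearly decayed to 0.0. Example values are
# arbitrary:
#
#     entropy_coeff_schedule = [[0, 0.01], [2_000_000, 0.0]]  # list form
#     entropy_coeff_schedule = 2_000_000  # scalar form: decay from
#     # `entropy_coeff` to 0.0 over the first 2M timesteps.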
|
from panda3d.core import *
from panda3d.direct import *
from toontown.toonbase import ToontownGlobals
taskZoneId2pathId = {ToontownGlobals.SellbotFactoryInt: 'sellbotFactory',
ToontownGlobals.CashbotMintIntA: 'cashbotMint',
ToontownGlobals.CashbotMintIntB: 'cashbotMint',
ToontownGlobals.CashbotMintIntC: 'cashbotMint',
ToontownGlobals.LawbotOfficeInt: 'lawOfficeStage',
ToontownGlobals.LawbotStageIntA: 'lawOfficeStage',
ToontownGlobals.LawbotStageIntB: 'lawOfficeStage',
ToontownGlobals.LawbotStageIntC: 'lawOfficeStage',
ToontownGlobals.LawbotStageIntD: 'lawOfficeStage'}
Paths = {'sellbotFactory': {0: [Vec3(10.0, 0.0, 0.0),
Vec3(10.0, 10.0, 0.0),
Vec3(-10.0, 10.0, 0.0),
Vec3(-10.0, 0.0, 0.0)],
1: [Vec3(10.0, 5.0, 0.0), Vec3(10.0, 0.0, 0.0), Vec3(-10.0, -5.0, 0.0)],
2: [Vec3(-48.31, -0.001, 0),
Vec3(-48.0, -3.709, 0),
Vec3(35.041, -3.27, 0),
Vec3(34.751, -91.376, 0),
Vec3(39.869, -91.248, 0),
Vec3(39.93, -0.022, 0)],
3: [Vec3(-47.9110107422, -6.86798095703, 0.0),
Vec3(27.691986084, -5.68200683594, 0.0),
Vec3(34.049987793, 3.55303955078, 0.0),
Vec3(-39.983001709, 3.68499755859, 0.0)],
4: [Vec3(3.5649, 35.397, 0),
Vec3(-5.335, 36.067, 0),
Vec3(-4.605, -69.67, 0),
Vec3(17.815, -70.577, 0),
Vec3(17.4979, -50.997, 0),
Vec3(3.479, -46.775, 0)],
5: [Vec3(-2.993, -21.085, 0),
Vec3(5.209, -20.966, 0),
Vec3(2.164, 74.742, 0),
Vec3(-50.439, 78.55, 0),
Vec3(-52.042, 58.831, 0),
Vec3(-3.549, 57.295, 0)],
6: [Vec3(31.627, 2.093, 0.0), Vec3(4, 43, 0)],
7: [Vec3(34.627, 2.093, 0.0), Vec3(32, 43, 0)],
8: [Vec3(64.0, 43.0, 0.0), Vec3(59.5, 1.8, 0.0)],
9: [Vec3(84.0, 43.0, 0.0), Vec3(93.0, 25.0, 0.0), Vec3(66.0, 2.0, 0.0)],
10: [Vec3(85.0, 43.0, 0.0),
Vec3(66.0, 47.0, 0.0),
Vec3(53.0, 49.0, 0.0),
Vec3(71.0, 22.0, 0.0)],
11: [Vec3(63.0, 43.0, 0.0), Vec3(33.0, 47.0, 0.0)],
12: [Vec3(10.5139980316, 73.4393844604, 0.0), Vec3(-10.0053958893, 72.9239883423, 0.0), Vec3(5.55112314224, 90.2847213745, 0.0)],
13: [Vec3(-2.62728261948, 50.5329399109, 0.0), Vec3(-2.21770620346, 3.07684659958, 0.0)],
14: [Vec3(-15.9598636627, -15.0503959656, 0.0),
Vec3(8.35170555115, -17.5856513977, 0.0),
Vec3(8.34984397888, 3.9258685112, 0.0),
Vec3(-11.5888309479, 7.29379606247, 0.0)],
15: [Vec3(-25.7975006104, 18.4752807617, 0.0), Vec3(12.6321563721, 19.1084594727, 0.0), Vec3(24.8442764282, -4.62582397461, 0.0)],
16: [Vec3(6.70755004883, -4.16766357422, 0.0),
Vec3(-24.5565567017, -6.02560424805, 0.0),
Vec3(-24.4623031616, 15.6810913086, 0.0),
Vec3(3.93852233887, 17.1640014648, 0.0)],
17: [Vec3(0.474000930786, -29.1558818817, 0.0),
Vec3(0.460096359253, -16.5713024139, 0.0),
Vec3(27.9419631958, -16.0025310516, 0.0),
Vec3(28.3607826233, -38.4133338928, 0.0),
Vec3(27.2721481323, 0.622072696686, 0.0),
Vec3(-29.8864364624, 2.13151788712, 0.0),
Vec3(-28.907831192, -38.875164032, 0.0),
Vec3(-26.5185241699, -16.1851940155, 0.0),
Vec3(0.0833129882813, -16.7483196259, 0.0)],
18: [Vec3(-37.6936645508, 4.92616510391, 0.0),
Vec3(-6.09254932404, 3.79619073868, 0.0),
Vec3(-5.81788635254, 16.4960193634, 0.0),
Vec3(6.79872131348, 17.3943042755, 0.0),
Vec3(6.47452545166, 6.80963373184, 0.0),
Vec3(29.4301567078, 4.76448297501, 0.0)],
19: [Vec3(-11.6953277588, -13.8257198334, -0.00400207517669),
Vec3(-26.5939941406, -38.6423110962, -0.00400207517669),
Vec3(-12.9963378906, -54.1221084595, -0.00400207517669),
Vec3(5.45291900635, -50.0818252563, -0.00400207517669),
Vec3(0.500541687012, -27.6731319427, -0.00400207517669),
Vec3(10.6271972656, -16.9095211029, -0.00400207517669),
Vec3(3.95051574707, -11.6894283295, -0.00400207517669)],
20: [Vec3(1.44152832031, -18.6059322357, -0.00400207517669),
Vec3(-0.327140808105, -38.2261734009, -0.00400207517669),
Vec3(19.4757766724, -20.1440181732, -0.00400207517669),
Vec3(-13.617980957, -38.8843765259, -0.00400207517669),
Vec3(9.44762420654, -28.4113521576, -0.00400207517669)],
21: [Vec3(26.8655929565, -13.403295517, -0.00400207517669),
Vec3(32.0896224976, -27.145483017, -0.00400207517669),
Vec3(23.4544830322, -40.3218765259, -0.00400207517669),
Vec3(14.4268798828, -42.4280166626, -0.00400207517669),
Vec3(6.38285827637, -40.3579483032, -0.00400207517669),
Vec3(-2.16972351074, -26.8708248138, -0.00400207517669),
Vec3(-3.16332244873, -17.7787837982, -0.00400207517669),
Vec3(-3.16332244873, -17.7787837982, -0.00400207517669),
Vec3(2.0150680542, -5.54251718521, -0.00400207517669),
Vec3(7.65352630615, -5.11417245865, -0.00400207517669)],
22: [Vec3(-4.81932353973, 4.0960521698, 0.0),
Vec3(-37.2935218811, -35.963394165, 0.0),
Vec3(-32.3968849182, -49.8670196533, 0.0),
Vec3(-52.6336708069, -33.5889434814, 0.0),
Vec3(-53.0538520813, -21.5930957794, 0.0),
Vec3(-44.2506980896, -27.3775806427, 0.0),
Vec3(-1.58745121956, 3.91203117371, 0.0)],
23: [Vec3(5.80584430695, 1.62095463276, 0.0),
Vec3(-10.3068342209, -10.9403858185, 0.0),
Vec3(-27.9555549622, 11.9806346893, 0.0),
Vec3(-2.36316990852, -15.246049881, 0.0),
Vec3(-12.8535642624, -3.13337898254, 0.0)],
24: [Vec3(-12.7202939987, 15.884016037, 0.0), Vec3(16.7396354675, -14.0337085724, 0.0)],
25: [Vec3(-17.7801113129, -8.74084472656, 0.0), Vec3(-13.1200618744, -9.26202392578, -0.0)],
26: [Vec3(8.624584198, -10.9699707031, 0.0), Vec3(3.29245567322, -2.59405517578, 0.0)],
27: [Vec3(12.3031358719, 7.98439788818, 0.0), Vec3(-13.3801307678, 7.98439788818, 0.0)],
28: [Vec3(-32.1953697205, -85.9077148438, 0.0),
Vec3(-33.518032074, -60.6316223145, 0.0),
Vec3(-25.2015018463, -34.200378418, 0.0),
Vec3(-0.624415278435, -24.7206726074, 0.0)],
29: [Vec3(2.76529407501, -19.7032775879, 0.0),
Vec3(12.5924119949, -5.15737915039, 0.0),
Vec3(1.32620728016, 16.3902587891, 0.0),
Vec3(-13.4380140305, 18.5373535156, 0.0),
Vec3(-9.65627574921, -6.18496704102, 0.0),
Vec3(-24.4610538483, -31.9492492676, 0.0),
Vec3(-14.1554517746, -43.6148376465, 0.0)],
30: [Vec3(-0.175471186638, 24.0636901855, 0.0),
Vec3(-18.316400528, 61.1733856201, 0.0),
Vec3(-34.2972984314, 58.8515930176, 0.0),
Vec3(-18.7033634186, 27.5550231934, 0.0)],
31: [Vec3(25.8017997742, 73.792678833, 0.0), Vec3(11.0946779251, 51.4213256836, 0.0), Vec3(31.6651115417, 43.7239074707, 0.0)],
32: [Vec3(4.61576557159, 31.6040344238, 0.0),
Vec3(-10.1993589401, -7.03125, 0.0),
Vec3(-4.72947978973, -29.3165893555, 0.0),
Vec3(22.23179245, -28.7750854492, 0.0),
Vec3(11.0534486771, 0.391784667969, 0.0),
Vec3(18.2302970886, 28.4787597656, 0.0)],
33: [Vec3(8.04011249542, -51.311706543, 0.0),
Vec3(25.7791862488, -46.4603881836, 0.0),
Vec3(42.3690605164, -57.4282226563, 0.0),
Vec3(34.1978569031, -77.1849365234, 0.0)],
34: [Point3(2.75689697266, -21.3427124023, 0.0), Point3(2.78796386719, 37.0751342773, 0.000232696533203)],
35: [Point3(7.67395019531, 40.565612793, -0.000492095947266), Point3(78.0889282227, 39.3659667969, -0.00810623168945)],
36: [Point3(98.1296081543, 38.8174743652, -0.0102634429932),
Point3(104.098999023, 38.9267272949, -0.0109195709229),
Point3(105.774627686, 243.011169434, -0.0125999450684),
Point3(100.351745605, 242.852386475, -0.0125999450684)],
37: [Point3(152.121673584, 222.440261841, -5.0125999450684),
Point3(249.456481934, 223.478790283, -5.01259994507),
Point3(247.701660156, 233.899627686, -5.01259994507),
Point3(247.01550293, 224.162445068, -5.01259994507)],
38: [Point3(-1.65826416016, 25.0982055664, 0.025),
Point3(-1.44326782227, 9.02752685547, 0.025),
Point3(-8.76708984375, 11.2831420898, 0.025),
Point3(11.4426574707, 7.11584472656, 0.025),
Point3(2.77236938477, 7.02355957031, 0.025)],
39: [Point3(-3.69488525391, -1.89245605469, 0.025),
Point3(-5.73229980469, -9.62658691406, 0.025),
Point3(4.14199829102, -10.7971191406, 0.025),
Point3(7.77349853516, 1.58245849609, 0.025)],
40: [Point3(1.67153930664, 18.4436645508, 0.025), Point3(-7.59463500977, 18.5286254883, 0.025)],
41: [Point3(0.726699829102, -48.1898803711, -4.99999809265),
Point3(0.787185668945, -74.4460754395, -4.99999809265),
Point3(39.1846923828, -76.5356445313, -4.99999809265),
Point3(0.787185668945, -74.4460754395, -4.99999809265)],
42: [Vec3(6.0, -6.0, 0.0),
Vec3(6.0, 6.0, 0.0),
Vec3(-6.0, 6.0, 0.0),
Vec3(-6.0, -6.0, 0.0)]},
'cashbotMint': {28: [Vec3(-32.1953697205, -85.9077148438, 0.0),
Vec3(-33.518032074, -60.6316223145, 0.0),
Vec3(-25.2015018463, -34.200378418, 0.0),
Vec3(-0.624415278435, -24.7206726074, 0.0)],
29: [Vec3(2.76529407501, -19.7032775879, 0.0),
Vec3(12.5924119949, -5.15737915039, 0.0),
Vec3(1.32620728016, 16.3902587891, 0.0),
Vec3(-13.4380140305, 18.5373535156, 0.0),
Vec3(-9.65627574921, -6.18496704102, 0.0),
Vec3(-24.4610538483, -31.9492492676, 0.0),
Vec3(-14.1554517746, -43.6148376465, 0.0)],
30: [Vec3(-0.175471186638, 24.0636901855, 0.0),
Vec3(-18.316400528, 61.1733856201, 0.0),
Vec3(-34.2972984314, 58.8515930176, 0.0),
Vec3(-18.7033634186, 27.5550231934, 0.0)],
0: [Vec3(-5, 0.0, 0.0),
Vec3(-5, 10.0, 0.0),
Vec3(5.0, 10.0, 0.0),
Vec3(5.0, 0.0, 0.0)],
1: [Vec3(0.0, 0.0, 0.0), Vec3(-5.77, 10.0, 0.0), Vec3(5.77, 10.0, 0.0)],
2: [Vec3(-5.77, 10.0, 0.0),
Vec3(5.77, 10.0, 0.0),
Vec3(-5.77, -10.0, 0.0),
Vec3(5.77, -10.0, 0.0)],
3: [Vec3(-10, 0, 0), Vec3(10, 0, 0)]},
'lawOfficeStage': {28: [Vec3(-32.1953697205, -85.9077148438, 0.0),
Vec3(-33.518032074, -60.6316223145, 0.0),
Vec3(-25.2015018463, -34.200378418, 0.0),
Vec3(-0.624415278435, -24.7206726074, 0.0)],
29: [Vec3(2.76529407501, -19.7032775879, 0.0),
Vec3(12.5924119949, -5.15737915039, 0.0),
Vec3(1.32620728016, 16.3902587891, 0.0),
Vec3(-13.4380140305, 18.5373535156, 0.0),
Vec3(-9.65627574921, -6.18496704102, 0.0),
Vec3(-24.4610538483, -31.9492492676, 0.0),
Vec3(-14.1554517746, -43.6148376465, 0.0)],
30: [Vec3(-0.175471186638, 24.0636901855, 0.0),
Vec3(-18.316400528, 61.1733856201, 0.0),
Vec3(-34.2972984314, 58.8515930176, 0.0),
Vec3(-18.7033634186, 27.5550231934, 0.0)],
0: [Vec3(-5, 0.0, 0.0),
Vec3(-5, 10.0, 0.0),
Vec3(5.0, 10.0, 0.0),
Vec3(5.0, 0.0, 0.0)],
1: [Vec3(0.0, 0.0, 0.0), Vec3(-5.77, 10.0, 0.0), Vec3(5.77, 10.0, 0.0)],
2: [Vec3(-5.77, 10.0, 0.0),
Vec3(5.77, 10.0, 0.0),
Vec3(-5.77, -10.0, 0.0),
Vec3(5.77, -10.0, 0.0)],
3: [Vec3(-10, 0, 0), Vec3(10, 0, 0)]}}
|
from psycopg2 import IntegrityError
from odoo.tests import TransactionCase
from odoo.tools import mute_logger
class GlobalTestOpenacademyCourse(TransactionCase):
'''
Global tests for the openacademy.course model
'''
# setUp: pseudo-constructor method for the tests
def setUp(self):
# Define global variables for the test methods
super(GlobalTestOpenacademyCourse, self).setUp()
self.course = self.env['openacademy.course']
# Helper method of the class that is not itself a test
def create_course(self, course_name, course_description,
course_responsible_id):
course_id = self.course.create({
'name': course_name,
'description': course_description,
'responsible_id': course_responsible_id,
})
return course_id
# Test methods start with 'def test_*(self)'
# Mute the odoo.sql_db logger to keep the expected error out of the log
@mute_logger('odoo.sql_db')
def test_10_same_name_description(self):
'''
Test creating a course with the same name and description,
to exercise the constraint that name and description must differ.
'''
# Expect the error to be raised with the expected message
with self.assertRaisesRegexp(
IntegrityError,
'new row for relation "openacademy_course" violates'
' check constraint "openacademy_course_name_description_check"'
):
# Create a course with the same name and description to raise the error
self.create_course('test', 'test', None)
@mute_logger('odoo.sql_db')
def test_20_two_courses_same_name(self):
'''
Test creating two courses with the same name,
to raise the unique-name constraint.
'''
new_id = self.create_course('test1', 'test_description', None)
# print 'new_id', new_id
with self.assertRaisesRegexp(
IntegrityError,
'duplicate key value violates unique'
' constraint "openacademy_course_name_unique"'
):
new_id2 = self.create_course('test1', 'test_description', None)
# print 'new_id2', new_id2
def test_15_duplicate_course(self):
'''
Test duplicating a course and check that it works correctly.
'''
course = self.env.ref('openacademy.course0')
course_id = course.copy()
# print 'course_id', course_id
self.assertTrue(course_id)
|
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.dataset_service import pagers
from google.cloud.aiplatform_v1.types import annotation
from google.cloud.aiplatform_v1.types import annotation_spec
from google.cloud.aiplatform_v1.types import data_item
from google.cloud.aiplatform_v1.types import dataset
from google.cloud.aiplatform_v1.types import dataset as gca_dataset
from google.cloud.aiplatform_v1.types import dataset_service
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport
from .client import DatasetServiceClient
class DatasetServiceAsyncClient:
"""The service that handles the CRUD of Vertex AI Dataset and
its child resources.
"""
_client: DatasetServiceClient
DEFAULT_ENDPOINT = DatasetServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = DatasetServiceClient.DEFAULT_MTLS_ENDPOINT
annotation_path = staticmethod(DatasetServiceClient.annotation_path)
parse_annotation_path = staticmethod(DatasetServiceClient.parse_annotation_path)
annotation_spec_path = staticmethod(DatasetServiceClient.annotation_spec_path)
parse_annotation_spec_path = staticmethod(
DatasetServiceClient.parse_annotation_spec_path
)
data_item_path = staticmethod(DatasetServiceClient.data_item_path)
parse_data_item_path = staticmethod(DatasetServiceClient.parse_data_item_path)
dataset_path = staticmethod(DatasetServiceClient.dataset_path)
parse_dataset_path = staticmethod(DatasetServiceClient.parse_dataset_path)
common_billing_account_path = staticmethod(
DatasetServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
DatasetServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(DatasetServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
DatasetServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
DatasetServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
DatasetServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(DatasetServiceClient.common_project_path)
parse_common_project_path = staticmethod(
DatasetServiceClient.parse_common_project_path
)
common_location_path = staticmethod(DatasetServiceClient.common_location_path)
parse_common_location_path = staticmethod(
DatasetServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DatasetServiceAsyncClient: The constructed client.
"""
return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DatasetServiceAsyncClient: The constructed client.
"""
return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return DatasetServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
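# Illustrative usage sketch (not part of the original source): resolving the
# endpoint and client cert source up front, e.g. to inspect them before
# constructing a client. `client_options` is assumed to be a ClientOptions
# instance provided by the caller.
#
#     api_endpoint, cert_source = (
#         DatasetServiceAsyncClient.get_mtls_endpoint_and_cert_source(
#             client_options
#         )
#     )
#     print("Using endpoint:", api_endpoint, "mTLS:", cert_source is not None)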
@property
def transport(self) -> DatasetServiceTransport:
"""Returns the transport used by the client instance.
Returns:
DatasetServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(DatasetServiceClient).get_transport_class, type(DatasetServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, DatasetServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the dataset service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DatasetServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = DatasetServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_dataset(
self,
request: Union[dataset_service.CreateDatasetRequest, dict] = None,
*,
parent: str = None,
dataset: gca_dataset.Dataset = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a Dataset.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_create_dataset():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
dataset = aiplatform_v1.Dataset()
dataset.display_name = "display_name_value"
dataset.metadata_schema_uri = "metadata_schema_uri_value"
dataset.metadata.null_value = "NULL_VALUE"
request = aiplatform_v1.CreateDatasetRequest(
parent="parent_value",
dataset=dataset,
)
# Make the request
operation = client.create_dataset(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateDatasetRequest, dict]):
The request object. Request message for
[DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset].
parent (:class:`str`):
Required. The resource name of the Location to create
the Dataset in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`):
Required. The Dataset to create.
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.Dataset` A
collection of DataItems and Annotations on them.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, dataset])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.CreateDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if dataset is not None:
request.dataset = dataset
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_dataset,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_dataset.Dataset,
metadata_type=dataset_service.CreateDatasetOperationMetadata,
)
# Done; return the response.
return response
async def get_dataset(
self,
request: Union[dataset_service.GetDatasetRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dataset.Dataset:
r"""Gets a Dataset.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_dataset():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetDatasetRequest(
name="name_value",
)
# Make the request
response = client.get_dataset(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]):
The request object. Request message for
[DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset].
name (:class:`str`):
Required. The name of the Dataset
resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Dataset:
A collection of DataItems and
Annotations on them.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.GetDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_dataset,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def update_dataset(
self,
request: Union[dataset_service.UpdateDatasetRequest, dict] = None,
*,
dataset: gca_dataset.Dataset = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_dataset.Dataset:
r"""Updates a Dataset.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_update_dataset():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
dataset = aiplatform_v1.Dataset()
dataset.display_name = "display_name_value"
dataset.metadata_schema_uri = "metadata_schema_uri_value"
dataset.metadata.null_value = "NULL_VALUE"
request = aiplatform_v1.UpdateDatasetRequest(
dataset=dataset,
)
# Make the request
response = client.update_dataset(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateDatasetRequest, dict]):
The request object. Request message for
[DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset].
dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`):
Required. The Dataset which replaces
the resource on the server.
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. The update mask applies to the resource. For
the ``FieldMask`` definition, see
[google.protobuf.FieldMask][google.protobuf.FieldMask].
Updatable fields:
- ``display_name``
- ``description``
- ``labels``
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Dataset:
A collection of DataItems and
Annotations on them.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([dataset, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.UpdateDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if dataset is not None:
request.dataset = dataset
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_dataset,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("dataset.name", request.dataset.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_datasets(
self,
request: Union[dataset_service.ListDatasetsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDatasetsAsyncPager:
r"""Lists Datasets in a Location.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_datasets():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDatasetsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_datasets(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListDatasetsRequest, dict]):
The request object. Request message for
[DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets].
parent (:class:`str`):
Required. The name of the Dataset's parent resource.
Format: ``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsAsyncPager:
Response message for
[DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.ListDatasetsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_datasets,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListDatasetsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def delete_dataset(
self,
request: Union[dataset_service.DeleteDatasetRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a Dataset.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_delete_dataset():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteDatasetRequest(
name="name_value",
)
# Make the request
operation = client.delete_dataset(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteDatasetRequest, dict]):
The request object. Request message for
[DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset].
name (:class:`str`):
Required. The resource name of the Dataset to delete.
Format:
``projects/{project}/locations/{location}/datasets/{dataset}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.DeleteDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_dataset,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
async def import_data(
self,
request: Union[dataset_service.ImportDataRequest, dict] = None,
*,
name: str = None,
import_configs: Sequence[dataset.ImportDataConfig] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Imports data into a Dataset.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_import_data():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
import_configs = aiplatform_v1.ImportDataConfig()
import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2']
import_configs.import_schema_uri = "import_schema_uri_value"
request = aiplatform_v1.ImportDataRequest(
name="name_value",
import_configs=import_configs,
)
# Make the request
operation = client.import_data(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ImportDataRequest, dict]):
The request object. Request message for
[DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
name (:class:`str`):
Required. The name of the Dataset resource. Format:
``projects/{project}/locations/{location}/datasets/{dataset}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
import_configs (:class:`Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]`):
Required. The desired input
locations. The contents of all input
locations will be imported in one batch.
This corresponds to the ``import_configs`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.ImportDataResponse`
Response message for
[DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, import_configs])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.ImportDataRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if import_configs:
request.import_configs.extend(import_configs)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.import_data,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
dataset_service.ImportDataResponse,
metadata_type=dataset_service.ImportDataOperationMetadata,
)
# Done; return the response.
return response
async def export_data(
self,
request: Union[dataset_service.ExportDataRequest, dict] = None,
*,
name: str = None,
export_config: dataset.ExportDataConfig = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Exports data from a Dataset.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_export_data():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
export_config = aiplatform_v1.ExportDataConfig()
export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
request = aiplatform_v1.ExportDataRequest(
name="name_value",
export_config=export_config,
)
# Make the request
operation = client.export_data(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportDataRequest, dict]):
The request object. Request message for
[DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData].
name (:class:`str`):
Required. The name of the Dataset resource. Format:
``projects/{project}/locations/{location}/datasets/{dataset}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
export_config (:class:`google.cloud.aiplatform_v1.types.ExportDataConfig`):
Required. The desired output
location.
This corresponds to the ``export_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.ExportDataResponse`
Response message for
[DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, export_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.ExportDataRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if export_config is not None:
request.export_config = export_config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.export_data,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
dataset_service.ExportDataResponse,
metadata_type=dataset_service.ExportDataOperationMetadata,
)
# Done; return the response.
return response
async def list_data_items(
self,
request: Union[dataset_service.ListDataItemsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDataItemsAsyncPager:
r"""Lists DataItems in a Dataset.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_data_items():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDataItemsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_data_items(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListDataItemsRequest, dict]):
The request object. Request message for
[DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems].
parent (:class:`str`):
Required. The resource name of the Dataset to list
DataItems from. Format:
``projects/{project}/locations/{location}/datasets/{dataset}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsAsyncPager:
Response message for
[DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.ListDataItemsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_data_items,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListDataItemsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_annotation_spec(
self,
request: Union[dataset_service.GetAnnotationSpecRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> annotation_spec.AnnotationSpec:
r"""Gets an AnnotationSpec.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_annotation_spec():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetAnnotationSpecRequest(
name="name_value",
)
# Make the request
response = client.get_annotation_spec(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest, dict]):
The request object. Request message for
[DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec].
name (:class:`str`):
Required. The name of the AnnotationSpec resource.
Format:
``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.AnnotationSpec:
Identifies a concept with which
                DataItems may be annotated.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.GetAnnotationSpecRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_annotation_spec,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_annotations(
self,
request: Union[dataset_service.ListAnnotationsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAnnotationsAsyncPager:
r"""Lists Annotations belongs to a dataitem
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_annotations():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListAnnotationsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_annotations(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListAnnotationsRequest, dict]):
The request object. Request message for
[DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
parent (:class:`str`):
Required. The resource name of the DataItem to list
Annotations from. Format:
``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsAsyncPager:
Response message for
[DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.ListAnnotationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_annotations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListAnnotationsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("DatasetServiceAsyncClient",)
|
"""
This file contains information required by a problem for DCS I/O distribution.
"""
from typing import List, Dict
from itertools import groupby
from operator import itemgetter
from io_distribution.dcs_objects import *
from io_distribution.distance import *
from io_distribution.solvers.base_solver import *
class DistributionProblem:
""" This class contains information about a DCS I/O distribution problem.
    :ivar list[Device] devices: the list of devices in the DCS system, used to generate the I/O list.
    :ivar list[DeviceInterfaceSignal] device_interfaces: the list of device interfaces, used to generate the I/O list.
    :ivar list[ModuleType] module_types: the module types provided by the DCS vendor.
    :ivar list[CabinetType] cabinet_types: the cabinet types provided by the DCS vendor.
    :ivar DistributionProblemConstants constants: the constants used by the distance calculator.
"""
def __init__(self, **kwargs):
self.devices = kwargs.get("devices", [])
self.device_interfaces = kwargs.get("device_interfaces", [])
self.module_types = kwargs.get("module_types", [])
self.cabinet_types = kwargs.get("cabinet_types", [])
self.constants = kwargs.get("constants", DistributionProblemConstants())
self.io_centroids = None
self.io_assignments = None
self.cabinet_centroids = None
self.cabinet_assignments = None
self.signals = self.build_signals()
self.distance_calculator = Distance
def build_signals(self) -> list:
""" Build signals from devices and device_interfaces.
"""
signals = []
for device in self.devices:
for interface in self.device_interfaces:
if interface.name == device.device_interface_name:
signals.append(Signal(device, interface))
return signals
def get_distance(self, signal1: Signal, signal2: Signal) -> float:
""" Calculate the two signals' distance.
"""
return Distance(self, signal1, signal2).get_distance()
def get_iocard_distance(self, signal1: Signal, signal2: Signal) -> float:
""" Calculate the two io module's distance.
"""
return Distance(self, signal1, signal2).get_iocard_distance()
def solve(self, iocard_solver: BaseSolver, cabinet_solver: BaseSolver = None):
iocard_solver.problem = self
print("Solving distribution using ", type(iocard_solver))
self.io_centroids, self.io_assignments = iocard_solver.solve(self.signals, self.get_distance, self.check_capability)
if cabinet_solver is None:
return self.io_centroids, self.io_assignments
else:
cabinet_solver.problem = self
print("Solving iocard using ", type(cabinet_solver))
self.cabinet_centroids, self.cabinet_assignments = cabinet_solver.solve(self.io_centroids, self.get_iocard_distance, self.check_cabinet_capability)
return self.io_centroids, self.io_assignments, self.cabinet_centroids, self.cabinet_assignments
def get_signal(self, name):
result = tuple(filter(lambda x: x.name == name, self.signals))
if len(result) > 0:
return result[0]
return None
def check_capability(self, io_group: list):
return any(len(io_group) < i.capability for i in self.get_available_module_types(io_group))
    def check_cabinet_capability(self, io_group: List[Signal]):
        return any(len(io_group) < i.capability for i in self.cabinet_types)
def get_available_module_types(self, io_group: List[Signal]) -> List[ModuleType]:
available_module_types = []
for i in self.module_types:
if all(io.device_interface.signal_type in i.accepting_signal_types for io in io_group):
available_module_types.append(i)
return available_module_types
def get_module_type(self, io_group: List[Signal]) -> ModuleType:
available_module_types = self.get_available_module_types(io_group)
return min(available_module_types, key=lambda x: x.cost)
def save_results(self, file: str):
import xlwt
workbook = xlwt.Workbook()
i = 0
self.cabinet_assignments.sort(key=lambda x: x.assign_to_signal)
grouped_cabinet_assignments = groupby(self.cabinet_assignments, key=lambda x: x.assign_to_signal)
        # Cabinet centroids
for cabinet_centroid, cabinet_assignment_items in grouped_cabinet_assignments:
cabinet_assignment_items = list(cabinet_assignment_items)
i += 1
sheet = workbook.add_sheet("CAB-%03d" % i)
            # I/O card centroids
j = 0
for cabinet_assignment in cabinet_assignment_items:
io_assignments = list(filter(lambda x: x.assign_to_signal == cabinet_assignment.signal, self.io_assignments))
module_type = self.get_module_type([a.signal for a in io_assignments])
sheet.write(0, j, "%s {%f}" % (module_type.name, cabinet_assignment.distance))
sheet.col(j).width = 256 * 25
k = 1
                # All signals assigned to this I/O card centroid
for io_assignment in io_assignments:
sheet.write(k, j, "%s (%f)" % (io_assignment.signal.name, io_assignment.distance))
k += 1
j += 1
workbook.save(file)
class DistributionProblemConstants:
""" This class provides some constants for distribution problem.
"""
def __init__(self):
self.safety_seperation_satisfied_distance = 0
self.safety_seperation_not_satisfied_distance = float('inf')
self.location_map = {"a": {"b": 0.2, "c": 0.4},
"b": {"a": 0.2, "c": 0.2},
"c": {"a": 0.4, "b": 0.2}}
self.group_devices_distance = 0
self.default_devices_distance = 0.5
self.redundant_devices_distance = 1
self.run_in_blackout_satisfied_distance = 0
self.run_in_blackout_not_satisfied_distance = 1
self.function_group_equal_distance = 0
self.function_group_related_distance = 0.4
self.function_group_not_related_distance = 1
self.function_group_map = {"Protect": [],
"Control": ["BOP"],
"Diverse": [],
"BOP": ["Control"]}
self.system_equal_distance = 0
self.system_not_equal_distance = 1
self.distance_norm_function = 2
""" acceptable value (from numpy norm function):
ord norm for matrices norm for vectors
None Frobenius norm 2-norm
‘fro’ Frobenius norm –
‘nuc’ nuclear norm –
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 – sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other – sum(abs(x)**ord)**(1./ord)
"""
|
"""This example runs a report from a saved query.
"""
import tempfile
from googleads import ad_manager
from googleads import errors
SAVED_QUERY_ID = 'INSERT_SAVED_QUERY_ID_HERE'
def main(client, saved_query_id):
# Initialize appropriate service.
report_service = client.GetService('ReportService', version='v202111')
# Initialize a DataDownloader.
report_downloader = client.GetDataDownloader(version='v202111')
# Create statement object to filter for an order.
statement = (ad_manager.StatementBuilder(version='v202111')
.Where('id = :id')
.WithBindVariable('id', int(saved_query_id))
.Limit(1))
response = report_service.getSavedQueriesByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
saved_query = response['results'][0]
if saved_query['isCompatibleWithApiVersion']:
report_job = {}
# Set report query and optionally modify it.
report_job['reportQuery'] = saved_query['reportQuery']
try:
# Run the report and wait for it to finish.
report_job_id = report_downloader.WaitForReport(report_job)
      except errors.AdManagerReportError as e:
        print('Failed to generate report. Error was: %s' % e)
        # Bail out, since report_job_id is needed below.
        return
# Change to your preferred export format.
export_format = 'CSV_DUMP'
report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False)
# Download report data.
report_downloader.DownloadReportToFile(
report_job_id, export_format, report_file)
report_file.close()
# Display results.
print('Report job with id "%s" downloaded to:\n%s' % (
report_job_id, report_file.name))
else:
print('The query specified is not compatible with the API version.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, SAVED_QUERY_ID)
|
def save_array_as_PNG(img, fname, ftype_req = -1):
"""
Save an array as a PNG.
img: a 3D NumPy array of type uint8 with shape (ny,nx,nc)
fname: output file name
    ftype_req: filter type to be used
The PNG specification defines 5 different possible filters which are
numbered 0 to 4 (inclusively). Filter #0 is "no filtering". If the user
defines "ftype_req" as one of the identifying integers then that filter will
be used for the entire PNG file. If the user defines "ftype_req" as "-1" (or
does not define "ftype_req" at all) then adaptive filtering will be used
whereby an attempt is made to predict which filtering method will yield the
smallest compressed stream.
"""
# Import modules ...
import binascii
import numpy
import zlib
# Load sub-functions ...
from .paeth_filter import paeth_filter
# Find image size ...
ny, nx, nc = img.shape
# Try opening the PNG ...
with open(fname, "wb") as fobj:
# **********************************************************************
# * WRITE THE SIGNATURE *
# **********************************************************************
fobj.write(binascii.unhexlify("89504E470D0A1A0A"))
# **********************************************************************
# * CREATE "IHDR" CHUNK AND WRITE IT *
# **********************************************************************
ihdr = bytearray()
ihdr += numpy.uint32(13).byteswap().tobytes() # Length
ihdr += bytearray("IHDR") # Chunk type
ihdr += numpy.uint32(nx).byteswap().tobytes() # IHDR : Width
ihdr += numpy.uint32(ny).byteswap().tobytes() # IHDR : Height
ihdr += numpy.uint8(8).byteswap().tobytes() # IHDR : Bit depth
ihdr += numpy.uint8(2).byteswap().tobytes() # IHDR : Colour type
ihdr += numpy.uint8(0).byteswap().tobytes() # IHDR : Compression method
ihdr += numpy.uint8(0).byteswap().tobytes() # IHDR : Filter method
ihdr += numpy.uint8(0).byteswap().tobytes() # IHDR : Interlace method
ihdr += numpy.uint32(binascii.crc32(ihdr[4:])).byteswap().tobytes() # CRC-32
fobj.write(ihdr)
# **********************************************************************
# * CREATE "IDAT" CHUNK AND WRITE IT *
# **********************************************************************
idat = bytearray()
idat += numpy.uint32(0).byteswap().tobytes() # Length
idat += bytearray("IDAT") # Chunk type
stream = ""
# Loop over rows ...
for iy in xrange(ny):
row = numpy.zeros((5, nc, nx), dtype = numpy.uint8)
# Calculate stream for "none" filter (if required) ...
if ftype_req == -1 or ftype_req == 0:
ftype = 0
for ix in xrange(nx):
row[ftype, :, ix] = img[iy, ix, :]
# Calculate stream for "sub" filter (if required) ...
if ftype_req == -1 or ftype_req == 1:
ftype = 1
for ix in xrange(nx):
for ic in xrange(nc):
if ix == 0:
p1 = numpy.int16(0)
else:
p1 = img[iy, ix - 1, ic].astype(numpy.int16)
diff = img[iy, ix, ic].astype(numpy.int16) - p1
diff = numpy.mod(diff, 256)
row[ftype, ic, ix] = diff.astype(numpy.uint8)
# Calculate stream for "up" filter (if required) ...
if ftype_req == -1 or ftype_req == 2:
ftype = 2
for ix in xrange(nx):
for ic in xrange(nc):
if iy == 0:
p1 = numpy.int16(0)
else:
p1 = img[iy - 1, ix, ic].astype(numpy.int16)
diff = img[iy, ix, ic].astype(numpy.int16) - p1
diff = numpy.mod(diff, 256)
row[ftype, ic, ix] = diff.astype(numpy.uint8)
# Calculate stream for "average" filter (if required) ...
if ftype_req == -1 or ftype_req == 3:
ftype = 3
for ix in xrange(nx):
for ic in xrange(nc):
if ix == 0:
p1 = numpy.int16(0)
else:
p1 = img[iy, ix - 1, ic].astype(numpy.int16)
if iy == 0:
p2 = numpy.int16(0)
else:
p2 = img[iy - 1, ix, ic].astype(numpy.int16)
diff = img[iy, ix, ic].astype(numpy.int16) - numpy.int16((p1.astype(numpy.int16) + p2.astype(numpy.int16)) / numpy.int16(2))
diff = numpy.mod(diff, 256)
row[ftype, ic, ix] = diff.astype(numpy.uint8)
# Calculate stream for "Paeth" filter (if required) ...
if ftype_req == -1 or ftype_req == 4:
ftype = 4
for ix in xrange(nx):
for ic in xrange(nc):
if ix == 0:
p1 = numpy.int16(0)
else:
p1 = img[iy, ix - 1, ic].astype(numpy.int16)
if iy == 0:
p2 = numpy.int16(0)
else:
p2 = img[iy - 1, ix, ic].astype(numpy.int16)
if ix == 0 or iy == 0:
p3 = numpy.int16(0)
else:
p3 = img[iy - 1, ix - 1, ic].astype(numpy.int16)
diff = img[iy, ix, ic].astype(numpy.int16) - paeth_filter(p1, p2, p3).astype(numpy.int16)
diff = numpy.mod(diff, 256)
row[ftype, ic, ix] = diff.astype(numpy.uint8)
# Figure out which stream to use ...
if ftype_req == -1:
                tmp1 = None                                                     # minimum filtered-byte sum found so far ...
                for ftype in xrange(5):
                    tmp2 = row[ftype, :, :].sum().astype(numpy.uint64)
                    if tmp1 is None or tmp2 < tmp1:
                        tmp1 = tmp2
                        ftype_best = ftype
else:
ftype_best = ftype_req
# Use the best/requested stream for this row ...
stream += numpy.uint8(ftype_best).byteswap().tobytes()
for ix in xrange(nx):
stream += row[ftype_best, :, ix].astype(numpy.uint8).byteswap().tobytes()
idat += zlib.compress(stream, 9) # IDAT : Data
idat[0:4] = numpy.uint32(len(idat[8:])).byteswap().tobytes() # Length
idat += numpy.uint32(binascii.crc32(idat[4:])).byteswap().tobytes() # CRC-32
fobj.write(idat)
# **********************************************************************
# * CREATE "IEND" CHUNK AND WRITE IT *
# **********************************************************************
iend = bytearray()
iend += numpy.uint32(0).byteswap().tobytes() # Length
iend += bytearray("IEND") # Chunk type
iend += numpy.uint32(binascii.crc32(iend[4:])).byteswap().tobytes() # CRC-32
fobj.write(iend)
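# Hedged usage sketch (illustrative only): "mypkg" is a hypothetical package name;
# this module and paeth_filter.py are assumed to live inside it.
#
#     import numpy
#     from mypkg.save_array_as_PNG import save_array_as_PNG
#
#     ny, nx, nc = 64, 128, 3
#     img = numpy.zeros((ny, nx, nc), dtype=numpy.uint8)
#     img[:, :, 0] = numpy.linspace(0, 255, nx).astype(numpy.uint8)  # red gradient
#     save_array_as_PNG(img, "gradient.png")        # adaptive filtering (default)
#     save_array_as_PNG(img, "gradient_f0.png", 0)  # force filter #0 ("none")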
|
__source__ = 'https://leetcode.com/problems/accounts-merge/'
import unittest
import collections
class Solution(object):
def accountsMerge(self, accounts):
"""
:type accounts: List[List[str]]
:rtype: List[List[str]]
"""
em_to_name = {}
graph = collections.defaultdict(set)
for acc in accounts:
name = acc[0]
for email in acc[1:]:
graph[acc[1]].add(email)
graph[email].add(acc[1])
em_to_name[email] = name
seen = set()
ans = []
for email in graph:
if email not in seen:
seen.add(email)
stack = [email]
component = []
while stack:
node = stack.pop()
component.append(node)
for nei in graph[node]:
if nei not in seen:
seen.add(nei)
stack.append(nei)
ans.append([em_to_name[email]] + sorted(component))
return ans
class Solution2(object):
def accountsMerge(self, accounts):
"""
:type accounts: List[List[str]]
:rtype: List[List[str]]
"""
dsu = DSU()
em_to_name = {}
em_to_id = {}
i = 0
for acc in accounts:
name = acc[0]
for email in acc[1:]:
em_to_name[email] = name
if email not in em_to_id:
em_to_id[email] = i
i += 1
dsu.union(em_to_id[acc[1]], em_to_id[email])
ans = collections.defaultdict(list)
for email in em_to_name:
ans[dsu.find(em_to_id[email])].append(email)
        return [[em_to_name[v[0]]] + sorted(v) for v in ans.values()]
class DSU:
def __init__(self):
        self.p = list(range(10001))
def find(self, x):
if self.p[x] != x:
self.p[x] = self.find(self.p[x])
return self.p[x]
def union(self, x, y):
self.p[self.find(x)] = self.find(y)
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
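    # Added illustrative test: the classic LeetCode example for this problem.
    # Result order is unspecified, so compare as a set of tuples.
    def test_accounts_merge_example(self):
        accounts = [["John", "johnsmith@mail.com", "john_newyork@mail.com"],
                    ["John", "johnsmith@mail.com", "john00@mail.com"],
                    ["Mary", "mary@mail.com"],
                    ["John", "johnnybravo@mail.com"]]
        expected = {
            ("John", "john00@mail.com", "john_newyork@mail.com", "johnsmith@mail.com"),
            ("Mary", "mary@mail.com"),
            ("John", "johnnybravo@mail.com"),
        }
        self.assertEqual({tuple(acc) for acc in Solution().accountsMerge(accounts)},
                         expected)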
if __name__ == '__main__':
unittest.main()
Java = '''
class Solution {
public List<List<String>> accountsMerge(List<List<String>> accounts) {
Map<String, String> emailToName = new HashMap();
Map<String, ArrayList<String>> graph = new HashMap();
for (List<String> account : accounts) {
String name = "";
for (String email : account) {
if (name == "") {
name = email;
continue;
}
graph.computeIfAbsent(email, x -> new ArrayList<String>()).add(account.get(1));
graph.computeIfAbsent(account.get(1), x -> new ArrayList<String>()).add(email);
emailToName.put(email, name);
}
}
Set<String> seen = new HashSet();
List<List<String>> ans = new ArrayList();
for (String email: graph.keySet()) {
if (!seen.contains(email)) {
seen.add(email);
Stack<String> stack = new Stack();
stack.push(email);
List<String> component = new ArrayList();
while (!stack.empty()) {
String node = stack.pop();
component.add(node);
for (String nei : graph.get(node)) {
if (!seen.contains(nei)) {
seen.add(nei);
stack.push(nei);
}
}
}
Collections.sort(component);
component.add(0, emailToName.get(email));
ans.add(component);
}
}
return ans;
}
}
class Solution {
//union find
class Node {
String name;
ArrayList<String> accounts;
Node parent;
public Node(String name) {
this.name = name;
accounts = new ArrayList();
parent = this;
}
}
Node findParent(Node node) {
while (node.parent != node) {
// path compression
node.parent = node.parent.parent;
node = node.parent;
}
return node;
}
public List<List<String>> accountsMerge(List<List<String>> accounts) {
HashMap<String, Node> map = new HashMap();
List<Node> all = new ArrayList();
//group
for (int i = 0; i < accounts.size(); i++) {
List<String> cur = accounts.get(i);
Node node = new Node(cur.get(0));
for (int j = 1; j < cur.size(); j++) {
String email = cur.get(j);
if (!map.containsKey(email)) {
map.put(email, node);
node.accounts.add(email);
} else { //group them to the same parent
Node node_p = findParent(node);
Node exist_p = findParent(map.get(email));
node_p.parent = exist_p;
}
}
all.add(node);
}
// add account to parent
for (Node node : all) {
if (node.parent != node) {
Node p = findParent(node);
p.accounts.addAll(node.accounts);
}
}
//get ans email list
List<List<String>> res = new ArrayList();
for (Node node : all) {
if (node.parent == node) {
List<String> tmp = new ArrayList();
tmp.add(node.name);
Collections.sort(node.accounts);
tmp.addAll(node.accounts);
res.add(tmp);
}
}
return res;
}
}
class Solution {
public List<List<String>> accountsMerge(List<List<String>> accounts) {
DSU dsu = new DSU();
Map<String, String> emailToName = new HashMap();
Map<String, Integer> emailToID = new HashMap();
int id = 0;
for (List<String> account: accounts) {
String name = "";
for (String email: account) {
if (name == "") {
name = email;
continue;
}
emailToName.put(email, name);
if (!emailToID.containsKey(email)) {
emailToID.put(email, id++);
}
dsu.union(emailToID.get(account.get(1)), emailToID.get(email));
}
}
Map<Integer, List<String>> ans = new HashMap();
for (String email: emailToName.keySet()) {
int index = dsu.find(emailToID.get(email));
ans.computeIfAbsent(index, x-> new ArrayList()).add(email);
}
for (List<String> component: ans.values()) {
Collections.sort(component);
component.add(0, emailToName.get(component.get(0)));
}
return new ArrayList(ans.values());
}
}
class DSU {
int[] parent;
public DSU() {
parent = new int[10001];
for (int i = 0; i <= 10000; ++i)
parent[i] = i;
}
public int find(int x) {
if (parent[x] != x) parent[x] = find(parent[x]);
return parent[x];
}
public void union(int x, int y) {
parent[find(x)] = find(y);
}
}
class Solution {
public List<List<String>> accountsMerge(List<List<String>> accounts) {
Map<String, String> roots = new HashMap();
Map<String, String> owner = new HashMap();
Map<String, TreeSet<String>> unions = new HashMap();
for (List<String> acc : accounts) {
for (int i = 1; i < acc.size(); i++) {
roots.put(acc.get(i), acc.get(i));
owner.put(acc.get(i), acc.get(0));
}
}
//union all emails in the same account list
for (List<String> acc : accounts) {
String x = find(acc.get(1), roots);
for (int i = 2; i < acc.size(); i++) {
roots.put(find(acc.get(i), roots), x);
}
}
//union emails accros diff accounts
for (List<String> acc : accounts) {
String x = find(acc.get(1), roots);
unions.computeIfAbsent(x, k-> new TreeSet<String>());
for (int i = 1; i < acc.size(); i++) {
unions.get(x).add(acc.get(i));
}
}
List<List<String>> res = new ArrayList();
for (String e : unions.keySet()) {
List<String> emails = new ArrayList(unions.get(e));
emails.add(0, owner.get(e));
res.add(emails);
}
return res;
}
private String find(String x, Map<String, String> roots) {
return roots.get(x).equals(x) ? x : find(roots.get(x), roots);
}
}
'''
|
from . import detection
from argparse import ArgumentParser
def size_tuple(size_str):
w, h = size_str.split('x')
return (int(w), int(h))
def main():
    parser = ArgumentParser(description='Run detection and record output videos.')
parser.add_argument('--fps', type=int, default=10)
parser.add_argument('--size', type=size_tuple, default=(1280,720), help='The frame size in WxH')
parser.add_argument('--codec', default='', help='Codec used to encode the video')
parser.add_argument('--ext', default='mkv', help='File extension of the output videos')
args = parser.parse_args()
detection.start(args)
|
from __future__ import unicode_literals
import sys
from django.conf import settings
from django.template.base import Library
from django.utils import six
from django.utils.encoding import smart_text
from leonardo.module.web.widget import ApplicationWidget
from leonardo.module.web.widget.application.reverse import app_reverse as do_app_reverse
register = Library()
def render(self, context):
from django.core.urlresolvers import reverse, NoReverseMatch
args = [arg.resolve(context) for arg in self.args]
kwargs = {
smart_text(k, 'ascii'): v.resolve(context)
for k, v in self.kwargs.items()
}
view_name = self.view_name.resolve(context)
try:
current_app = context.request.current_app
except AttributeError:
# Change the fallback value to None when the deprecation path for
# Context.current_app completes in Django 2.0.
current_app = context.current_app
# Try to look up the URL twice: once given the view name, and again
# relative to what we guess is the "main" app. If they both fail,
# re-raise the NoReverseMatch unless we're using the
# {% url ... as var %} construct in which case return nothing.
url = ''
try:
url = reverse(
view_name, args=args, kwargs=kwargs, current_app=current_app)
except NoReverseMatch:
# try external apps
for urlconf, config in six.iteritems(
ApplicationWidget._feincms_content_models[0].ALL_APPS_CONFIG):
partials = view_name.split(':')[1:]
try:
url = do_app_reverse(
':'.join(partials), urlconf, args=args, kwargs=kwargs,
current_app=context.current_app)
except NoReverseMatch:
pass
else:
return url
exc_info = sys.exc_info()
if settings.SETTINGS_MODULE:
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse(project_name + '.' + view_name,
args=args, kwargs=kwargs,
current_app=current_app)
except NoReverseMatch:
if self.asvar is None:
# Re-raise the original exception, not the one with
# the path relative to the project. This makes a
# better error message.
six.reraise(*exc_info)
else:
if self.asvar is None:
raise
if self.asvar:
context[self.asvar] = url
return ''
else:
return url
|
import json
import uuid
import sqlalchemy as sql
from sqlalchemy import orm
ENDPOINT_TYPES = ['public', 'internal', 'admin']
def upgrade(migrate_engine):
"""Split each legacy endpoint into separate records for each interface."""
meta = sql.MetaData()
meta.bind = migrate_engine
legacy_table = sql.Table('endpoint_v2', meta, autoload=True)
new_table = sql.Table('endpoint_v3', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
for ref in session.query(legacy_table).all():
# pull urls out of extra
extra = json.loads(ref.extra)
urls = dict((i, extra.pop('%surl' % i)) for i in ENDPOINT_TYPES)
for interface in ENDPOINT_TYPES:
endpoint = {
'id': uuid.uuid4().hex,
'legacy_endpoint_id': ref.id,
'interface': interface,
'region': ref.region,
'service_id': ref.service_id,
'url': urls[interface],
'extra': json.dumps(extra),
}
insert = new_table.insert().values(endpoint)
migrate_engine.execute(insert)
session.commit()
session.close()
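# Worked example for the split above (illustrative values): a legacy endpoint_v2
# row with id=E1, region=RegionOne, service_id=S1 and
#   extra='{"publicurl": "http://h:5000/v2.0", "internalurl": "http://h:5000/v2.0",
#          "adminurl": "http://h:35357/v2.0", "enabled": true}'
# becomes three endpoint_v3 rows sharing legacy_endpoint_id=E1, one per interface
# ('public', 'internal', 'admin'), each carrying the matching url; the *url keys
# are popped out of extra, so each new row keeps extra='{"enabled": true}'.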
def downgrade(migrate_engine):
"""Re-create the v2 endpoints table based on v3 endpoints."""
meta = sql.MetaData()
meta.bind = migrate_engine
legacy_table = sql.Table('endpoint_v2', meta, autoload=True)
new_table = sql.Table('endpoint_v3', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
for ref in session.query(new_table).all():
q = session.query(legacy_table)
q = q.filter_by(id=ref.legacy_endpoint_id)
legacy_ref = q.first()
if legacy_ref:
# We already have one, so just update the extra
# attribute with the urls.
extra = json.loads(legacy_ref.extra)
extra['%surl' % ref.interface] = ref.url
values = {'extra': json.dumps(extra)}
update = legacy_table.update().\
where(legacy_table.c.id == legacy_ref.id).\
values(values)
migrate_engine.execute(update)
else:
# This is the first one of this legacy ID, so
# we can insert instead.
extra = json.loads(ref.extra)
extra['%surl' % ref.interface] = ref.url
endpoint = {
'id': ref.legacy_endpoint_id,
'region': ref.region,
'service_id': ref.service_id,
'extra': json.dumps(extra),
}
insert = legacy_table.insert().values(endpoint)
migrate_engine.execute(insert)
session.commit()
session.close()
|
import os
from django.test import TestCase
from django.test.utils import override_settings
from django.template import Template, Context
from achilles import blocks
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(__file__), 'templates/'),),
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
)
class BlocksTests(TestCase):
@classmethod
    def setUpClass(cls):
        super(BlocksTests, cls).setUpClass()
        cls.register = blocks.Library()
def test_render_function_block(self):
@self.register.block(template_name='block_template.html')
def message():
return {'message': 'foo'}
out = Template(
"{% load achilles %}"
"{% ablock 'message' %}").render(Context())
self.assertEqual(out, '<div data-ablock="message">foo\n</div>')
def test_render_function_block_with_context(self):
@self.register.block(template_name='block_template.html',
takes_context=True)
def message(context):
return {'message': 'foo'}
out = Template(
"{% load achilles %}"
"{% ablock 'message' %}").render(Context())
self.assertEqual(out, '<div data-ablock="message">foo\n</div>')
def test_render_function_block_returning_same_context(self):
@self.register.block(template_name='block_template.html',
takes_context=True)
def message(context):
context['message'] = 'foo'
return context
out = Template(
"{% load achilles %}"
"{% ablock 'message' %}").render(Context())
self.assertEqual(out, '<div data-ablock="message">foo\n</div>')
def test_render_class_block(self):
@self.register.block('message')
class Message(blocks.Block):
template_name = 'block_template.html'
def get_context_data(self, *args, **kwargs):
context = super(Message, self).get_context_data(*args,
**kwargs)
context.update({'message': 'foo'})
return context
out = Template(
"{% load achilles %}"
"{% ablock 'message' %}").render(Context())
self.assertEqual(out, '<div data-ablock="message">foo\n</div>')
def test_render_lazy_block(self):
@self.register.block(template_name="block_template.html")
def message_lazy(self, *args, **kwargs):
return {'message': 'foo'}
out = Template(
"{% load achilles %}"
"{% ablock_lazy 'message_lazy' %}").render(Context())
self.assertEqual(out, '<div data-ablock="message_lazy" ' +
'data-ablock-lazy="true"></div>')
def test_render_noload_block(self):
@self.register.block(template_name="block_template.html")
def message_noload(self, *args, **kwargs):
return {'message': 'foo'}
out = Template(
"{% load achilles %}"
"{% ablock_noload 'message_noload' %}").render(Context())
self.assertEqual(out, '<div data-ablock="message_noload" ' +
'data-ablock-noload="true"></div>')
|
import os
import multiprocessing
import Queue
import logging
import sys
import types
import signal
from ctypes import CDLL
import cPickle as pickle
import misc
from crawler_exceptions import CrawlTimeoutError, CrawlError
logger = logging.getLogger('crawlutils')
try:
libc = CDLL('libc.so.6')
except Exception as e:
logger.warning('Can not crawl containers as there is no libc: %s' % e)
raise e
ALL_NAMESPACES = 'user pid uts ipc net mnt'.split()
IN_CONTAINER_TIMEOUT = 30
def get_pid_namespace(pid):
try:
ns = os.stat('/proc/' + str(pid) + '/ns/pid').st_ino
return ns
except Exception:
logger.debug('There is no container with pid=%s running.'
% pid)
return None
class ProcessContext:
def __init__(self, pid, namespaces):
self.namespaces = namespaces
self.pid = pid
def attach(self):
# Just to be sure log rotation does not happen in the container
logging.disable(logging.CRITICAL)
try:
self.host_ns_fds = {}
self.container_ns_fds = {}
self.host_cwd = os.getcwd()
open_process_namespaces('self', self.host_ns_fds,
self.namespaces)
open_process_namespaces(self.pid, self.container_ns_fds,
self.namespaces)
except Exception as e:
logging.disable(logging.NOTSET)
logger.exception(e)
raise
try:
attach_to_process_namespaces(self.container_ns_fds,
self.namespaces)
except Exception as e:
logging.disable(logging.NOTSET)
error_msg = (
'Could not attach to the pid=%s container mnt namespace. '
'Exception: %s' % (self.pid, e))
logger.error(error_msg)
self.detach()
raise
def detach(self):
try:
# Re-attach to the process original namespaces before attaching the
# first time to self.pid namespaces.
attach_to_process_namespaces(self.host_ns_fds,
self.namespaces)
except Exception as e:
logging.disable(logging.NOTSET)
logger.error('Could not move back to the host: %s' % e)
# XXX can't recover from this one. But it would be better to
# bubble up the error.
sys.exit(1)
# We are now in host context
try:
os.chdir(self.host_cwd)
except Exception as e:
logger.error('Could not move to the host cwd: %s' % e)
raise
logging.disable(logging.NOTSET)
try:
close_process_namespaces(self.container_ns_fds,
self.namespaces)
close_process_namespaces(self.host_ns_fds, self.namespaces)
except Exception as e:
logger.warning('Could not close the namespaces: %s' % e)
def run_as_another_namespace(
pid,
namespaces,
function,
*args,
**kwargs
):
hack_to_pre_load_modules()
context = ProcessContext(pid, namespaces)
context.attach()
queue = multiprocessing.Queue(2 ** 15)
try:
child_process = multiprocessing.Process(
name='crawler-%s' %
pid, target=function_wrapper, args=(
queue, function, args), kwargs=kwargs)
child_process.start()
except OSError:
queue.close()
raise CrawlError()
child_exception = None
try:
(result, child_exception) = queue.get(timeout=IN_CONTAINER_TIMEOUT)
except Queue.Empty:
child_exception = CrawlTimeoutError()
except Exception:
result = None
if child_exception:
result = None
child_process.join(IN_CONTAINER_TIMEOUT)
# The join failed and the process might still be alive
if child_process.is_alive():
errmsg = ('Timed out waiting for process %d to exit.' %
child_process.pid)
queue.close()
os.kill(child_process.pid, 9)
context.detach()
logger.error(errmsg)
raise CrawlTimeoutError(errmsg)
context.detach()
if result is None:
if child_exception:
raise child_exception
raise CrawlError('Unknown crawl error.')
return result
def function_wrapper(
queue,
function,
*args,
**kwargs
):
# Die if the parent dies
PR_SET_PDEATHSIG = 1
libc.prctl(PR_SET_PDEATHSIG, signal.SIGHUP)
def signal_handler_sighup(*args):
logger.warning('Crawler parent process died, so exiting... Bye!')
queue.close()
        sys.exit(1)
signal.signal(signal.SIGHUP, signal_handler_sighup)
result = None
try:
args = args[0]
result = function(*args)
# if res is a generator (i.e. function uses yield)
if isinstance(result, types.GeneratorType):
result = list(result)
queue.put((result, None))
queue.close()
sys.exit(0)
except Exception as e:
queue.put((None, e))
queue.close()
sys.exit(1)
def hack_to_pre_load_modules():
queue = multiprocessing.Queue()
def foo(queue):
queue.put('dummy')
pass
p = multiprocessing.Process(target=foo, args=(queue, ))
p.start()
p.join()
queue.get()
def open_process_namespaces(pid, namespace_fd, namespaces):
for ct_ns in namespaces:
try:
# arg 0 means readonly
namespace_fd[ct_ns] = libc.open('/proc/' + pid + '/ns/' +
ct_ns, 0)
if namespace_fd[ct_ns] == -1:
errno_msg = misc.get_errno_msg(libc)
error_msg = 'Opening the %s namespace file failed: %s' \
% (ct_ns, errno_msg)
logger.warning(error_msg)
if ct_ns == 'mnt':
raise Exception(error_msg)
except Exception as e:
error_msg = 'The open() syscall failed with: %s' % e
logger.warning(error_msg)
if ct_ns == 'mnt':
raise e
def close_process_namespaces(namespace_fd, namespaces):
for ct_ns in namespaces:
try:
libc.close(namespace_fd[ct_ns])
except Exception as e:
error_msg = 'The close() syscall failed with: %s' % e
logger.warning(error_msg)
def attach_to_process_namespaces(namespace_fd, ct_namespaces):
for ct_ns in ct_namespaces:
try:
if hasattr(libc, 'setns'):
r = libc.setns(namespace_fd[ct_ns], 0)
else:
# The Linux kernel ABI should be stable enough
__NR_setns = 308
r = libc.syscall(__NR_setns, namespace_fd[ct_ns], 0)
if r == -1:
errno_msg = misc.get_errno_msg(libc)
error_msg = ('Could not attach to the container %s '
'namespace (fd=%s): %s' %
(ct_ns, namespace_fd[ct_ns], errno_msg))
logger.warning(error_msg)
if ct_ns == 'mnt':
raise Exception(error_msg)
except Exception as e:
error_msg = 'The setns() syscall failed with: %s' % e
logger.warning(error_msg)
if ct_ns == 'mnt':
logger.exception(e)
raise e
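# Hedged usage sketch (illustrative only): "4242" is a made-up pid of a container's
# init process; the caller must run as root on the host because setns() is used.
#
#     def list_container_rootfs():
#         return os.listdir('/')
#
#     entries = run_as_another_namespace('4242', ALL_NAMESPACES, list_container_rootfs)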
|
from lib.base import BaseVaporIOAction
__all__ = [
'ReadSensor'
]
class ReadSensor(BaseVaporIOAction):
def run(self, host, sensor_type, board_id, port_id, ssl=False):
endpoint = "read/%s/%s/%s" % (sensor_type, board_id, port_id)
data = self._get_request(host=host, endpoint=endpoint, ssl=ssl)
return data
|
from dot import *
def repr(s):
    # Wrap a string in double quotes so dot treats it as a quoted attribute value.
    # Note: this shadows the builtin repr within this script.
    return '"' + s + '"'
Debug.turnon()
BlockBegin("digraph G",
bgcolor = repr('grey'),
style = repr('filled'),
)
update_node_class("base",
shape = "plaintext",
style = repr("rounded,filled"),
fontname = repr("Monaco"),
color = repr("white"),
)
update_edge_class("base",
fontcolor="slategray",
fontsize="12",
fontname="Monaco",
)
register_edge_class("encoder_class",
color = "yellow",
fontcolor = "white",
fontname = repr("Monaco"),
)
register_edge_class("outer_class",
)
Math.output_root = '_tmp/'
def NodeLabelTpl(name, kind="", size="", mathcode=""):
'''
    Build an HTML-like dot label for a Paddle node: name, kind and size cells, plus an optional rendered math image.
'''
tpl = ''.join(
cur_prefix_space() + i for i in [
"<<table>\n",
" <tr><td bgcolor=\"yellow\" colspan=\"2\"><font color=\"blue\" point-size=\"18\">{name}</font></td></tr>\n",
" <tr><td bgcolor=\"cornsilk\">{kind}</td><td bgcolor=\"cornsilk\">{size}</td></tr>\n",
" <tr><td colspan=\"2\"><img src=\"{img}\"/></td></tr>\n" if mathcode else "",
"</table>>\n",
])
return tpl.format(
name = name,
kind = kind,
size = size,
img = math_img( name, mathcode) if mathcode else "",
)
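# For reference, NodeLabelTpl("annotation", kind="concat", size="2*latent_dim")
# without mathcode expands, ignoring the indentation added by cur_prefix_space(),
# to roughly:
#
#     <<table>
#      <tr><td bgcolor="yellow" colspan="2"><font color="blue" point-size="18">annotation</font></td></tr>
#      <tr><td bgcolor="cornsilk">concat</td><td bgcolor="cornsilk">2*latent_dim</td></tr>
#     </table>>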
def EdgeLabelTpl(name, mathcode=""):
'''
    Build an HTML-like dot label for a Paddle edge: name plus an optional rendered math image.
'''
tpl = ''.join(
cur_prefix_space() + i for i in [
"<<table>\n",
" <tr><td>{name}</td></tr>\n",
" <tr><td><img src=\"{img}\"/></td></tr>" if mathcode else "",
"</table>>\n"
])
return tpl.format(
name = name,
img = math_img( name, mathcode) if mathcode else "",
)
Node("raw_query_word", "base",
color="yellow",
label = NodeLabelTpl(
name = "raw_query_word",
mathcode = "x_i",
))
Node("raw_query_embeding", "base",
label = NodeLabelTpl("raw_query_embeding",
mathcode = "E_{x_i}",
)
)
Node("new_query_word", "base",
color="yellow",
label = NodeLabelTpl("new_query_word",
kind = "DataLayer",
size = "latent_dim",
mathcode = r'''
y_{i-1}
'''
),
)
Node("new_query_next_word", "base",
label = NodeLabelTpl("new_query_next_word",
"DataLayer",
),
color = "yellow"
)
Node("new_query_embedding", "base",
label = NodeLabelTpl("new_query_embedding",
mathcode = "E{y_{i-1}}",
)
)
Edge("new_query_word", "new_query_embedding", "encoder_class",
label = "TableProjection")
Edge("raw_query_word", "raw_query_embeding", "encoder_class",
label = "TableProjection")
BlockBegin("subgraph cluster1",
label = "Encoder",
color = "black",
style = repr("filled, rounded"),
)
Node("annotation_forward", "base",
label = NodeLabelTpl("annotation_forward",
kind = "GatedRecurrentLayerGroup",
mathcode = r"\overrightarrow{h_i}")
)
Node("annotation_backward", "base",
label = NodeLabelTpl("annotation_backward",
kind = "GatedRecurrentLayerGroup",
mathcode = r"\overleftarrow{h_i}"),
)
Edge("raw_query_embeding", "annotation_forward", "encoder_class",
label = "FullMatrixProjection")
Edge("raw_query_embeding", "annotation_backward", "encoder_class",
label = "FullMatrixProjection")
Node("annotation", "base",
label = NodeLabelTpl("annotation",
kind = "concat",
size = "2*latent_dim",
mathcode = r'''
h_i = \left[
\begin{matrix}
\overrightarrow{h_i} \\
\overleftarrow{h_i}
\end{matrix}
\right]
'''))
Edge("annotation_forward", "annotation", "encoder_class", color="yellow")
Edge("annotation_backward", "annotation", "encoder_class")
Node("annotation_backward_last", "base", kind = "seqfirstins")
Edge("annotation_backward", "annotation_backward_last", "encoder_class")
Node("annotation_last_projected", "base")
Edge("annotation_backward_last", "annotation_last_projected", "encoder_class",
label = "FullMatrixProjection")
Node("annotation_projected", "base",
size = "latent_dim")
Edge("annotation", "annotation_projected", "encoder_class",
label = "FullMatrixProjection")
BlockEnd("Encoder")
BlockBegin("subgraph cluster2",
label = "decoding_layer_group",
style = repr("filled, rounded"),
color = "black",
fontcolor = "white",
fontname="Monaco",
)
Node("in_links", "base", color="beige")
Node("decoder_state_memory", "base",
label = NodeLabelTpl(
name = "decoder_state_memory",
kind = "Memory",
size = "latent_dim",
mathcode = r'''
s_{i-1}
'''
)
)
Node("encoder_out_memory", "base",
label = NodeLabelTpl(
name = "encoder_out_memory",
kind = "Memory",
size = "sequence",
mathcode = r'''
h_i
'''
))
Node("encoder_out_projected", "base",
label = NodeLabelTpl(
name = "encoder_out_projected",
kind = "Memory",
size = "latent_dim",
mathcode = r'''
U_a h_j
'''
))
Node("decoder_state_projected", "base",
label = NodeLabelTpl(
name = "decoder_state_projected",
kind = "Memory",
size = "latent_dim",
)
)
Node("expand_decoder_state_projected", "base",
label = NodeLabelTpl(
name = "expand_decoder_state_projected",
mathcode = r'''
W_a s_{i-1}
'''
)
)
Edge("decoder_state_memory", "decoder_state_projected", "encoder_class",
label = "FullMatrixProjection")
Edge("decoder_state_projected", "expand_decoder_state_projected", "encoder_class",
)
Edge("encoder_out_projected", "expand_decoder_state_projected", "encoder_class",
label = "expand",
)
Node("attention_vecs", "base",
label = NodeLabelTpl(
name = "attention_vecs",
size = "latent_dim",
mathcode = r'''
W_a s_{i-1} + U_a h_j
'''
))
Edge("expand_decoder_state_projected", "attention_vecs", "encoder_class",
label = "IdentityProjection",
)
Edge("encoder_out_projected", "attention_vecs", "encoder_class",
label = "IdentityProjection"
)
Node("attention_weight", "base",
label = NodeLabelTpl(
name = "attention_weight",
kind = "sequence_softmax",
mathcode = r'''
\alpha_{ij} = \frac{\exp (e_{ij})}
{ \sum_{k=1}^{T_x} \exp (e_{ik})}
'''
))
Edge("attention_vecs", "attention_weight", "encoder_class",
label = "FullMatrixProjection",
)
Node("context_vectors", "base",
label = NodeLabelTpl(
name = "context_vectors",
kind = "scaling",
mathcode = r'''
c_i = \sum_{j=1}^{T_x} \alpha_{ij} h_j
'''
))
Edge("attention_weight", "context_vectors", "encoder_class",
)
Edge("encoder_out_memory", "context_vectors", "encoder_class",
)
Node("context", "base",
label = NodeLabelTpl(
name = "context",
kind = "average sum",
))
Edge("context_vectors", "context", "encoder_class")
Node("decoder_state", "base",
label = NodeLabelTpl(
name = "decoder_state",
kind = "GatedRecurrentUnit",
size = "tanh",
mathcode = r'''
s_i
'''
))
Edge("context", "decoder_state", "encoder_class",
label = "FullMatrixProjection")
Edge("decoder_state", "decoder_state_memory", "encoder_class",
label="out_memory")
Node("decoder_chain", "base",
label = NodeLabelTpl(
name = "decoder_chain",
kind = "tanh",
size = "2*latent_dim",
mathcode = r'''
\tilde{t}_i = U_o s_{i-1} + V_o E_{y_{i-1}} + C_o c_i
'''
))
Edge("context", "decoder_chain", "encoder_class",
label = "FullMatrixProjection"
)
Node("output", "base",
label = NodeLabelTpl(
name = "output",
kind = "softmax",
mathcode = r'''
t_i
'''
))
Edge("decoder_chain", "output", "encoder_class",
label = "FullMatrixProjection"
)
BlockEnd("decoding")
Edge("new_query_embedding", "decoder_state", "encoder_class",
label = "FullMatrixProjection")
Edge("new_query_embedding", "decoder_chain", "encoder_class",
label = "FullMatrixProjection")
Edge("new_query_embedding", "in_links", "encoder_class",
)
Edge("annotation", "encoder_out_memory", "encoder_class",
label = "boot", style = "dashed")
Edge("annotation_projected", "encoder_out_projected", "encoder_class",
label = "boot", style = "dashed")
Edge("annotation_last_projected", "decoder_state_memory", "encoder_class",
label = "boot", style = "dashed")
Node("cost", "base",
label = NodeLabelTpl(
name = "cost",
kind = "multi-class-cross-entropy",
),
color = "red",
)
Edge("output", "cost", "encoder_class")
Edge("new_query_next_word", "cost", "encoder_class")
Node("token_error_rate", "base",
label = NodeLabelTpl(
name = "token_error_rate",
kind = "classification_error",
))
Edge("output", "token_error_rate", "encoder_class")
Edge("new_query_next_word", "token_error_rate", "encoder_class")
BlockEnd("G")
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('indicators', '0080_disaggregated_value_migration'),
]
operations = [
migrations.RemoveField(
model_name='result',
name='disaggregation_value',
),
migrations.DeleteModel(
name='DisaggregationValue',
),
]
|
"""Helper functions to add support for magnitude-based model pruning.
# Adds variables and ops to the graph to enable
# elementwise masking of weights
apply_mask(weights)
# Returns a list containing the sparsity of each of the weight tensors
get_weight_sparsity()
# Returns a list of all the masked weight tensorflow variables
get_masked_weights()
# Returns a list of all the mask tensorflow variables
get_masks()
# Returns a list of all the thresholds
get_thresholds()
# Returns a list of all the weight tensors that have been masked
get_weights()
The Pruning class uses a tf.HParams object to set up the
parameters for model pruning. Here's a typical usage:
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Add the summaries
p.add_pruning_summaries()
# Run the op
session.run(mask_update_op)
# A Pruning object also accepts an externally defined sparsity:
sparsity = tf.Variable(0.5, name = "ConstantSparsity")
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.model_pruning.python import pruning_utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_MASK_COLLECTION = core.MASK_COLLECTION
_THRESHOLD_COLLECTION = core.THRESHOLD_COLLECTION
_MASKED_WEIGHT_COLLECTION = core.MASKED_WEIGHT_COLLECTION
_WEIGHT_COLLECTION = core.WEIGHT_COLLECTION
_MASKED_WEIGHT_NAME = core.MASKED_WEIGHT_NAME
def apply_mask(x, scope=''):
"""Apply mask to a given weight tensor.
Args:
x: Input weight tensor
scope: The current variable scope. Defaults to "".
Returns:
Tensor representing masked_weights
"""
mask = pruning_utils.weight_mask_variable(x, scope)
threshold = pruning_utils.weight_threshold_variable(x, scope)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
masked_weights = math_ops.multiply(mask, x, _MASKED_WEIGHT_NAME)
  # Make sure the mask for a given variable is not added multiple times to the
  # collection. This is particularly important when applying masks to an RNN's
  # weight variables.
if mask not in ops.get_collection_ref(_MASK_COLLECTION):
ops.add_to_collection(_THRESHOLD_COLLECTION, threshold)
ops.add_to_collection(_MASK_COLLECTION, mask)
ops.add_to_collection(_MASKED_WEIGHT_COLLECTION, masked_weights)
ops.add_to_collection(_WEIGHT_COLLECTION, x)
return masked_weights
def get_masked_weights():
return ops.get_collection(_MASKED_WEIGHT_COLLECTION)
def get_masks():
return ops.get_collection(_MASK_COLLECTION)
def get_thresholds():
return ops.get_collection(_THRESHOLD_COLLECTION)
def get_weights():
return ops.get_collection(_WEIGHT_COLLECTION)
def get_weight_sparsity():
"""Get sparsity of the weights.
Args:
None
Returns:
A list containing the sparsity of each of the weight tensors
"""
masks = get_masks()
return [nn_impl.zero_fraction(mask) for mask in masks]
def get_pruning_hparams():
"""Get a tf.HParams object with the default values for the hyperparameters.
name: string
name of the pruning specification. Used for adding summaries and ops under
a common tensorflow name_scope
begin_pruning_step: integer
the global step at which to begin pruning
end_pruning_step: integer
the global step at which to terminate pruning. Defaults to -1 implying
that pruning continues till the training stops
weight_sparsity_map: list of strings
      comma separated list of weight variable name:target sparsity pairs.
      For layers/weights not in this list, sparsity as specified by the
      target_sparsity hyperparameter is used.
      E.g. [conv1:0.9,conv2/kernel:0.8]
threshold_decay: float
the decay factor to use for exponential decay of the thresholds
pruning_frequency: integer
How often should the masks be updated? (in # of global_steps)
nbins: integer
number of bins to use for histogram computation
block_height: integer
number of rows in a block (defaults to 1)
block_width: integer
number of cols in a block (defaults to 1)
block_pooling_function: string
Whether to perform average (AVG) or max (MAX) pooling in the block
(default: AVG)
initial_sparsity: float
initial sparsity value
target_sparsity: float
target sparsity value
sparsity_function_begin_step: integer
      the global step at which the gradual sparsity function begins to
      take effect
sparsity_function_end_step: integer
the global step used as the end point for the gradual sparsity function
sparsity_function_exponent: float
exponent = 1 is linearly varying sparsity between initial and final.
exponent > 1 varies more slowly towards the end than the beginning
    use_tpu: boolean
      Indicates whether to use TPU
We use the following sparsity function:
num_steps = (sparsity_function_end_step -
sparsity_function_begin_step)/pruning_frequency
sparsity(step) = (initial_sparsity - target_sparsity)*
[1-step/(num_steps -1)]**exponent + target_sparsity
Args:
None
Returns:
tf.HParams object initialized to default values
"""
return hparam.HParams(
name='model_pruning',
begin_pruning_step=0,
end_pruning_step=-1,
weight_sparsity_map=[''],
threshold_decay=0.9,
pruning_frequency=10,
nbins=256,
block_height=1,
block_width=1,
block_pooling_function='AVG',
initial_sparsity=0,
target_sparsity=0.5,
sparsity_function_begin_step=0,
sparsity_function_end_step=100,
sparsity_function_exponent=3,
use_tpu=False)
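# Illustrative sketch (not part of the original module): the gradual sparsity
# schedule described in the get_pruning_hparams() docstring, written in plain
# Python so the hyperparameters are easier to reason about. The function name
# and defaults are assumptions for the example; the graph-mode version lives in
# Pruning._setup_sparsity() below.
def _example_sparsity_schedule(step,
                               begin_step=0,
                               end_step=100,
                               initial_sparsity=0.0,
                               target_sparsity=0.5,
                               exponent=3):
  # Fraction of the schedule completed, clipped to [0, 1].
  p = min(1.0, max(0.0, float(step - begin_step) / (end_step - begin_step)))
  # Sparsity ramps from initial_sparsity towards target_sparsity.
  return (initial_sparsity - target_sparsity) * (1 - p)**exponent + target_sparsity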
class Pruning(object):
def __init__(self, spec=None, global_step=None, sparsity=None):
"""Set up the specification for model pruning.
    If a spec is provided, the sparsity is set up based on the sparsity_function
    in the spec. The effect of sparsity_function is overridden if the sparsity
    variable is passed to the constructor. This enables setting up arbitrary
    sparsity profiles externally and passing them to the pruning functions.
Args:
spec: Pruning spec as defined in pruning.proto
global_step: A tensorflow variable that is used while setting up the
sparsity function
sparsity: A tensorflow scalar variable storing the sparsity
"""
# Pruning specification
self._spec = spec if spec else get_pruning_hparams()
# A tensorflow variable that tracks the sparsity function.
# If not provided as input, the graph must already contain the global_step
# variable before calling this constructor.
self._global_step = self._setup_global_step(global_step)
# Stores the tensorflow sparsity variable.
# Built using self._setup_sparsity() or provided externally
self._sparsity = sparsity if sparsity else self._setup_sparsity()
# List of tensorflow assignments ops for new masks and thresholds
self._assign_ops = []
# Tensorflow variable keeping track of the last global step when the masks
# were updated
self._last_update_step = self._setup_last_update_step()
# Block dimensions
self._block_dim = [self._spec.block_height, self._spec.block_width]
# Block pooling function
self._block_pooling_function = self._spec.block_pooling_function
# Mapping of weight names and target sparsity
self._weight_sparsity_map = self._get_weight_sparsity_map()
def _setup_global_step(self, global_step):
graph_global_step = global_step
if graph_global_step is None:
graph_global_step = training_util.get_global_step()
return math_ops.cast(graph_global_step, dtypes.int32)
def _setup_sparsity(self):
begin_step = self._spec.sparsity_function_begin_step
end_step = self._spec.sparsity_function_end_step
initial_sparsity = self._spec.initial_sparsity
target_sparsity = self._spec.target_sparsity
exponent = self._spec.sparsity_function_exponent
if begin_step >= end_step:
raise ValueError(
'Pruning must begin before it can end. begin_step=%d, end_step=%d' %
(begin_step, end_step))
with ops.name_scope(self._spec.name):
p = math_ops.minimum(
1.0,
math_ops.maximum(
0.0,
math_ops.div(
math_ops.cast(self._global_step - begin_step, dtypes.float32),
end_step - begin_step)))
sparsity = math_ops.add(
math_ops.multiply(initial_sparsity - target_sparsity,
math_ops.pow(1 - p, exponent)),
target_sparsity,
name='sparsity')
return sparsity
def _setup_last_update_step(self):
with variable_scope.variable_scope(
self._spec.name, use_resource=self._spec.use_tpu) as scope:
try:
last_update_step = variable_scope.get_variable(
'last_mask_update_step', [],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=dtypes.int32)
except ValueError:
scope.reuse_variables()
last_update_step = variable_scope.get_variable(
'last_mask_update_step', dtype=dtypes.int32)
return last_update_step
def _get_weight_sparsity_map(self):
"""Return the map of weight_name:sparsity parsed from the hparams."""
weight_sparsity_map = {}
val_list = self._spec.weight_sparsity_map
filtered_val_list = [l for l in val_list if l]
for val in filtered_val_list:
weight_name, sparsity = val.split(':')
if float(sparsity) >= 1.0:
raise ValueError('Weight sparsity can not exceed 1.0')
weight_sparsity_map[weight_name] = float(sparsity)
return weight_sparsity_map
def _get_sparsity(self, weight_name):
"""Return target sparsity for the given layer/weight name."""
target_sparsity = [
sparsity for name, sparsity in self._weight_sparsity_map.items()
if weight_name.find(name) != -1
]
if not target_sparsity:
return self._sparsity
if len(target_sparsity) > 1:
raise ValueError(
'Multiple matches in weight_sparsity_map for weight %s' % weight_name)
# TODO(suyoggupta): This will work when initial_sparsity = 0. Generalize
# to handle other cases as well.
    return math_ops.multiply(
        self._sparsity,
        math_ops.div(target_sparsity[0], self._spec.target_sparsity))
def _update_mask(self, weights, threshold):
"""Updates the mask for a given weight tensor.
This functions first computes the cdf of the weight tensor, and estimates
the threshold value such that 'desired_sparsity' fraction of weights
have magnitude less than the threshold.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
Returns:
new_threshold: The new value of the threshold based on weights, and
sparsity at the current global_step
new_mask: A numpy array of the same size and shape as weights containing
0 or 1 to indicate which of the values in weights falls below
the threshold
Raises:
ValueError: if sparsity is not defined
"""
if self._sparsity is None:
raise ValueError('Sparsity variable undefined')
sparsity = self._get_sparsity(weights.op.name)
with ops.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = math_ops.abs(weights)
max_value = math_ops.reduce_max(abs_weights)
cdf_fn = pruning_utils.compute_cdf_from_histogram
if self._spec.use_tpu:
cdf_fn = pruning_utils.compute_cdf
norm_cdf = cdf_fn(abs_weights, [0.0, max_value], nbins=self._spec.nbins)
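      # The fraction of histogram bins whose normalized CDF is still below the
      # target sparsity, scaled by the largest weight magnitude, estimates the
      # magnitude threshold below which weights should be masked.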
current_threshold = math_ops.multiply(
math_ops.div(
math_ops.reduce_sum(
math_ops.cast(
math_ops.less(norm_cdf, sparsity), dtypes.float32)),
float(self._spec.nbins)), max_value)
smoothed_threshold = math_ops.add_n([
math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
math_ops.multiply(threshold, self._spec.threshold_decay)
])
new_mask = math_ops.cast(
math_ops.greater(abs_weights, smoothed_threshold), dtypes.float32)
return smoothed_threshold, new_mask
def _maybe_update_block_mask(self, weights, threshold):
"""Performs block-granular masking of the weights.
Block pruning occurs only if the block_height or block_width is > 1 and
if the weight tensor, when squeezed, has ndims = 2. Otherwise, elementwise
pruning occurs.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
Returns:
new_threshold: The new value of the threshold based on weights, and
sparsity at the current global_step
new_mask: A numpy array of the same size and shape as weights containing
0 or 1 to indicate which of the values in weights falls below
the threshold
Raises:
ValueError: if block pooling function is not AVG or MAX
"""
squeezed_weights = array_ops.squeeze(weights)
if squeezed_weights.get_shape().ndims != 2 or self._block_dim == [1, 1]:
return self._update_mask(weights, threshold)
if self._block_pooling_function not in ['AVG', 'MAX']:
raise ValueError('Unknown pooling function for block sparsity: %s' %
self._block_pooling_function)
with ops.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = math_ops.abs(squeezed_weights)
pool_window = [self._block_dim[0], self._block_dim[1]]
pool_fn = pruning_utils.factorized_pool
if not self._spec.use_tpu:
pool_fn = nn_ops.pool
abs_weights = array_ops.reshape(
abs_weights,
[1, abs_weights.get_shape()[0],
abs_weights.get_shape()[1], 1])
pooled_weights = pool_fn(
abs_weights,
window_shape=pool_window,
pooling_type=self._block_pooling_function,
strides=pool_window,
padding='SAME',
name=weights.op.name + '_pooled')
if pooled_weights.get_shape().ndims != 2:
pooled_weights = array_ops.squeeze(pooled_weights)
smoothed_threshold, new_mask = self._update_mask(pooled_weights,
threshold)
updated_mask = pruning_utils.kronecker_product(
new_mask, array_ops.ones(self._block_dim))
sliced_mask = array_ops.slice(
updated_mask, [0, 0],
[squeezed_weights.get_shape()[0],
squeezed_weights.get_shape()[1]])
return smoothed_threshold, array_ops.reshape(sliced_mask,
array_ops.shape(weights))
def _get_mask_assign_ops(self):
# Make sure the assignment ops have not already been added to the list
if self._assign_ops:
raise ValueError(
'Assign op list not empty. _get_mask_assign_ops() called twice?')
masks = get_masks()
weights = get_weights()
thresholds = get_thresholds()
if len(masks) != len(thresholds):
raise ValueError(
'Number of masks %s and number of thresholds %s mismatch' %
(len(masks), len(thresholds)))
for index, mask in enumerate(masks):
threshold = thresholds[index]
weight = weights[index]
is_partitioned = isinstance(weight, variables.PartitionedVariable)
if is_partitioned:
weight = weight.as_tensor()
new_threshold, new_mask = self._maybe_update_block_mask(weight, threshold)
self._assign_ops.append(
pruning_utils.variable_assign(threshold, new_threshold))
self._assign_ops.append(
pruning_utils.partitioned_variable_assign(mask, new_mask)
if is_partitioned else pruning_utils.variable_assign(mask, new_mask))
def mask_update_op(self):
with ops.name_scope(self._spec.name):
if not self._assign_ops:
self._get_mask_assign_ops()
with ops.control_dependencies([
state_ops.assign(
self._last_update_step,
self._global_step,
name='last_mask_update_step_assign')
]):
with ops.control_dependencies(self._assign_ops):
logging.info('Updating masks.')
return control_flow_ops.no_op('mask_update')
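  # A mask update is performed only when the global step lies within
  # [begin_pruning_step, end_pruning_step] (a negative end step means "never
  # stop") and at least pruning_frequency steps have passed since the last
  # update; otherwise conditional_mask_update_op() reduces to a no-op.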
def conditional_mask_update_op(self):
def maybe_update_masks():
with ops.name_scope(self._spec.name):
is_step_within_pruning_range = math_ops.logical_and(
math_ops.greater_equal(self._global_step,
self._spec.begin_pruning_step),
# If end_pruning_step is negative, keep pruning forever!
math_ops.logical_or(
math_ops.less_equal(self._global_step,
self._spec.end_pruning_step),
math_ops.less(self._spec.end_pruning_step, 0)))
is_pruning_step = math_ops.less_equal(
math_ops.add(self._last_update_step, self._spec.pruning_frequency),
self._global_step)
return math_ops.logical_and(is_step_within_pruning_range,
is_pruning_step)
def mask_update_op():
return self.mask_update_op()
def no_update_op():
return control_flow_ops.no_op()
return control_flow_ops.cond(maybe_update_masks(), mask_update_op,
no_update_op)
def add_pruning_summaries(self):
"""Adds summaries of weight sparsities and thresholds."""
with ops.name_scope(self._spec.name + '_summaries'):
summary.scalar('sparsity', self._sparsity)
summary.scalar('last_mask_update_step', self._last_update_step)
masks = get_masks()
thresholds = get_thresholds()
for mask, threshold in zip(masks, thresholds):
summary.scalar(mask.op.name + '/sparsity', nn_impl.zero_fraction(mask))
summary.scalar(threshold.op.name + '/threshold', threshold)
def print_hparams(self):
logging.info(self._spec.to_json())
|
"""
Tests For Cells Messaging module
"""
from nova.cells import messaging
from nova import context
from nova import exception
from nova.openstack.common import cfg
from nova import test
from nova.tests.cells import fakes
CONF = cfg.CONF
CONF.import_opt('host', 'nova.config')
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('allowed_rpc_exception_modules',
'nova.openstack.common.rpc')
class CellsMessageClassesTestCase(test.TestCase):
"""Test case for the main Cells Message classes."""
def setUp(self):
super(CellsMessageClassesTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
# Need to be able to deserialize test.TestingException.
allowed_modules = CONF.allowed_rpc_exception_modules
allowed_modules.append('nova.test')
self.flags(allowed_rpc_exception_modules=allowed_modules)
self.our_name = 'api-cell'
self.msg_runner = fakes.get_message_runner(self.our_name)
self.state_manager = self.msg_runner.state_manager
def test_reverse_path(self):
path = 'a!b!c!d'
expected = 'd!c!b!a'
rev_path = messaging._reverse_path(path)
self.assertEqual(rev_path, expected)
def test_response_cell_name_from_path(self):
# test array with tuples of inputs/expected outputs
test_paths = [('cell1', 'cell1'),
('cell1!cell2', 'cell2!cell1'),
('cell1!cell2!cell3', 'cell3!cell2!cell1')]
for test_input, expected_output in test_paths:
self.assertEqual(expected_output,
messaging._response_cell_name_from_path(test_input))
def test_response_cell_name_from_path_neighbor_only(self):
# test array with tuples of inputs/expected outputs
test_paths = [('cell1', 'cell1'),
('cell1!cell2', 'cell2!cell1'),
('cell1!cell2!cell3', 'cell3!cell2')]
for test_input, expected_output in test_paths:
self.assertEqual(expected_output,
messaging._response_cell_name_from_path(test_input,
neighbor_only=True))
def test_targeted_message(self):
self.flags(max_hop_count=99, group='cells')
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
self.assertEqual(self.ctxt, tgt_message.ctxt)
self.assertEqual(method, tgt_message.method_name)
self.assertEqual(method_kwargs, tgt_message.method_kwargs)
self.assertEqual(direction, tgt_message.direction)
        self.assertEqual(target_cell, tgt_message.target_cell)
self.assertFalse(tgt_message.fanout)
self.assertFalse(tgt_message.need_response)
self.assertEqual(self.our_name, tgt_message.routing_path)
self.assertEqual(1, tgt_message.hop_count)
self.assertEqual(99, tgt_message.max_hop_count)
self.assertFalse(tgt_message.is_broadcast)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
child_cell = self.state_manager.get_child_cell('child-cell2')
self.assertEqual(child_cell, next_hop)
def test_create_targeted_message_with_response(self):
self.flags(max_hop_count=99, group='cells')
our_name = 'child-cell1'
target_cell = 'child-cell1!api-cell'
msg_runner = fakes.get_message_runner(our_name)
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'up'
tgt_message = messaging._TargetedMessage(msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
self.assertEqual(self.ctxt, tgt_message.ctxt)
self.assertEqual(method, tgt_message.method_name)
self.assertEqual(method_kwargs, tgt_message.method_kwargs)
self.assertEqual(direction, tgt_message.direction)
        self.assertEqual(target_cell, tgt_message.target_cell)
self.assertFalse(tgt_message.fanout)
self.assertTrue(tgt_message.need_response)
self.assertEqual(our_name, tgt_message.routing_path)
self.assertEqual(1, tgt_message.hop_count)
self.assertEqual(99, tgt_message.max_hop_count)
self.assertFalse(tgt_message.is_broadcast)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
parent_cell = msg_runner.state_manager.get_parent_cell('api-cell')
self.assertEqual(parent_cell, next_hop)
def test_targeted_message_when_target_is_cell_state(self):
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
target_cell = self.state_manager.get_child_cell('child-cell2')
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
self.assertEqual('api-cell!child-cell2', tgt_message.target_cell)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
self.assertEqual(target_cell, next_hop)
def test_targeted_message_when_target_cell_state_is_me(self):
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
target_cell = self.state_manager.get_my_state()
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
self.assertEqual('api-cell', tgt_message.target_cell)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
self.assertEqual(target_cell, next_hop)
def test_create_broadcast_message(self):
self.flags(max_hop_count=99, group='cells')
self.flags(name='api-cell', max_hop_count=99, group='cells')
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction)
self.assertEqual(self.ctxt, bcast_message.ctxt)
self.assertEqual(method, bcast_message.method_name)
self.assertEqual(method_kwargs, bcast_message.method_kwargs)
self.assertEqual(direction, bcast_message.direction)
self.assertFalse(bcast_message.fanout)
self.assertFalse(bcast_message.need_response)
self.assertEqual(self.our_name, bcast_message.routing_path)
self.assertEqual(1, bcast_message.hop_count)
self.assertEqual(99, bcast_message.max_hop_count)
self.assertTrue(bcast_message.is_broadcast)
# Correct next hops?
next_hops = bcast_message._get_next_hops()
child_cells = self.state_manager.get_child_cells()
self.assertEqual(child_cells, next_hops)
def test_create_broadcast_message_with_response(self):
self.flags(max_hop_count=99, group='cells')
our_name = 'child-cell1'
msg_runner = fakes.get_message_runner(our_name)
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'up'
bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
method, method_kwargs, direction, need_response=True)
self.assertEqual(self.ctxt, bcast_message.ctxt)
self.assertEqual(method, bcast_message.method_name)
self.assertEqual(method_kwargs, bcast_message.method_kwargs)
self.assertEqual(direction, bcast_message.direction)
self.assertFalse(bcast_message.fanout)
self.assertTrue(bcast_message.need_response)
self.assertEqual(our_name, bcast_message.routing_path)
self.assertEqual(1, bcast_message.hop_count)
self.assertEqual(99, bcast_message.max_hop_count)
self.assertTrue(bcast_message.is_broadcast)
# Correct next hops?
next_hops = bcast_message._get_next_hops()
parent_cells = msg_runner.state_manager.get_parent_cells()
self.assertEqual(parent_cells, next_hops)
def test_self_targeted_message(self):
target_cell = 'api-cell'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'api-cell', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
def test_child_targeted_message(self):
target_cell = 'api-cell!child-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
def test_grandchild_targeted_message(self):
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
def test_grandchild_targeted_message_with_response(self):
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
return 'our_fake_response'
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
self.assertFalse(response.failure)
        self.assertEqual('our_fake_response', response.value_or_raise())
def test_grandchild_targeted_message_with_error(self):
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
raise test.TestingException('this should be returned')
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(test.TestingException, response.value_or_raise)
def test_grandchild_targeted_message_max_hops(self):
self.flags(max_hop_count=2, group='cells')
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
raise test.TestingException('should not be reached')
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(exception.CellMaxHopCountReached,
response.value_or_raise)
def test_targeted_message_invalid_cell(self):
target_cell = 'api-cell!child-cell2!grandchild-cell4'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(exception.CellRoutingInconsistency,
response.value_or_raise)
def test_targeted_message_invalid_cell2(self):
target_cell = 'unknown-cell!child-cell2'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(exception.CellRoutingInconsistency,
response.value_or_raise)
def test_broadcast_routing(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
cells = set()
def our_fake_method(message, **kwargs):
cells.add(message.routing_path)
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True)
bcast_message.process()
# fakes creates 8 cells (including ourself).
self.assertEqual(len(cells), 8)
def test_broadcast_routing_up(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'up'
msg_runner = fakes.get_message_runner('grandchild-cell3')
cells = set()
def our_fake_method(message, **kwargs):
cells.add(message.routing_path)
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
method, method_kwargs,
direction,
run_locally=True)
bcast_message.process()
# Paths are reversed, since going 'up'
expected = set(['grandchild-cell3', 'grandchild-cell3!child-cell3',
'grandchild-cell3!child-cell3!api-cell'])
self.assertEqual(expected, cells)
def test_broadcast_routing_without_ourselves(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
cells = set()
def our_fake_method(message, **kwargs):
cells.add(message.routing_path)
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=False)
bcast_message.process()
# fakes creates 8 cells (including ourself). So we should see
# only 7 here.
self.assertEqual(len(cells), 7)
def test_broadcast_routing_with_response(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
return 'response-%s' % message.routing_path
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
self.assertEqual(len(responses), 8)
for response in responses:
self.assertFalse(response.failure)
self.assertEqual('response-%s' % response.cell_name,
response.value_or_raise())
def test_broadcast_routing_with_response_max_hops(self):
self.flags(max_hop_count=2, group='cells')
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
return 'response-%s' % message.routing_path
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
# Should only get responses from our immediate children (and
# ourselves)
self.assertEqual(len(responses), 5)
for response in responses:
self.assertFalse(response.failure)
self.assertEqual('response-%s' % response.cell_name,
response.value_or_raise())
def test_broadcast_routing_with_all_erroring(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
raise test.TestingException('fake failure')
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
self.assertEqual(len(responses), 8)
for response in responses:
self.assertTrue(response.failure)
self.assertRaises(test.TestingException, response.value_or_raise)
def test_broadcast_routing_with_two_erroring(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method_failing(message, **kwargs):
raise test.TestingException('fake failure')
def our_fake_method(message, **kwargs):
return 'response-%s' % message.routing_path
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
fakes.stub_bcast_method(self, 'child-cell2', 'our_fake_method',
our_fake_method_failing)
fakes.stub_bcast_method(self, 'grandchild-cell3', 'our_fake_method',
our_fake_method_failing)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
self.assertEqual(len(responses), 8)
failure_responses = [resp for resp in responses if resp.failure]
success_responses = [resp for resp in responses if not resp.failure]
self.assertEqual(len(failure_responses), 2)
self.assertEqual(len(success_responses), 6)
for response in success_responses:
self.assertFalse(response.failure)
self.assertEqual('response-%s' % response.cell_name,
response.value_or_raise())
for response in failure_responses:
self.assertIn(response.cell_name, ['api-cell!child-cell2',
'api-cell!child-cell3!grandchild-cell3'])
self.assertTrue(response.failure)
self.assertRaises(test.TestingException, response.value_or_raise)
class CellsTargetedMethodsTestCase(test.TestCase):
"""Test case for _TargetedMessageMethods class. Most of these
tests actually test the full path from the MessageRunner through
to the functionality of the message method. Hits 2 birds with 1
stone, even though it's a little more than a unit test.
"""
def setUp(self):
super(CellsTargetedMethodsTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
self._setup_attrs('api-cell', 'api-cell!child-cell2')
def _setup_attrs(self, source_cell, target_cell):
self.tgt_cell_name = target_cell
self.src_msg_runner = fakes.get_message_runner(source_cell)
self.src_state_manager = self.src_msg_runner.state_manager
tgt_shortname = target_cell.split('!')[-1]
self.tgt_cell_mgr = fakes.get_cells_manager(tgt_shortname)
self.tgt_msg_runner = self.tgt_cell_mgr.msg_runner
self.tgt_scheduler = self.tgt_msg_runner.scheduler
self.tgt_state_manager = self.tgt_msg_runner.state_manager
methods_cls = self.tgt_msg_runner.methods_by_type['targeted']
self.tgt_methods_cls = methods_cls
self.tgt_compute_api = methods_cls.compute_api
self.tgt_db_inst = methods_cls.db
def test_schedule_run_instance(self):
host_sched_kwargs = {'filter_properties': {},
'key1': 'value1',
'key2': 'value2'}
self.mox.StubOutWithMock(self.tgt_scheduler, 'run_instance')
self.tgt_scheduler.run_instance(self.ctxt, host_sched_kwargs)
self.mox.ReplayAll()
self.src_msg_runner.schedule_run_instance(self.ctxt,
self.tgt_cell_name,
host_sched_kwargs)
def test_call_compute_api_method(self):
instance_uuid = 'fake_instance_uuid'
method_info = {'method': 'reboot',
'method_args': (instance_uuid, 2, 3),
'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
self.mox.StubOutWithMock(self.tgt_compute_api, 'reboot')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
instance_uuid).AndReturn(
'fake_instance')
self.tgt_compute_api.reboot(self.ctxt, 'fake_instance', 2, 3,
arg1='val1', arg2='val2').AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.run_compute_api_method(
self.ctxt,
self.tgt_cell_name,
method_info,
True)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def test_call_compute_api_method_unknown_instance(self):
# Unknown instance should send a broadcast up that instance
# is gone.
instance_uuid = 'fake_instance_uuid'
instance = {'uuid': instance_uuid}
method_info = {'method': 'reboot',
'method_args': (instance_uuid, 2, 3),
'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'instance_destroy_at_top')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
'fake_instance_uuid').AndRaise(
exception.InstanceNotFound(instance_id=instance_uuid))
self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance)
self.mox.ReplayAll()
response = self.src_msg_runner.run_compute_api_method(
self.ctxt,
self.tgt_cell_name,
method_info,
True)
self.assertRaises(exception.InstanceNotFound,
response.value_or_raise)
def test_update_capabilities(self):
# Route up to API
self._setup_attrs('child-cell2', 'child-cell2!api-cell')
capabs = {'cap1': set(['val1', 'val2']),
'cap2': set(['val3'])}
        # The list(set([])) seems silly, but we can't assume the order
        # of the list... This behavior should match the code we're
        # testing, which is to check that a set was converted to a list.
expected_capabs = {'cap1': list(set(['val1', 'val2'])),
'cap2': ['val3']}
self.mox.StubOutWithMock(self.src_state_manager,
'get_our_capabilities')
self.mox.StubOutWithMock(self.tgt_state_manager,
'update_cell_capabilities')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capabilities')
self.src_state_manager.get_our_capabilities().AndReturn(capabs)
self.tgt_state_manager.update_cell_capabilities('child-cell2',
expected_capabs)
self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.tell_parents_our_capabilities(self.ctxt)
def test_update_capacities(self):
self._setup_attrs('child-cell2', 'child-cell2!api-cell')
capacs = 'fake_capacs'
self.mox.StubOutWithMock(self.src_state_manager,
'get_our_capacities')
self.mox.StubOutWithMock(self.tgt_state_manager,
'update_cell_capacities')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capacities')
self.src_state_manager.get_our_capacities().AndReturn(capacs)
self.tgt_state_manager.update_cell_capacities('child-cell2',
capacs)
self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.tell_parents_our_capacities(self.ctxt)
def test_announce_capabilities(self):
self._setup_attrs('api-cell', 'api-cell!child-cell1')
# To make this easier to test, make us only have 1 child cell.
cell_state = self.src_state_manager.child_cells['child-cell1']
self.src_state_manager.child_cells = {'child-cell1': cell_state}
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capabilities')
self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.ask_children_for_capabilities(self.ctxt)
def test_announce_capacities(self):
self._setup_attrs('api-cell', 'api-cell!child-cell1')
# To make this easier to test, make us only have 1 child cell.
cell_state = self.src_state_manager.child_cells['child-cell1']
self.src_state_manager.child_cells = {'child-cell1': cell_state}
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capacities')
self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.ask_children_for_capacities(self.ctxt)
class CellsBroadcastMethodsTestCase(test.TestCase):
"""Test case for _BroadcastMessageMethods class. Most of these
tests actually test the full path from the MessageRunner through
to the functionality of the message method. Hits 2 birds with 1
stone, even though it's a little more than a unit test.
"""
def setUp(self):
super(CellsBroadcastMethodsTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
self._setup_attrs()
def _setup_attrs(self, up=True):
mid_cell = 'child-cell2'
if up:
src_cell = 'grandchild-cell1'
tgt_cell = 'api-cell'
else:
src_cell = 'api-cell'
tgt_cell = 'grandchild-cell1'
self.src_msg_runner = fakes.get_message_runner(src_cell)
methods_cls = self.src_msg_runner.methods_by_type['broadcast']
self.src_methods_cls = methods_cls
self.src_db_inst = methods_cls.db
self.src_compute_api = methods_cls.compute_api
self.mid_msg_runner = fakes.get_message_runner(mid_cell)
methods_cls = self.mid_msg_runner.methods_by_type['broadcast']
self.mid_methods_cls = methods_cls
self.mid_db_inst = methods_cls.db
self.mid_compute_api = methods_cls.compute_api
self.tgt_msg_runner = fakes.get_message_runner(tgt_cell)
methods_cls = self.tgt_msg_runner.methods_by_type['broadcast']
self.tgt_methods_cls = methods_cls
self.tgt_db_inst = methods_cls.db
self.tgt_compute_api = methods_cls.compute_api
def test_at_the_top(self):
self.assertTrue(self.tgt_methods_cls._at_the_top())
self.assertFalse(self.mid_methods_cls._at_the_top())
self.assertFalse(self.src_methods_cls._at_the_top())
def test_instance_update_at_top(self):
fake_info_cache = {'id': 1,
'instance': 'fake_instance',
'other': 'moo'}
fake_sys_metadata = [{'id': 1,
'key': 'key1',
'value': 'value1'},
{'id': 2,
'key': 'key2',
'value': 'value2'}]
fake_instance = {'id': 2,
'uuid': 'fake_uuid',
'security_groups': 'fake',
'instance_type': 'fake',
'volumes': 'fake',
'cell_name': 'fake',
'name': 'fake',
'metadata': 'fake',
'info_cache': fake_info_cache,
'system_metadata': fake_sys_metadata,
'other': 'meow'}
expected_sys_metadata = {'key1': 'value1',
'key2': 'value2'}
expected_info_cache = {'other': 'moo'}
expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
expected_instance = {'system_metadata': expected_sys_metadata,
'cell_name': expected_cell_name,
'other': 'meow',
'uuid': 'fake_uuid'}
# To show these should not be called in src/mid-level cell
self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.src_db_inst,
'instance_info_cache_update')
self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.mid_db_inst,
'instance_info_cache_update')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.tgt_db_inst,
'instance_info_cache_update')
self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
expected_instance,
update_cells=False)
self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
expected_info_cache,
update_cells=False)
self.mox.ReplayAll()
self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
def test_instance_destroy_at_top(self):
fake_instance = {'uuid': 'fake_uuid'}
# To show these should not be called in src/mid-level cell
self.mox.StubOutWithMock(self.src_db_inst, 'instance_destroy')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_destroy')
self.tgt_db_inst.instance_destroy(self.ctxt, 'fake_uuid',
update_cells=False)
self.mox.ReplayAll()
self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance)
def test_instance_hard_delete_everywhere(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
instance = {'uuid': 'meow'}
# Should not be called in src (API cell)
self.mox.StubOutWithMock(self.src_compute_api, 'delete')
self.mox.StubOutWithMock(self.mid_compute_api, 'delete')
self.mox.StubOutWithMock(self.tgt_compute_api, 'delete')
self.mid_compute_api.delete(self.ctxt, instance)
self.tgt_compute_api.delete(self.ctxt, instance)
self.mox.ReplayAll()
self.src_msg_runner.instance_delete_everywhere(self.ctxt,
instance, 'hard')
def test_instance_soft_delete_everywhere(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
instance = {'uuid': 'meow'}
# Should not be called in src (API cell)
self.mox.StubOutWithMock(self.src_compute_api, 'soft_delete')
self.mox.StubOutWithMock(self.mid_compute_api, 'soft_delete')
self.mox.StubOutWithMock(self.tgt_compute_api, 'soft_delete')
self.mid_compute_api.soft_delete(self.ctxt, instance)
self.tgt_compute_api.soft_delete(self.ctxt, instance)
self.mox.ReplayAll()
self.src_msg_runner.instance_delete_everywhere(self.ctxt,
instance, 'soft')
def test_instance_fault_create_at_top(self):
fake_instance_fault = {'id': 1,
'other stuff': 2,
'more stuff': 3}
expected_instance_fault = {'other stuff': 2,
'more stuff': 3}
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst, 'instance_fault_create')
self.mox.StubOutWithMock(self.mid_db_inst, 'instance_fault_create')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_fault_create')
self.tgt_db_inst.instance_fault_create(self.ctxt,
expected_instance_fault)
self.mox.ReplayAll()
self.src_msg_runner.instance_fault_create_at_top(self.ctxt,
fake_instance_fault)
def test_bw_usage_update_at_top(self):
fake_bw_update_info = {'uuid': 'fake_uuid',
'mac': 'fake_mac',
'start_period': 'fake_start_period',
'bw_in': 'fake_bw_in',
'bw_out': 'fake_bw_out',
'last_ctr_in': 'fake_last_ctr_in',
'last_ctr_out': 'fake_last_ctr_out',
'last_refreshed': 'fake_last_refreshed'}
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst, 'bw_usage_update')
self.mox.StubOutWithMock(self.mid_db_inst, 'bw_usage_update')
self.mox.StubOutWithMock(self.tgt_db_inst, 'bw_usage_update')
self.tgt_db_inst.bw_usage_update(self.ctxt, **fake_bw_update_info)
self.mox.ReplayAll()
self.src_msg_runner.bw_usage_update_at_top(self.ctxt,
fake_bw_update_info)
|
"""Init module for experimental."""
from tensorflow_model_analysis.experimental import preprocessing_functions
|
import unittest
from approvaltests.core.reporter import Reporter
from approvaltests.reporters.first_working_reporter import FirstWorkingReporter
class ReporterForTesting(Reporter):
def __init__(self, success, additional=None):
if additional is None:
additional = lambda : None
self.additional = additional
self.called = False
self.success = success
def report(self, received_path, approved_path):
self.called = True
self.additional()
return self.success
class TestFirstWorkingReporter(unittest.TestCase):
def test_first_one(self):
r1 = ReporterForTesting(True)
r2 = ReporterForTesting(False)
first = FirstWorkingReporter(r1, r2)
success = first.report('a.txt', 'b.txt')
self.assertTrue(r1.called)
self.assertTrue(success)
self.assertFalse(r2.called)
def test_second_one(self):
r1 = ReporterForTesting(False)
r2 = ReporterForTesting(False)
first = FirstWorkingReporter(r1, r2)
success = first.report('a.txt', 'b.txt')
self.assertTrue(r1.called)
self.assertTrue(r2.called)
self.assertFalse(success)
def test_exception(self):
def exception():
raise Exception()
r1 = ReporterForTesting(False, exception)
r2 = ReporterForTesting(False)
first = FirstWorkingReporter(r1, r2)
success = first.report('a.txt', 'b.txt')
self.assertTrue(r1.called)
self.assertTrue(r2.called)
self.assertFalse(success)
|
import pynodegl as ngl
from pynodegl_utils.misc import scene
from pynodegl_utils.tests.cmp_fingerprint import test_fingerprint
from pynodegl_utils.toolbox.colors import COLORS
def _render_quad(cfg, corner=(-1, -1, 0), width=(2, 0, 0), height=(0, 2, 0), color=(1, 1, 1), opacity=1):
quad = ngl.Quad(corner, width, height)
return ngl.RenderColor(color, opacity=opacity, geometry=quad, blending='src_over')
@test_fingerprint(width=16, height=16, nb_keyframes=2, tolerance=1)
@scene()
def depth_stencil_depth(cfg):
group = ngl.Group()
count = 4
for i in range(count):
depth = (i + 1) / count
corner = (-1 + (count - 1 - i) * 2 / count, -1, depth)
render = _render_quad(cfg, corner=corner, color=(depth, depth, depth))
graphicconfig = ngl.GraphicConfig(
render,
depth_test=True,
depth_func='lequal',
)
group.add_children(graphicconfig)
for i, depth in enumerate((0.4, 0.6)):
corner = (-1, -0.5 + 0.25 * i, depth)
height = (0, 1 - 0.25 * i * 2, 0)
render = _render_quad(cfg, corner=corner, height=height, color=COLORS.red, opacity=0.5)
graphicconfig = ngl.GraphicConfig(
render,
depth_test=True,
depth_func='less',
depth_write_mask=0,
)
group.add_children(graphicconfig)
return group
@test_fingerprint(width=16, height=16, nb_keyframes=2, tolerance=1)
@scene()
def depth_stencil_stencil(cfg):
group = ngl.Group()
count = 4
for i in range(count):
render = _render_quad(cfg, corner=(-1 + (i * 2) / count, -1, 0), color=COLORS.black)
graphicconfig = ngl.GraphicConfig(
render,
color_write_mask='',
stencil_test=True,
stencil_write_mask=0xff,
stencil_func='always',
stencil_ref=1,
stencil_read_mask=0xff,
stencil_fail='incr',
stencil_depth_fail='incr',
stencil_depth_pass='incr',
)
group.add_children(graphicconfig)
render = _render_quad(cfg, color=COLORS.white)
graphicconfig = ngl.GraphicConfig(
render,
stencil_test=True,
stencil_write_mask=0x0,
stencil_func='equal',
stencil_ref=1,
stencil_read_mask=0x1,
stencil_fail='keep',
stencil_depth_fail='keep',
stencil_depth_pass='keep',
)
group.add_children(graphicconfig)
return group
|
import pytest
from prometheus_client import CONTENT_TYPE_LATEST
from tests.factories import UserFactory
from tests.utils import get_view_for_user
@pytest.mark.django_db
def test_get_statistics(client):
n_dutch = 3
for _ in range(n_dutch):
u = UserFactory()
u.user_profile.country = "NL"
u.user_profile.save()
response = get_view_for_user(client=client, viewname="statistics:detail")
assert response.status_code == 200
    # String country IDs are used in the topojson file we download;
    # 528 is the ISO 3166-1 numeric code for the Netherlands
assert '{"id": "528", "participants": 3}' in response.rendered_content
@pytest.mark.django_db
def test_prometheus_metrics(client):
user = UserFactory()
response = get_view_for_user(
client=client, viewname="api:metrics", user=user
)
assert response.status_code == 403
user.is_staff = True
user.save()
response = get_view_for_user(
client=client, viewname="api:metrics", user=user
)
assert response.status_code == 200
assert response.content_type == CONTENT_TYPE_LATEST
|
import os, sys, types, time, copy
import pygame
import random
PERMUTATION_THRESHOLD_INCREMENT = 40
WATCH_THE_LOGIC_MODE = False
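# Grid encoding (inferred from the puzzle data): -1 marks a blocked cell, 0 an
# empty cell to be filled, and a two-element list a clue cell holding
# [across_sum, down_sum], with -1 standing in for a missing clue in that
# direction.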
puzzle1 = [\
[-1,-1,[-1,23],[-1,21],-1,-1],\
[-1,[8,15],0,0,-1,-1],\
[[8,-1],0,0,0,-1,-1],\
[[27,-1],0,0,0,0,-1],\
[[5,-1],0,0,-1,-1,-1],\
[[14,-1],0,0,-1,-1,-1],\
[-1,-1,-1,-1,-1,-1]]
puzzle2 = [\
[-1,-1,[-1,31],[-1,4],-1,[-1,23],[-1,4],-1,-1,[-1,19],[-1,5],-1,[-1,29],[-1,11]],\
[-1,[3,15],0,0,[12,3],0,0,[-1,34],[9,-1],0,0,[3,-1],0,0],\
[[31,-1],0,0,0,0,0,0,0,[13,4],0,0,[12,6],0,0],\
[[14,-1],0,0,[9,13],0,0,[9,14],0,0,0,[3,-1],0,0,-1],\
[-1,[15,-1],0,0,-1,[20,19],0,0,0,[-1,18],[11,8],0,0,[-1,13]],\
[-1,[8,8],0,0,[15,9],0,0,0,[12,12],0,0,[14,11],0,0],\
[[3,-1],0,0,[14,-1],0,0,[31,-1],0,0,0,0,0,0,0],\
[[10,-1],0,0,[11,-1],0,0,-1,[17,-1],0,0,[14,-1],0,0,-1]]
puzzle3 = [\
[-1,-1,[-1,40],[-1,16],-1,-1,[-1,44],[-1,3]],\
[-1,[13,30],0,0,[-1,4],[8,-1],0,0],\
[[19,-1],0,0,0,0,[4,3],0,0],\
[[16,-1],0,0,[12,-1],0,0,0,[-1,11]],\
[[12,-1],0,0,[-1,17],[8,-1],0,0,0],\
[[18,-1],0,0,0,[-1,16],[12,-1],0,0],\
[-1,[18,17],0,0,0,[6,16],0,0],\
[[15,-1],0,0,[23,-1],0,0,0,0],\
[[17,-1],0,0,-1,[16,-1],0,0,-1]]
randomkakuro=random.randint(1, 3)
if randomkakuro == 1:
thePuzzle=puzzle1
elif randomkakuro == 2:
thePuzzle=puzzle2
elif randomkakuro == 3:
thePuzzle=puzzle3
else:
print "4 -Une valeur incorrecte a ?t? g?n?r?"
solutionDict = {}
oldSolutionDict = {}
numberDict = {\
(2, 3) : [1, 2] ,\
(2, 4) : [1, 3] ,\
(2, 5) : [1, 2, 3, 4] ,\
(2, 6) : [1, 2, 4, 5] ,\
(2, 7) : [1, 2, 3, 4, 5, 6] ,\
(2, 8) : [1, 2, 3, 5, 6, 7] ,\
(2, 9) : [1, 2, 3, 4, 5, 6, 7, 8] ,\
(2, 10) : [1, 2, 3, 4, 6, 7, 8, 9] ,\
(2, 11) : [2, 3, 4, 5, 6, 7, 8, 9] ,\
(2, 12) : [3, 4, 5, 7, 8, 9] ,\
(2, 13) : [4, 5, 6, 7, 8, 9] ,\
(2, 14) : [5, 6, 8, 9] ,\
(2, 15) : [6, 7, 8, 9] ,\
(2, 16) : [7, 9] ,\
(2, 17) : [8, 9] ,\
(3, 6) : [1, 2, 3] ,\
(3, 7) : [1, 2, 4] ,\
(3, 8) : [1, 2, 3, 4, 5] ,\
(3, 9) : [1, 2, 3, 4, 5, 6] ,\
(3, 10) : [1, 2, 3, 4, 5, 6, 7] ,\
(3, 11) : [1, 2, 3, 4, 5, 6, 7, 8] ,\
(3, 12) : 0,\
(3, 13) : 0,\
(3, 14) : 0,\
(3, 15) : 0,\
(3, 16) : 0,\
(3, 17) : 0,\
(3, 18) : 0,\
(3, 19) : [2, 3, 4, 5, 6, 7, 8, 9] ,\
(3, 20) : [3, 4, 5, 6, 7, 8, 9] ,\
(3, 21) : [4, 5, 6, 7, 8, 9] ,\
(3, 22) : [5, 6, 7, 8, 9] ,\
(3, 23) : [6, 8, 9] ,\
(3, 24) : [7, 8, 9] ,\
(4, 10) : [1, 2, 3, 4] ,\
(4, 11) : [1, 2, 3, 5] ,\
(4, 12) : [1, 2, 3, 4, 5, 6] ,\
(4, 13) : [1, 2, 3, 4, 5, 6, 7] ,\
(4, 14) : [1, 2, 3, 4, 5, 6, 7, 8] ,\
(4, 15) : 0,\
(4, 16) : 0,\
(4, 17) : 0,\
(4, 18) : 0,\
(4, 19) : 0,\
(4, 20) : 0,\
(4, 21) : 0,\
(4, 22) : 0,\
(4, 23) : 0,\
(4, 24) : 0,\
(4, 25) : 0,\
(4, 26) : [2, 3, 4, 5, 6, 7, 8, 9] ,\
(4, 27) : [3, 4, 5, 6, 7, 8, 9] ,\
(4, 28) : [4, 5, 6, 7, 8, 9] ,\
(4, 29) : [5, 7, 8, 9] ,\
(4, 30) : [6, 7, 8, 9] ,\
(5, 15) : [1, 2, 3, 4, 5] ,\
(5, 16) : [1, 2, 3, 4, 6] ,\
(5, 17) : [1, 2, 3, 4, 5, 6, 7] ,\
(5, 18) : [1, 2, 3, 4, 5, 6, 7, 8] ,\
(5, 19) : 0,\
(5, 20) : 0,\
(5, 21) : 0,\
(5, 22) : 0,\
(5, 23) : 0,\
(5, 24) : 0,\
(5, 25) : 0,\
(5, 26) : 0,\
(5, 27) : 0,\
(5, 28) : 0,\
(5, 29) : 0,\
(5, 30) : 0,\
(5, 31) : 0,\
(5, 32) : [2, 3, 4, 5, 6, 7, 8, 9] ,\
(5, 33) : [3, 4, 5, 6, 7, 8, 9] ,\
(5, 34) : [4, 6, 7, 8, 9] ,\
(5, 35) : [5, 6, 7, 8, 9] ,\
(6, 21) : [1, 2, 3, 4, 5, 6] ,\
(6, 22) : [1, 2, 3, 4, 5, 7] ,\
(6, 23) : [1, 2, 3, 4, 5, 6, 7, 8] ,\
(6, 24) : 0,\
(6, 25) : 0,\
(6, 26) : 0,\
(6, 27) : 0,\
(6, 28) : 0,\
(6, 29) : 0,\
(6, 30) : 0,\
(6, 31) : 0,\
(6, 32) : 0,\
(6, 33) : 0,\
(6, 34) : 0,\
(6, 35) : 0,\
(6, 36) : 0,\
(6, 37) : [2, 3, 4, 5, 6, 7, 8, 9] ,\
(6, 38) : [3, 5, 6, 7, 8, 9] ,\
(6, 39) : [4, 5, 6, 7, 8, 9] ,\
(7, 28) : [1, 2, 3, 4, 5, 6, 7] ,\
(7, 29) : [1, 2, 3, 4, 5, 6, 8] ,\
(7, 30) : 0,\
(7, 31) : 0,\
(7, 32) : 0,\
(7, 33) : 0,\
(7, 34) : 0,\
(7, 35) : 0,\
(7, 36) : 0,\
(7, 37) : 0,\
(7, 38) : 0,\
(7, 39) : 0,\
(7, 40) : 0,\
(7, 41) : [2, 4, 5, 6, 7, 8, 9] ,\
(7, 42) : [3, 4, 5, 6, 7, 8, 9] ,\
(8, 36) : [1, 2, 3, 4, 5, 6, 7, 8] ,\
(8, 37) : [1, 2, 3, 4, 5, 6, 7, 9] ,\
(8, 38) : [1, 2, 3, 4, 5, 6, 8, 9] ,\
(8, 39) : [1, 2, 3, 4, 5, 7, 8, 9] ,\
(8, 40) : [1, 2, 3, 4, 6, 7, 8, 9] ,\
(8, 41) : [1, 2, 3, 5, 6, 7, 8, 9] ,\
(8, 42) : [1, 2, 4, 5, 6, 7, 8, 9] ,\
(8, 43) : [1, 3, 4, 5, 6, 7, 8, 9] ,\
(8, 44) : [2, 3, 4, 5, 6, 7, 8, 9] ,\
}
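# numberDict maps (run length, clue sum) -> the digits that can possibly
# appear in such a run; an entry of 0 means the run is unconstrained (any
# digit 1-9 is possible). reduceSearchSpace() uses this table to seed and
# intersect each cell's candidate set.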
puzzleRows = len(thePuzzle)
puzzleCols = len(thePuzzle[0])
scalefactor = 65
height = puzzleRows * scalefactor
width = puzzleCols * scalefactor
def rectForXYcell (x, y):
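"""Return the pygame.Rect covering the grid cell at row x, column y."""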
return pygame.Rect(width/puzzleCols*y,height/puzzleRows*x,width/puzzleCols+1,height/puzzleRows+1)
def permutate(array):
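"""Return every combination that takes one element from each sub-list of
`array`, keeping only the combinations whose elements are all distinct."""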
numOfPermutations = 1
for i in range(len(array)):
numOfPermutations = numOfPermutations * len(array[i])
theIndex = [0]*len(array)
theList = []
for x in range(numOfPermutations):
subList = []
for i in range(len(array)):
subList.append(array[i][theIndex[i]])
if len(set(subList)) == len(subList):
theList.append(subList)
for i in range(len(array)):
theIndex[i] = theIndex[i] + 1
if theIndex[i] != len(array[i]):
break
else:
theIndex[i] = 0
return theList
def waitForKeypress():
while 1:
event = pygame.event.wait()
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
return
def removeIllegalValues(index):
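"""Prune candidates by brute force: for each row run, then each column run,
enumerate the permutations of the cells' candidate sets (only while the
permutation count stays below PERMUTATION_THRESHOLD_INCREMENT*index), keep
the permutations that add up to the run's clue, and shrink each cell's
candidate set accordingly."""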
lastHint = nextHint = 0
arrayIndexes = []
# Horizontal
statusBarMessage("Checking permutations by row. Ignoring rows > %d." % (PERMUTATION_THRESHOLD_INCREMENT*index))
for x in range(puzzleRows):
for y in range(puzzleCols):
if isList(thePuzzle[x][y]) and thePuzzle[x][y][0]!=-1:
nextHint = thePuzzle[x][y][0]
if thePuzzle[x][y] > 0 and not isList(thePuzzle[x][y]):
arrayIndexes.append(thePuzzle[x][y])
if (thePuzzle[x][y] == -1 or isList(thePuzzle[x][y])) and lastHint != 0:
theBlock = []
for theIndex in arrayIndexes:
theBlock.append(list(solutionDict[theIndex]))
numOfPermutations = 1
for i in range(len(theBlock)):
numOfPermutations = numOfPermutations * len(theBlock[i])
if numOfPermutations < PERMUTATION_THRESHOLD_INCREMENT*index:
mutations = permutate(theBlock)
goodMutations = []
for theMutation in mutations:
if sum(theMutation) == lastHint:
goodMutations.append(theMutation)
if goodMutations == []:
print "Puzzle incoh?rentes."
print "block %dx%d" %(x,y)
sys.exit()
for t in range(len(arrayIndexes)):
theBlock = []
for s in range(len(goodMutations)):
theBlock.append(goodMutations[s][t])
solutionDict[arrayIndexes[t]] = set(theBlock)
if WATCH_THE_LOGIC_MODE:
redrawAllWhiteBoxes()
waitForKeypress()
arrayIndexes = []
lastHint = 0
if nextHint:
lastHint = nextHint
nextHint = 0
statusBarMessage("V?rification de permutations par colonne. ignorant colonnes > %d." % PERMUTATION_THRESHOLD_INCREMENT*index)
# Verticale
for y in range(puzzleCols):
for x in range(puzzleRows):
if isList(thePuzzle[x][y]) and thePuzzle[x][y][1]!=-1:
nextHint = thePuzzle[x][y][1]
if thePuzzle[x][y] > 0 and not isList(thePuzzle[x][y]):
arrayIndexes.append(thePuzzle[x][y])
if (thePuzzle[x][y] == -1 or isList(thePuzzle[x][y])) and lastHint != 0:
theBlock = []
for i in range(len(arrayIndexes)):
theBlock.append(list(solutionDict[arrayIndexes[i]]))
numOfPermutations = 1
for i in range(len(theBlock)):
numOfPermutations = numOfPermutations * len(theBlock[i])
if numOfPermutations < PERMUTATION_THRESHOLD_INCREMENT*index:
mutations = permutate(theBlock)
goodMutations = []
for theMutation in mutations:
if sum(theMutation) == lastHint:
goodMutations.append(theMutation)
if goodMutations == []:
print "Puzzle incoh?rentes."
print "block %dx%d" %(x,y)
print theBlock
print lastHint
print mutations
sys.exit()
for t in range(len(arrayIndexes)):
theBlock = []
for s in range(len(goodMutations)):
theBlock.append(goodMutations[s][t])
solutionDict[arrayIndexes[t]] = set(theBlock)
if WATCH_THE_LOGIC_MODE:
redrawAllWhiteBoxes()
waitForKeypress()
arrayIndexes = []
lastHint = 0
if nextHint:
lastHint = nextHint
nextHint = 0
def removeDuplicateValues():
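"""For every row run and column run, remove a digit from the other cells'
candidate sets once it is the only remaining candidate of some cell in the
run (Kakuro forbids repeating a digit within a run)."""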
lastHint = nextHint = 0
arrayIndexes = []
# Horizontal
statusBarMessage("Removing duplicate values by row.")
for x in range(puzzleRows):
for y in range(puzzleCols):
if isList(thePuzzle[x][y]) and thePuzzle[x][y][0]!=-1:
nextHint = thePuzzle[x][y][0]
if thePuzzle[x][y] > 0 and not isList(thePuzzle[x][y]):
arrayIndexes.append(thePuzzle[x][y])
if (thePuzzle[x][y] == -1 or isList(thePuzzle[x][y])) and lastHint != 0:
for theIndex in arrayIndexes:
if len(solutionDict[theIndex])==1:
for theOtherIndex in arrayIndexes:
if theOtherIndex != theIndex:
solutionDict[theOtherIndex] = solutionDict[theOtherIndex] - solutionDict[theIndex]
if WATCH_THE_LOGIC_MODE:
redrawAllWhiteBoxes()
waitForKeypress()
lastHint = 0
arrayIndexes = []
if nextHint:
lastHint = nextHint
nextHint = 0
# Vertical
statusBarMessage("Removing duplicate values by column.")
for y in range(puzzleCols):
for x in range(puzzleRows):
if isList(thePuzzle[x][y]) and thePuzzle[x][y][1]!=-1:
nextHint = thePuzzle[x][y][1]
if thePuzzle[x][y] > 0 and not isList(thePuzzle[x][y]):
arrayIndexes.append(thePuzzle[x][y])
if (thePuzzle[x][y] == -1 or isList(thePuzzle[x][y])) and lastHint != 0:
for theIndex in arrayIndexes:
if len(solutionDict[theIndex])==1:
for theOtherIndex in arrayIndexes:
if theOtherIndex != theIndex:
solutionDict[theOtherIndex] = solutionDict[theOtherIndex] - solutionDict[theIndex]
if WATCH_THE_LOGIC_MODE:
redrawAllWhiteBoxes()
waitForKeypress()
lastHint = 0
arrayIndexes = []
if nextHint:
lastHint = nextHint
nextHint = 0
def isPuzzleSolved():
# Horizontal
for x in range(puzzleRows):
for y in range(puzzleCols):
if thePuzzle[x][y] > 0 and not isList(thePuzzle[x][y]):
if len(solutionDict[thePuzzle[x][y]]) > 1:
return 0
# Vertical
for y in range(puzzleCols):
for x in range(puzzleRows):
if thePuzzle[x][y] > 0 and not isList(thePuzzle[x][y]):
if len(solutionDict[thePuzzle[x][y]]) > 1:
return 0
return 1
def initSolutionDict():
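"""Replace every empty cell (0) in thePuzzle with a unique integer key and
map that key in solutionDict to the full candidate set {1..9}."""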
uniqueInt = 1
for x in range(puzzleRows):
for y in range(puzzleCols):
if thePuzzle[x][y] == 0:
solutionDict[uniqueInt] = set([1,2,3,4,5,6,7,8,9])
thePuzzle[x][y] = uniqueInt
uniqueInt = uniqueInt + 1
def reduceSearchSpace():
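"""Seed the candidate sets from numberDict: the row pass assigns each cell
the digits allowed for its (run length, clue sum), and the column pass
intersects them with the digits allowed for its column run."""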
lastHint = nextHint = 0
arrayIndexes = []
statusBarMessage("R?duire ligne de recherche en utilisant l'espace de consultation de table.")
#Horizontally
for x in range(puzzleRows):
for y in range(puzzleCols):
if isList(thePuzzle[x][y]) and thePuzzle[x][y][0]!=-1:
nextHint = thePuzzle[x][y][0]
if thePuzzle[x][y] > 0 and not isList(thePuzzle[x][y]):
arrayIndexes.append(thePuzzle[x][y])
if (thePuzzle[x][y] == -1 or isList(thePuzzle[x][y])) and lastHint != 0:
for z in arrayIndexes:
try:
entry = numberDict[(len(arrayIndexes),lastHint)]
if entry != 0:
solutionDict[z] = set(entry)
if WATCH_THE_LOGIC_MODE:
redrawAllWhiteBoxes()
waitForKeypress()
except KeyError:
print 'KeyError (inconsistent puzzle)'
arrayIndexes = []
lastHint = 0
if nextHint:
lastHint = nextHint
nextHint = 0
# Vertical
statusBarMessage("Reducing column search space using the lookup table.")
for y in range(puzzleCols):
for x in range(puzzleRows):
if isList(thePuzzle[x][y]) and thePuzzle[x][y][1]!=-1:
nextHint = thePuzzle[x][y][1]
if thePuzzle[x][y] > 0 and not isList(thePuzzle[x][y]):
arrayIndexes.append(thePuzzle[x][y])
if (thePuzzle[x][y] == -1 or isList(thePuzzle[x][y])) and lastHint != 0:
for z in arrayIndexes:
try:
entry = numberDict[(len(arrayIndexes),lastHint)]
if entry != 0:
solutionDict[z] = solutionDict[z] & set(entry)
if WATCH_THE_LOGIC_MODE:
redrawAllWhiteBoxes()
waitForKeypress()
except KeyError:
print 'KeyError (inconsistent puzzle)'
arrayIndexes = []
lastHint = 0
if nextHint:
lastHint = nextHint
nextHint = 0
def redrawWhiteBox(x,y):
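"""Redraw the white cell at (x, y): if a single candidate remains it is drawn
large in blue, otherwise the remaining candidates are drawn as a small 1-9
grid in green."""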
# Necessary?
screen.fill([255,255,255],rectForXYcell(x,y))
pygame.draw.rect(screen,[0,0,0],rectForXYcell(x,y),1)
try:
possibleSolutions = solutionDict[thePuzzle[x][y]]
if len(possibleSolutions) == 1:
# I can't think of a better way to do this =/
theDigit = possibleSolutions.pop()
possibleSolutions.add(theDigit)
therender = bigfont.render("%d" %theDigit,1,[0,0,255])
screen.blit(therender,[width/puzzleCols*(y+0.5)-therender.get_width()/2,height/puzzleRows*(x+0.5)-therender.get_height()/2])
return
else:
if(1 in possibleSolutions):
string1 = "1 "
else:
string1 = " "
if(2 in possibleSolutions):
string1 = string1 + "2 "
else:
string1 = string1 + " "
if(3 in possibleSolutions):
string1 = string1 + "3"
else:
string1 = string1 + " "
if(4 in possibleSolutions):
string2 = "4 "
else:
string2 = " "
if(5 in possibleSolutions):
string2 = string2 + "5 "
else:
string2 = string2 + " "
if(6 in possibleSolutions):
string2 = string2 + "6"
else:
string2 = string2 + " "
if(7 in possibleSolutions):
string3 = "7 "
else:
string3 = " "
if(8 in possibleSolutions):
string3 = string3 + "8 "
else:
string3 = string3 + " "
if(9 in possibleSolutions):
string3 = string3 + "9"
else:
string3 = string3 + " "
except KeyError:
string1 = "1 2 3"
string2 = "4 5 6"
string3 = "7 8 9"
therender = monofont.render(string1,1,[0,128,0])
screen.blit(therender,[width/puzzleCols*(y+0.5)-therender.get_width()/2,height/puzzleRows*(x+0.2)-therender.get_height()/2])
therender = monofont.render(string2,1,[0,128,0])
screen.blit(therender,[width/puzzleCols*(y+0.5)-therender.get_width()/2,height/puzzleRows*(x+0.5)-therender.get_height()/2])
therender = monofont.render(string3,1,[0,128,0])
screen.blit(therender,[width/puzzleCols*(y+0.5)-therender.get_width()/2,height/puzzleRows*(x+0.8)-therender.get_height()/2])
def redrawAllWhiteBoxes():
for x in range(puzzleRows):
for y in range(puzzleCols):
if thePuzzle[x][y] > 0 and not isList(thePuzzle[x][y]):
redrawWhiteBox(x,y)
try:
if oldSolutionDict[thePuzzle[x][y]] != solutionDict[thePuzzle[x][y]]:
pygame.draw.rect(screen,[255,0,0],rectForXYcell(x,y).inflate(-4,-4),3)
except KeyError:
pass
oldSolutionDict.update(solutionDict)
pygame.display.flip()
def isList(l):
"""M?thode pratique qui fonctionne avec toutes les versions 2.x de Python
pour d?terminer si oui ou non quelque chose est listlike."""
return hasattr(l, '__iter__') \
or (type(l) in (types.ListType, types.TupleType))
if not pygame.font: print 'Warning, fonts are disabled'
if not pygame.mixer: print 'Warning, sound is disabled'
def statusBarMessage(theMessage):
screen.fill([0,0,0],pygame.Rect(0,height,width,30))
therender = statusfont.render(theMessage,1,[255,255,255],[0,0,0])
screen.blit(therender,[width/2-therender.get_width()/2,height+15-therender.get_height()/2])
pygame.init()
screen = pygame.display.set_mode([width+1,height+30])
thefont = pygame.font.Font(pygame.font.get_default_font(),scalefactor/3)
monofont = pygame.font.Font(pygame.font.match_font('Courier','Courier New','Monospaced'),scalefactor/3)
bigfont = pygame.font.Font(pygame.font.get_default_font(),scalefactor/1)
statusfont = pygame.font.Font(pygame.font.get_default_font(),16)
if monofont == None:
print "le fonte n'existe pas"
for x in range(puzzleRows):
for y in range(puzzleCols):
# black box
if thePuzzle[x][y] == -1:
screen.fill([131,139,131],rectForXYcell(x,y))
pygame.draw.rect(screen,[255,255,255],rectForXYcell(x,y),1)
# Black box with the clue number(s)
elif isList(thePuzzle[x][y]):
screen.fill([131,139,131],rectForXYcell(x,y))
pygame.draw.rect(screen,[255,255,255],rectForXYcell(x,y),1)
pygame.draw.line(screen, [255,255,255], [width/puzzleCols*y,height/puzzleRows*x], [width/puzzleCols*(y+1),height/puzzleRows*(x+1)], 1)
if thePuzzle[x][y][0] != -1:
therender = thefont.render("%d" %thePuzzle[x][y][0],1,[255,255,255],[131,139,131])
screen.blit(therender,[width/puzzleCols*(y+0.75)-therender.get_width()/2,height/puzzleRows*(x+0.30)-therender.get_height()/2])
if thePuzzle[x][y][1] != -1:
therender = thefont.render("%d" %thePuzzle[x][y][1],1,[255,255,255],[131,139,131])
screen.blit(therender,[width/puzzleCols*(y+0.25)-therender.get_width()/2,height/puzzleRows*(x+0.75)-therender.get_height()/2])
# white box
else:
redrawWhiteBox(x,y)
pygame.display.flip()
waitForKeypress()
startTime = time.clock()
initSolutionDict()
reduceSearchSpace()
duration = time.clock() - startTime
grandStartTime = startTime
print "Recherche r?duction de l'espace achev?e en %.2f secondes." %duration
puzzleSolved = 0
for count in range(100):
for i in range(2):
startTime = time.clock()
removeIllegalValues(count*2-1+i)
duration = time.clock() - startTime
print "It?ration ill?gal de valeur achev?e en %.2f secondes." %duration
for i in range(2):
startTime = time.clock()
removeDuplicateValues()
duration = time.clock() - startTime
print "It?ration double de valeur achev?e en %.2f secondes." %duration
if isPuzzleSolved():
puzzleSolved = 1
break
duration = time.clock() - grandStartTime
if(puzzleSolved):
print "Puzzle r?solu en %.3f secondes." %duration
else:
print "Je ne pouvais pas r?soudre cette ?nigme, mais j'ai essay? de %.3f secondes." %duration
redrawAllWhiteBoxes()
while 1:
event = pygame.event.wait()
if event.type == pygame.QUIT:
sys.exit()
if event.type == 1:
pass
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Famille.description'
db.add_column(u'famille_famille', 'description',
self.gf('django.db.models.fields.CharField')(max_length=400, null=True, blank=True),
keep_default=False)
# Adding field 'Famille.type_garde'
db.add_column(u'famille_famille', 'type_garde',
self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True),
keep_default=False)
# Adding field 'Famille.type_presta'
db.add_column(u'famille_famille', 'type_presta',
self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True),
keep_default=False)
# Adding field 'Famille.tarif'
db.add_column(u'famille_famille', 'tarif',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
# Adding field 'Famille.diploma'
db.add_column(u'famille_famille', 'diploma',
self.gf('django.db.models.fields.CharField')(max_length=30, null=True, blank=True),
keep_default=False)
# Adding field 'Famille.menage'
db.add_column(u'famille_famille', 'menage',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Famille.repassage'
db.add_column(u'famille_famille', 'repassage',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Famille.cdt_periscolaire'
db.add_column(u'famille_famille', 'cdt_periscolaire',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Famille.sortie_ecole'
db.add_column(u'famille_famille', 'sortie_ecole',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Famille.nuit'
db.add_column(u'famille_famille', 'nuit',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Famille.non_fumeur'
db.add_column(u'famille_famille', 'non_fumeur',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Famille.devoirs'
db.add_column(u'famille_famille', 'devoirs',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Famille.urgence'
db.add_column(u'famille_famille', 'urgence',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Famille.psc1'
db.add_column(u'famille_famille', 'psc1',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Famille.permis'
db.add_column(u'famille_famille', 'permis',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Famille.langue'
db.add_column(u'famille_famille', 'langue',
self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
keep_default=False)
# Adding field 'Famille.baby'
db.add_column(u'famille_famille', 'baby',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Changing field 'Famille.city'
db.alter_column(u'famille_famille', 'city', self.gf('django.db.models.fields.CharField')(max_length=40, null=True))
# Changing field 'Famille.street'
db.alter_column(u'famille_famille', 'street', self.gf('django.db.models.fields.CharField')(max_length=100, null=True))
# Changing field 'Famille.postal_code'
db.alter_column(u'famille_famille', 'postal_code', self.gf('django.db.models.fields.CharField')(max_length=8, null=True))
def backwards(self, orm):
# Deleting field 'Famille.description'
db.delete_column(u'famille_famille', 'description')
# Deleting field 'Famille.type_garde'
db.delete_column(u'famille_famille', 'type_garde')
# Deleting field 'Famille.type_presta'
db.delete_column(u'famille_famille', 'type_presta')
# Deleting field 'Famille.tarif'
db.delete_column(u'famille_famille', 'tarif')
# Deleting field 'Famille.diploma'
db.delete_column(u'famille_famille', 'diploma')
# Deleting field 'Famille.menage'
db.delete_column(u'famille_famille', 'menage')
# Deleting field 'Famille.repassage'
db.delete_column(u'famille_famille', 'repassage')
# Deleting field 'Famille.cdt_periscolaire'
db.delete_column(u'famille_famille', 'cdt_periscolaire')
# Deleting field 'Famille.sortie_ecole'
db.delete_column(u'famille_famille', 'sortie_ecole')
# Deleting field 'Famille.nuit'
db.delete_column(u'famille_famille', 'nuit')
# Deleting field 'Famille.non_fumeur'
db.delete_column(u'famille_famille', 'non_fumeur')
# Deleting field 'Famille.devoirs'
db.delete_column(u'famille_famille', 'devoirs')
# Deleting field 'Famille.urgence'
db.delete_column(u'famille_famille', 'urgence')
# Deleting field 'Famille.psc1'
db.delete_column(u'famille_famille', 'psc1')
# Deleting field 'Famille.permis'
db.delete_column(u'famille_famille', 'permis')
# Deleting field 'Famille.langue'
db.delete_column(u'famille_famille', 'langue')
# Deleting field 'Famille.baby'
db.delete_column(u'famille_famille', 'baby')
# Changing field 'Famille.city'
db.alter_column(u'famille_famille', 'city', self.gf('django.db.models.fields.CharField')(default='', max_length=40))
# Changing field 'Famille.street'
db.alter_column(u'famille_famille', 'street', self.gf('django.db.models.fields.CharField')(default='', max_length=100))
# Changing field 'Famille.postal_code'
db.alter_column(u'famille_famille', 'postal_code', self.gf('django.db.models.fields.CharField')(default='', max_length=8))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'famille.enfant': {
'Meta': {'object_name': 'Enfant'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'e_birthday': ('django.db.models.fields.DateField', [], {'db_column': "'birthday'", 'blank': 'True'}),
'e_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_column': "'name'"}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enfants'", 'to': u"orm['famille.Famille']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'famille.famille': {
'Meta': {'object_name': 'Famille'},
'baby': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cdt_periscolaire': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'France'", 'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'devoirs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diploma': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'langue': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'menage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'non_fumeur': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nuit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'permis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'psc1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'repassage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sortie_ecole': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tarif': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'type_garde': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'type_presta': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'urgence': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'famille.prestataire': {
'Meta': {'object_name': 'Prestataire'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sub_types': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['famille']
|
import sys
import array as pyarray
from math import exp, log
from collections import namedtuple
from numpy import array, random, tile
from pyspark import SparkContext, since
from pyspark.rdd import RDD
from pyspark.mllib.common import JavaModelWrapper, callMLlibFunc, callJavaFunc, _py2java, _java2py
from pyspark.mllib.linalg import SparseVector, _convert_to_vector, DenseVector # noqa: F401
from pyspark.mllib.stat.distribution import MultivariateGaussian
from pyspark.mllib.util import Saveable, Loader, inherit_doc, JavaLoader, JavaSaveable
from pyspark.streaming import DStream
__all__ = ['BisectingKMeansModel', 'BisectingKMeans', 'KMeansModel', 'KMeans',
'GaussianMixtureModel', 'GaussianMixture', 'PowerIterationClusteringModel',
'PowerIterationClustering', 'StreamingKMeans', 'StreamingKMeansModel',
'LDA', 'LDAModel']
@inherit_doc
class BisectingKMeansModel(JavaModelWrapper):
"""
A clustering model derived from the bisecting k-means method.
.. versionadded:: 2.0.0
Examples
--------
>>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
>>> bskm = BisectingKMeans()
>>> model = bskm.train(sc.parallelize(data, 2), k=4)
>>> p = array([0.0, 0.0])
>>> model.predict(p)
0
>>> model.k
4
>>> model.computeCost(p)
0.0
"""
def __init__(self, java_model):
super(BisectingKMeansModel, self).__init__(java_model)
self.centers = [c.toArray() for c in self.call("clusterCenters")]
@property
@since('2.0.0')
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy
arrays."""
return self.centers
@property
@since('2.0.0')
def k(self):
"""Get the number of clusters"""
return self.call("k")
def predict(self, x):
"""
Find the cluster that each of the points belongs to in this
model.
.. versionadded:: 2.0.0
Parameters
----------
x : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
A data point (or RDD of points) to determine cluster index.
:py:class:`pyspark.mllib.linalg.Vector` can be replaced with equivalent
objects (list, tuple, numpy.ndarray).
Returns
-------
int or :py:class:`pyspark.RDD` of int
Predicted cluster index or an RDD of predicted cluster indices
if the input is an RDD.
"""
if isinstance(x, RDD):
vecs = x.map(_convert_to_vector)
return self.call("predict", vecs)
x = _convert_to_vector(x)
return self.call("predict", x)
def computeCost(self, x):
"""
Return the Bisecting K-means cost (sum of squared distances of
points to their nearest center) for this model on the given
data. If provided with an RDD of points, returns the sum.
.. versionadded:: 2.0.0
Parameters
----------
x : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
A data point (or RDD of points) to compute the cost(s).
:py:class:`pyspark.mllib.linalg.Vector` can be replaced with equivalent
objects (list, tuple, numpy.ndarray).
"""
if isinstance(x, RDD):
vecs = x.map(_convert_to_vector)
return self.call("computeCost", vecs)
return self.call("computeCost", _convert_to_vector(x))
class BisectingKMeans(object):
"""
A bisecting k-means algorithm based on the paper "A comparison of
document clustering techniques" by Steinbach, Karypis, and Kumar,
with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and
bisects each of them using k-means, until there are `k` leaf
clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped
together to increase parallelism. If bisecting all divisible
clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
.. versionadded:: 2.0.0
Notes
-----
See the original paper [1]_
.. [1] Steinbach, M. et al. "A Comparison of Document Clustering Techniques." (2000).
KDD Workshop on Text Mining, 2000
http://glaros.dtc.umn.edu/gkhome/fetch/papers/docclusterKDDTMW00.pdf
"""
@classmethod
def train(cls, rdd, k=4, maxIterations=20, minDivisibleClusterSize=1.0, seed=-1888008604):
"""
Runs the bisecting k-means algorithm and returns the model.
.. versionadded:: 2.0.0
Parameters
----------
rdd : :py:class:`pyspark.RDD`
Training points as an `RDD` of `Vector` or convertible
sequence types.
k : int, optional
The desired number of leaf clusters. The actual number could
be smaller if there are no divisible leaf clusters.
(default: 4)
maxIterations : int, optional
Maximum number of iterations allowed to split clusters.
(default: 20)
minDivisibleClusterSize : float, optional
Minimum number of points (if >= 1.0) or the minimum proportion
of points (if < 1.0) of a divisible cluster.
(default: 1)
seed : int, optional
Random seed value for cluster initialization.
(default: -1888008604 from classOf[BisectingKMeans].getName.##)
"""
java_model = callMLlibFunc(
"trainBisectingKMeans", rdd.map(_convert_to_vector),
k, maxIterations, minDivisibleClusterSize, seed)
return BisectingKMeansModel(java_model)
@inherit_doc
class KMeansModel(Saveable, Loader):
"""A clustering model derived from the k-means method.
.. versionadded:: 0.9.0
Examples
--------
>>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
>>> model = KMeans.train(
... sc.parallelize(data), 2, maxIterations=10, initializationMode="random",
... seed=50, initializationSteps=5, epsilon=1e-4)
>>> model.predict(array([0.0, 0.0])) == model.predict(array([1.0, 1.0]))
True
>>> model.predict(array([8.0, 9.0])) == model.predict(array([9.0, 8.0]))
True
>>> model.k
2
>>> model.computeCost(sc.parallelize(data))
2.0
>>> model = KMeans.train(sc.parallelize(data), 2)
>>> sparse_data = [
... SparseVector(3, {1: 1.0}),
... SparseVector(3, {1: 1.1}),
... SparseVector(3, {2: 1.0}),
... SparseVector(3, {2: 1.1})
... ]
>>> model = KMeans.train(sc.parallelize(sparse_data), 2, initializationMode="k-means||",
... seed=50, initializationSteps=5, epsilon=1e-4)
>>> model.predict(array([0., 1., 0.])) == model.predict(array([0, 1.1, 0.]))
True
>>> model.predict(array([0., 0., 1.])) == model.predict(array([0, 0, 1.1]))
True
>>> model.predict(sparse_data[0]) == model.predict(sparse_data[1])
True
>>> model.predict(sparse_data[2]) == model.predict(sparse_data[3])
True
>>> isinstance(model.clusterCenters, list)
True
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = KMeansModel.load(sc, path)
>>> sameModel.predict(sparse_data[0]) == model.predict(sparse_data[0])
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
>>> data = array([-383.1,-382.9, 28.7,31.2, 366.2,367.3]).reshape(3, 2)
>>> model = KMeans.train(sc.parallelize(data), 3, maxIterations=0,
... initialModel = KMeansModel([(-1000.0,-1000.0),(5.0,5.0),(1000.0,1000.0)]))
>>> model.clusterCenters
[array([-1000., -1000.]), array([ 5., 5.]), array([ 1000., 1000.])]
"""
def __init__(self, centers):
self.centers = centers
@property
@since('1.0.0')
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return self.centers
@property
@since('1.4.0')
def k(self):
"""Total number of clusters."""
return len(self.centers)
def predict(self, x):
"""
Find the cluster that each of the points belongs to in this
model.
.. versionadded:: 0.9.0
Parameters
----------
x : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
A data point (or RDD of points) to determine cluster index.
:py:class:`pyspark.mllib.linalg.Vector` can be replaced with equivalent
objects (list, tuple, numpy.ndarray).
Returns
-------
int or :py:class:`pyspark.RDD` of int
Predicted cluster index or an RDD of predicted cluster indices
if the input is an RDD.
"""
best = 0
best_distance = float("inf")
if isinstance(x, RDD):
return x.map(self.predict)
x = _convert_to_vector(x)
for i in range(len(self.centers)):
distance = x.squared_distance(self.centers[i])
if distance < best_distance:
best = i
best_distance = distance
return best
def computeCost(self, rdd):
"""
Return the K-means cost (sum of squared distances of points to
their nearest center) for this model on the given
data.
.. versionadded:: 1.4.0
Parameters
----------
rdd : :py:class:`pyspark.RDD`
The RDD of points to compute the cost on.
"""
cost = callMLlibFunc("computeCostKmeansModel", rdd.map(_convert_to_vector),
[_convert_to_vector(c) for c in self.centers])
return cost
@since('1.4.0')
def save(self, sc, path):
"""
Save this model to the given path.
"""
java_centers = _py2java(sc, [_convert_to_vector(c) for c in self.centers])
java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel(java_centers)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since('1.4.0')
def load(cls, sc, path):
"""
Load a model from the given path.
"""
java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel.load(sc._jsc.sc(), path)
return KMeansModel(_java2py(sc, java_model.clusterCenters()))
class KMeans(object):
"""
K-means clustering.
.. versionadded:: 0.9.0
"""
@classmethod
def train(cls, rdd, k, maxIterations=100, initializationMode="k-means||",
seed=None, initializationSteps=2, epsilon=1e-4, initialModel=None):
"""
Train a k-means clustering model.
.. versionadded:: 0.9.0
Parameters
----------
rdd : :py:class:`pyspark.RDD`
Training points as an `RDD` of :py:class:`pyspark.mllib.linalg.Vector`
or convertible sequence types.
k : int
Number of clusters to create.
maxIterations : int, optional
Maximum number of iterations allowed.
(default: 100)
initializationMode : str, optional
The initialization algorithm. This can be either "random" or
"k-means||".
(default: "k-means||")
seed : int, optional
Random seed value for cluster initialization. Set as None to
generate seed based on system time.
(default: None)
initializationSteps :
Number of steps for the k-means|| initialization mode.
This is an advanced setting -- the default of 2 is almost
always enough.
(default: 2)
epsilon : float, optional
Distance threshold within which a center will be considered to
have converged. If all centers move less than this Euclidean
distance, iterations are stopped.
(default: 1e-4)
initialModel : :py:class:`KMeansModel`, optional
Initial cluster centers can be provided as a KMeansModel object
rather than using the random or k-means|| initializationModel.
(default: None)
"""
clusterInitialModel = []
if initialModel is not None:
if not isinstance(initialModel, KMeansModel):
raise Exception("initialModel is of "+str(type(initialModel))+". It needs "
"to be of <type 'KMeansModel'>")
clusterInitialModel = [_convert_to_vector(c) for c in initialModel.clusterCenters]
model = callMLlibFunc("trainKMeansModel", rdd.map(_convert_to_vector), k, maxIterations,
initializationMode, seed, initializationSteps, epsilon,
clusterInitialModel)
centers = callJavaFunc(rdd.context, model.clusterCenters)
return KMeansModel([c.toArray() for c in centers])
@inherit_doc
class GaussianMixtureModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""
A clustering model derived from the Gaussian Mixture Model method.
.. versionadded:: 1.3.0
Examples
--------
>>> from pyspark.mllib.linalg import Vectors, DenseMatrix
>>> from numpy.testing import assert_equal
>>> from shutil import rmtree
>>> import os, tempfile
>>> clusterdata_1 = sc.parallelize(array([-0.1,-0.05,-0.01,-0.1,
... 0.9,0.8,0.75,0.935,
... -0.83,-0.68,-0.91,-0.76 ]).reshape(6, 2), 2)
>>> model = GaussianMixture.train(clusterdata_1, 3, convergenceTol=0.0001,
... maxIterations=50, seed=10)
>>> labels = model.predict(clusterdata_1).collect()
>>> labels[0]==labels[1]
False
>>> labels[1]==labels[2]
False
>>> labels[4]==labels[5]
True
>>> model.predict([-0.1,-0.05])
0
>>> softPredicted = model.predictSoft([-0.1,-0.05])
>>> abs(softPredicted[0] - 1.0) < 0.03
True
>>> abs(softPredicted[1] - 0.0) < 0.03
True
>>> abs(softPredicted[2] - 0.0) < 0.03
True
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = GaussianMixtureModel.load(sc, path)
>>> assert_equal(model.weights, sameModel.weights)
>>> mus, sigmas = list(
... zip(*[(g.mu, g.sigma) for g in model.gaussians]))
>>> sameMus, sameSigmas = list(
... zip(*[(g.mu, g.sigma) for g in sameModel.gaussians]))
>>> mus == sameMus
True
>>> sigmas == sameSigmas
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
>>> data = array([-5.1971, -2.5359, -3.8220,
... -5.2211, -5.0602, 4.7118,
... 6.8989, 3.4592, 4.6322,
... 5.7048, 4.6567, 5.5026,
... 4.5605, 5.2043, 6.2734])
>>> clusterdata_2 = sc.parallelize(data.reshape(5,3))
>>> model = GaussianMixture.train(clusterdata_2, 2, convergenceTol=0.0001,
... maxIterations=150, seed=4)
>>> labels = model.predict(clusterdata_2).collect()
>>> labels[0]==labels[1]
True
>>> labels[2]==labels[3]==labels[4]
True
"""
@property
@since('1.4.0')
def weights(self):
"""
Weights for each Gaussian distribution in the mixture, where weights[i] is
the weight for Gaussian i, and weights.sum == 1.
"""
return array(self.call("weights"))
@property
@since('1.4.0')
def gaussians(self):
"""
Array of MultivariateGaussian where gaussians[i] represents
the Multivariate Gaussian (Normal) Distribution for Gaussian i.
"""
return [
MultivariateGaussian(gaussian[0], gaussian[1])
for gaussian in self.call("gaussians")]
@property
@since('1.4.0')
def k(self):
"""Number of gaussians in mixture."""
return len(self.weights)
def predict(self, x):
"""
Find the cluster to which the point 'x' or each point in RDD 'x'
has maximum membership in this model.
.. versionadded:: 1.3.0
Parameters
----------
x : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
A feature vector or an RDD of vectors representing data points.
Returns
-------
numpy.float64 or :py:class:`pyspark.RDD` of int
Predicted cluster label or an RDD of predicted cluster labels
if the input is an RDD.
"""
if isinstance(x, RDD):
cluster_labels = self.predictSoft(x).map(lambda z: z.index(max(z)))
return cluster_labels
else:
z = self.predictSoft(x)
return z.argmax()
def predictSoft(self, x):
"""
Find the membership of point 'x' or each point in RDD 'x' to all mixture components.
.. versionadded:: 1.3.0
Parameters
----------
x : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
A feature vector or an RDD of vectors representing data points.
Returns
-------
numpy.ndarray or :py:class:`pyspark.RDD`
The membership value to all mixture components for vector 'x'
or each vector in RDD 'x'.
"""
if isinstance(x, RDD):
means, sigmas = zip(*[(g.mu, g.sigma) for g in self.gaussians])
membership_matrix = callMLlibFunc("predictSoftGMM", x.map(_convert_to_vector),
_convert_to_vector(self.weights), means, sigmas)
return membership_matrix.map(lambda x: pyarray.array('d', x))
else:
return self.call("predictSoft", _convert_to_vector(x)).toArray()
@classmethod
def load(cls, sc, path):
"""Load the GaussianMixtureModel from disk.
.. versionadded:: 1.5.0
Parameters
----------
sc : :py:class:`SparkContext`
path : str
Path to where the model is stored.
"""
model = cls._load_java(sc, path)
wrapper = sc._jvm.org.apache.spark.mllib.api.python.GaussianMixtureModelWrapper(model)
return cls(wrapper)
class GaussianMixture(object):
"""
Learning algorithm for Gaussian Mixtures using the expectation-maximization algorithm.
.. versionadded:: 1.3.0
"""
@classmethod
def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None, initialModel=None):
"""
Train a Gaussian Mixture clustering model.
.. versionadded:: 1.3.0
Parameters
----------
rdd : :py:class:`pyspark.RDD`
Training points as an `RDD` of :py:class:`pyspark.mllib.linalg.Vector`
or convertible sequence types.
k : int
Number of independent Gaussians in the mixture model.
convergenceTol : float, optional
Maximum change in log-likelihood at which convergence is
considered to have occurred.
(default: 1e-3)
maxIterations : int, optional
Maximum number of iterations allowed.
(default: 100)
seed : int, optional
Random seed for initial Gaussian distribution. Set as None to
generate seed based on system time.
(default: None)
initialModel : GaussianMixtureModel, optional
Initial GMM starting point, bypassing the random
initialization.
(default: None)
"""
initialModelWeights = None
initialModelMu = None
initialModelSigma = None
if initialModel is not None:
if initialModel.k != k:
raise Exception("Mismatched cluster count, initialModel.k = %s, however k = %s"
% (initialModel.k, k))
initialModelWeights = list(initialModel.weights)
initialModelMu = [initialModel.gaussians[i].mu for i in range(initialModel.k)]
initialModelSigma = [initialModel.gaussians[i].sigma for i in range(initialModel.k)]
java_model = callMLlibFunc("trainGaussianMixtureModel", rdd.map(_convert_to_vector),
k, convergenceTol, maxIterations, seed,
initialModelWeights, initialModelMu, initialModelSigma)
return GaussianMixtureModel(java_model)
class PowerIterationClusteringModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""
Model produced by :py:class:`PowerIterationClustering`.
.. versionadded:: 1.5.0
Examples
--------
>>> import math
>>> def genCircle(r, n):
... points = []
... for i in range(0, n):
... theta = 2.0 * math.pi * i / n
... points.append((r * math.cos(theta), r * math.sin(theta)))
... return points
>>> def sim(x, y):
... dist2 = (x[0] - y[0]) * (x[0] - y[0]) + (x[1] - y[1]) * (x[1] - y[1])
... return math.exp(-dist2 / 2.0)
>>> r1 = 1.0
>>> n1 = 10
>>> r2 = 4.0
>>> n2 = 40
>>> n = n1 + n2
>>> points = genCircle(r1, n1) + genCircle(r2, n2)
>>> similarities = [(i, j, sim(points[i], points[j])) for i in range(1, n) for j in range(0, i)]
>>> rdd = sc.parallelize(similarities, 2)
>>> model = PowerIterationClustering.train(rdd, 2, 40)
>>> model.k
2
>>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
>>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
True
>>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
True
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = PowerIterationClusteringModel.load(sc, path)
>>> sameModel.k
2
>>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
>>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
True
>>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
"""
@property
@since('1.5.0')
def k(self):
"""
Returns the number of clusters.
"""
return self.call("k")
@since('1.5.0')
def assignments(self):
"""
Returns the cluster assignments of this model.
"""
return self.call("getAssignments").map(
lambda x: (PowerIterationClustering.Assignment(*x)))
@classmethod
@since('1.5.0')
def load(cls, sc, path):
"""
Load a model from the given path.
"""
model = cls._load_java(sc, path)
wrapper =\
sc._jvm.org.apache.spark.mllib.api.python.PowerIterationClusteringModelWrapper(model)
return PowerIterationClusteringModel(wrapper)
class PowerIterationClustering(object):
"""
Power Iteration Clustering (PIC), a scalable graph clustering algorithm.
Developed by Lin and Cohen [1]_. From the abstract:
"PIC finds a very low-dimensional embedding of a
dataset using truncated power iteration on a normalized pair-wise
similarity matrix of the data."
.. versionadded:: 1.5.0
.. [1] Lin, Frank & Cohen, William. (2010). Power Iteration Clustering.
http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf
"""
@classmethod
def train(cls, rdd, k, maxIterations=100, initMode="random"):
r"""
Train PowerIterationClusteringModel
.. versionadded:: 1.5.0
Parameters
----------
rdd : :py:class:`pyspark.RDD`
An RDD of (i, j, s\ :sub:`ij`\) tuples representing the
affinity matrix, which is the matrix A in the PIC paper. The
similarity s\ :sub:`ij`\ must be nonnegative. This is a symmetric
matrix and hence s\ :sub:`ij`\ = s\ :sub:`ji`\ For any (i, j) with
nonzero similarity, there should be either (i, j, s\ :sub:`ij`\) or
(j, i, s\ :sub:`ji`\) in the input. Tuples with i = j are ignored,
because it is assumed s\ :sub:`ij`\ = 0.0.
k : int
Number of clusters.
maxIterations : int, optional
Maximum number of iterations of the PIC algorithm.
(default: 100)
initMode : str, optional
Initialization mode. This can be either "random" to use
a random vector as vertex properties, or "degree" to use
normalized sum similarities.
(default: "random")
"""
model = callMLlibFunc("trainPowerIterationClusteringModel",
rdd.map(_convert_to_vector), int(k), int(maxIterations), initMode)
return PowerIterationClusteringModel(model)
class Assignment(namedtuple("Assignment", ["id", "cluster"])):
"""
Represents an (id, cluster) tuple.
.. versionadded:: 1.5.0
"""
class StreamingKMeansModel(KMeansModel):
"""
Clustering model which can perform an online update of the centroids.
The update formula for each centroid is given by
- c_t+1 = ((c_t * n_t * a) + (x_t * m_t)) / (n_t + m_t)
- n_t+1 = n_t * a + m_t
where
- c_t: Centroid at the n_th iteration.
- n_t: Number of samples (or) weights associated with the centroid
at the n_th iteration.
- x_t: Centroid of the new data closest to c_t.
- m_t: Number of samples (or) weights of the new data closest to c_t
- c_t+1: New centroid.
- n_t+1: New number of weights.
- a: Decay Factor, which gives the forgetfulness.
.. versionadded:: 1.5.0
Parameters
----------
clusterCenters : list of :py:class:`pyspark.mllib.linalg.Vector` or convertible
Initial cluster centers.
clusterWeights : :py:class:`pyspark.mllib.linalg.Vector` or convertible
List of weights assigned to each cluster.
Notes
-----
If a is set to 1, it is the weighted mean of the previous
and new data. If it is set to zero, the old centroids are completely
forgotten.
Examples
--------
>>> initCenters = [[0.0, 0.0], [1.0, 1.0]]
>>> initWeights = [1.0, 1.0]
>>> stkm = StreamingKMeansModel(initCenters, initWeights)
>>> data = sc.parallelize([[-0.1, -0.1], [0.1, 0.1],
... [0.9, 0.9], [1.1, 1.1]])
>>> stkm = stkm.update(data, 1.0, "batches")
>>> stkm.centers
array([[ 0., 0.],
[ 1., 1.]])
>>> stkm.predict([-0.1, -0.1])
0
>>> stkm.predict([0.9, 0.9])
1
>>> stkm.clusterWeights
[3.0, 3.0]
>>> decayFactor = 0.0
>>> data = sc.parallelize([DenseVector([1.5, 1.5]), DenseVector([0.2, 0.2])])
>>> stkm = stkm.update(data, 0.0, "batches")
>>> stkm.centers
array([[ 0.2, 0.2],
[ 1.5, 1.5]])
>>> stkm.clusterWeights
[1.0, 1.0]
>>> stkm.predict([0.2, 0.2])
0
>>> stkm.predict([1.5, 1.5])
1
"""
def __init__(self, clusterCenters, clusterWeights):
super(StreamingKMeansModel, self).__init__(centers=clusterCenters)
self._clusterWeights = list(clusterWeights)
@property
@since('1.5.0')
def clusterWeights(self):
"""Return the cluster weights."""
return self._clusterWeights
@since('1.5.0')
def update(self, data, decayFactor, timeUnit):
"""Update the centroids, according to data
.. versionadded:: 1.5.0
Parameters
----------
data : :py:class:`pyspark.RDD`
RDD with new data for the model update.
decayFactor : float
Forgetfulness of the previous centroids.
timeUnit : str
Can be "batches" or "points". If points, then the decay factor
is raised to the power of number of new points and if batches,
then decay factor will be used as is.
"""
if not isinstance(data, RDD):
raise TypeError("Data should be of an RDD, got %s." % type(data))
data = data.map(_convert_to_vector)
decayFactor = float(decayFactor)
if timeUnit not in ["batches", "points"]:
raise ValueError(
"timeUnit should be 'batches' or 'points', got %s." % timeUnit)
vectorCenters = [_convert_to_vector(center) for center in self.centers]
updatedModel = callMLlibFunc(
"updateStreamingKMeansModel", vectorCenters, self._clusterWeights,
data, decayFactor, timeUnit)
self.centers = array(updatedModel[0])
self._clusterWeights = list(updatedModel[1])
return self
class StreamingKMeans(object):
"""
Provides methods to set k, decayFactor, timeUnit to configure the
KMeans algorithm for fitting and predicting on incoming dstreams.
More details on how the centroids are updated are provided under the
docs of StreamingKMeansModel.
.. versionadded:: 1.5.0
Parameters
----------
k : int, optional
Number of clusters.
(default: 2)
decayFactor : float, optional
Forgetfulness of the previous centroids.
(default: 1.0)
timeUnit : str, optional
Can be "batches" or "points". If points, then the decay factor is
raised to the power of number of new points and if batches, then
decay factor will be used as is.
(default: "batches")
"""
def __init__(self, k=2, decayFactor=1.0, timeUnit="batches"):
self._k = k
self._decayFactor = decayFactor
if timeUnit not in ["batches", "points"]:
raise ValueError(
"timeUnit should be 'batches' or 'points', got %s." % timeUnit)
self._timeUnit = timeUnit
self._model = None
@since('1.5.0')
def latestModel(self):
"""Return the latest model"""
return self._model
def _validate(self, dstream):
if self._model is None:
raise ValueError(
"Initial centers should be set either by setInitialCenters "
"or setRandomCenters.")
if not isinstance(dstream, DStream):
raise TypeError(
"Expected dstream to be of type DStream, "
"got type %s" % type(dstream))
@since('1.5.0')
def setK(self, k):
"""Set number of clusters."""
self._k = k
return self
@since('1.5.0')
def setDecayFactor(self, decayFactor):
"""Set decay factor."""
self._decayFactor = decayFactor
return self
@since('1.5.0')
def setHalfLife(self, halfLife, timeUnit):
"""
Set the number of batches after which the centroids of that
particular batch have half their weight.
"""
self._timeUnit = timeUnit
self._decayFactor = exp(log(0.5) / halfLife)
return self
@since('1.5.0')
def setInitialCenters(self, centers, weights):
"""
Set initial centers. Should be set before calling trainOn.
"""
self._model = StreamingKMeansModel(centers, weights)
return self
@since('1.5.0')
def setRandomCenters(self, dim, weight, seed):
"""
Set the initial centers to be random samples from
a gaussian population with constant weights.
"""
rng = random.RandomState(seed)
clusterCenters = rng.randn(self._k, dim)
clusterWeights = tile(weight, self._k)
self._model = StreamingKMeansModel(clusterCenters, clusterWeights)
return self
@since('1.5.0')
def trainOn(self, dstream):
"""Train the model on the incoming dstream."""
self._validate(dstream)
def update(rdd):
self._model.update(rdd, self._decayFactor, self._timeUnit)
dstream.foreachRDD(update)
@since('1.5.0')
def predictOn(self, dstream):
"""
Make predictions on a dstream.
Returns a transformed dstream object
"""
self._validate(dstream)
return dstream.map(lambda x: self._model.predict(x))
@since('1.5.0')
def predictOnValues(self, dstream):
"""
Make predictions on a keyed dstream.
Returns a transformed dstream object.
"""
self._validate(dstream)
return dstream.mapValues(lambda x: self._model.predict(x))
class LDAModel(JavaModelWrapper, JavaSaveable, Loader):
""" A clustering model derived from the LDA method.
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology
- "word" = "term": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over words representing some concept
.. versionadded:: 1.5.0
Notes
-----
See the original LDA paper (journal version) [1]_
.. [1] Blei, D. et al. "Latent Dirichlet Allocation."
J. Mach. Learn. Res. 3 (2003): 993-1022.
https://www.jmlr.org/papers/v3/blei03a
Examples
--------
>>> from pyspark.mllib.linalg import Vectors
>>> from numpy.testing import assert_almost_equal, assert_equal
>>> data = [
... [1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],
... ]
>>> rdd = sc.parallelize(data)
>>> model = LDA.train(rdd, k=2, seed=1)
>>> model.vocabSize()
2
>>> model.describeTopics()
[([1, 0], [0.5..., 0.49...]), ([0, 1], [0.5..., 0.49...])]
>>> model.describeTopics(1)
[([1], [0.5...]), ([0], [0.5...])]
>>> topics = model.topicsMatrix()
>>> topics_expect = array([[0.5, 0.5], [0.5, 0.5]])
>>> assert_almost_equal(topics, topics_expect, 1)
>>> import os, tempfile
>>> from shutil import rmtree
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = LDAModel.load(sc, path)
>>> assert_equal(sameModel.topicsMatrix(), model.topicsMatrix())
>>> sameModel.vocabSize() == model.vocabSize()
True
>>> try:
... rmtree(path)
... except OSError:
... pass
"""
@since('1.5.0')
def topicsMatrix(self):
"""Inferred topics, where each topic is represented by a distribution over terms."""
return self.call("topicsMatrix").toArray()
@since('1.5.0')
def vocabSize(self):
"""Vocabulary size (number of terms or terms in the vocabulary)"""
return self.call("vocabSize")
def describeTopics(self, maxTermsPerTopic=None):
"""Return the topics described by weighted terms.
.. versionadded:: 1.6.0
.. warning:: If vocabSize and k are large, this can return a large object!
Parameters
----------
maxTermsPerTopic : int, optional
Maximum number of terms to collect for each topic.
(default: vocabulary size)
Returns
-------
list
Array over topics. Each topic is represented as a pair of
matching arrays: (term indices, term weights in topic).
Each topic's terms are sorted in order of decreasing weight.
"""
if maxTermsPerTopic is None:
topics = self.call("describeTopics")
else:
topics = self.call("describeTopics", maxTermsPerTopic)
return topics
@classmethod
def load(cls, sc, path):
"""Load the LDAModel from disk.
.. versionadded:: 1.5.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
path : str
Path to where the model is stored.
"""
if not isinstance(sc, SparkContext):
raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, str):
raise TypeError("path should be a string, got type %s" % type(path))
model = callMLlibFunc("loadLDAModel", sc, path)
return LDAModel(model)
class LDA(object):
"""
Train Latent Dirichlet Allocation (LDA) model.
.. versionadded:: 1.5.0
"""
@classmethod
def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0,
topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"):
"""Train a LDA model.
.. versionadded:: 1.5.0
Parameters
----------
rdd : :py:class:`pyspark.RDD`
RDD of documents, which are tuples of document IDs and term
(word) count vectors. The term count vectors are "bags of
words" with a fixed-size vocabulary (where the vocabulary size
is the length of the vector). Document IDs must be unique
and >= 0.
k : int, optional
Number of topics to infer, i.e., the number of soft cluster
centers.
(default: 10)
maxIterations : int, optional
Maximum number of iterations allowed.
(default: 20)
docConcentration : float, optional
Concentration parameter (commonly named "alpha") for the prior
placed on documents' distributions over topics ("theta").
(default: -1.0)
topicConcentration : float, optional
Concentration parameter (commonly named "beta" or "eta") for
the prior placed on topics' distributions over terms.
(default: -1.0)
seed : int, optional
Random seed for cluster initialization. Set as None to generate
seed based on system time.
(default: None)
checkpointInterval : int, optional
Period (in iterations) between checkpoints.
(default: 10)
optimizer : str, optional
LDAOptimizer used to perform the actual calculation. Currently
"em", "online" are supported.
(default: "em")
"""
model = callMLlibFunc("trainLDAModel", rdd, k, maxIterations,
docConcentration, topicConcentration, seed,
checkpointInterval, optimizer)
return LDAModel(model)
def _test():
import doctest
import numpy
import pyspark.mllib.clustering
try:
        # NumPy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.mllib.clustering.__dict__.copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
1. Feature type:
* Continuous features can be represented by `numeric_column`.
* Categorical features can be represented by any `categorical_column_with_*`
column:
- `categorical_column_with_vocabulary_list`
- `categorical_column_with_vocabulary_file`
- `categorical_column_with_hash_bucket`
- `categorical_column_with_identity`
- `weighted_categorical_column`
2. Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = numeric_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `indicator_column`. `indicator_column` is recommended
for features with only a few possible values. For features with many
possible values, to reduce the size of your model, `embedding_column` is
recommended.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
"department", ["math", "philosophy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. They behave like an
indicator column but with an efficient implementation.
dept_column = categorical_column_with_vocabulary_list("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=["department", bucketized_age_column],
hash_bucket_size=1000)
Example of building canned `Estimator`s using FeatureColumns:
```python
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
```
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.
Example of building a model using FeatureColumns; this can be used in a
`model_fn` which is given to the `tf.estimator.Estimator`:
```python
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_layer(
features=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
```
NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
NOTE: The new feature columns are being developed in feature_column_v2.py and
partially duplicate the code here. Please make sure to update logic
in both places.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine import training
from tensorflow.python.layers import base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def _internal_input_layer(features,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None,
scope=None,
cols_to_output_tensors=None,
from_template=False):
"""See input_layer. `scope` is a name or variable scope to use."""
feature_columns = _normalize_feature_columns(feature_columns)
for column in feature_columns:
if not isinstance(column, _DenseColumn):
raise ValueError(
'Items of feature_columns must be a _DenseColumn. '
'You can wrap a categorical column with an '
'embedding_column or indicator_column. Given: {}'.format(column))
weight_collections = list(weight_collections or [])
if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
def _get_logits(): # pylint: disable=missing-docstring
builder = _LazyBuilder(features)
output_tensors = []
ordered_columns = []
for column in sorted(feature_columns, key=lambda x: x.name):
ordered_columns.append(column)
with variable_scope.variable_scope(
None, default_name=column._var_scope_name): # pylint: disable=protected-access
tensor = column._get_dense_tensor( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access
batch_size = array_ops.shape(tensor)[0]
output_tensor = array_ops.reshape(
tensor, shape=(batch_size, num_elements))
output_tensors.append(output_tensor)
if cols_to_vars is not None:
# Retrieve any variables created (some _DenseColumn's don't create
# variables, in which case an empty list is returned).
cols_to_vars[column] = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES,
scope=variable_scope.get_variable_scope().name)
if cols_to_output_tensors is not None:
cols_to_output_tensors[column] = output_tensor
_verify_static_batch_size_equality(output_tensors, ordered_columns)
return array_ops.concat(output_tensors, 1)
# If we're constructing from the `make_template`, that by default adds a
  # variable scope with the name of the layer. In that case, we don't want to
# add another `variable_scope` as that would break checkpoints.
if from_template:
return _get_logits()
else:
with variable_scope.variable_scope(
scope, default_name='input_layer', values=features.values()):
return _get_logits()
@tf_export(v1=['feature_column.input_layer'])
def input_layer(features,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None,
cols_to_output_tensors=None):
"""Returns a dense `Tensor` as input layer based on given `feature_columns`.
Generally a single example in training data is described with FeatureColumns.
At the first layer of the model, this column oriented data should be converted
to a single `Tensor`.
Example:
```python
price = numeric_column('price')
keywords_embedded = embedding_column(
      categorical_column_with_hash_bucket("keywords", 10K), dimension=16)
columns = [price, keywords_embedded, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
for units in [128, 64, 32]:
dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu)
prediction = tf.layers.dense(dense_tensor, 1)
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
      this dict. Values can be a `SparseTensor` or a `Tensor` depending on
corresponding `_FeatureColumn`.
feature_columns: An iterable containing the FeatureColumns to use as inputs
to your model. All items should be instances of classes derived from
`_DenseColumn` such as `numeric_column`, `embedding_column`,
`bucketized_column`, `indicator_column`. If you have categorical features,
you can wrap them with an `embedding_column` or `indicator_column`.
weight_collections: A list of collection names to which the Variable will be
added. Note that variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
cols_to_vars: If not `None`, must be a dictionary that will be filled with a
mapping from `_FeatureColumn` to list of `Variable`s. For example, after
the call, we might have cols_to_vars =
{_EmbeddingColumn(
categorical_column=_HashedCategoricalColumn(
key='sparse_feature', hash_bucket_size=5, dtype=tf.string),
          dimension=10): [<tf.Variable 'some_variable:0' shape=(5, 10)>,
                         <tf.Variable 'some_variable:1' shape=(5, 10)>]}
If a column creates no variables, its value will be an empty list.
cols_to_output_tensors: If not `None`, must be a dictionary that will be
filled with a mapping from '_FeatureColumn' to the associated
output `Tensor`s.
Returns:
A `Tensor` which represents input layer of a model. Its shape
is (batch_size, first_layer_dimension) and its dtype is `float32`.
first_layer_dimension is determined based on given `feature_columns`.
Raises:
ValueError: if an item in `feature_columns` is not a `_DenseColumn`.
"""
return _internal_input_layer(
features,
feature_columns,
weight_collections=weight_collections,
trainable=trainable,
cols_to_vars=cols_to_vars,
cols_to_output_tensors=cols_to_output_tensors)
class InputLayer(object):
"""An object-oriented version of `input_layer` that reuses variables."""
def __init__(self,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None,
name='feature_column_input_layer',
create_scope_now=True):
"""See `input_layer`."""
self._feature_columns = feature_columns
self._weight_collections = weight_collections
self._trainable = trainable
self._cols_to_vars = cols_to_vars
self._name = name
self._input_layer_template = template.make_template(
self._name, _internal_input_layer, create_scope_now_=create_scope_now)
self._scope = self._input_layer_template.variable_scope
def __call__(self, features):
return self._input_layer_template(
features=features,
feature_columns=self._feature_columns,
weight_collections=self._weight_collections,
trainable=self._trainable,
cols_to_vars=None,
from_template=True)
@property
def name(self):
return self._name
@property
def non_trainable_variables(self):
return self._input_layer_template.non_trainable_variables
@property
def non_trainable_weights(self):
return self._input_layer_template.non_trainable_weights
@property
def trainable_variables(self):
return self._input_layer_template.trainable_variables
@property
def trainable_weights(self):
return self._input_layer_template.trainable_weights
@property
def variables(self):
return self._input_layer_template.variables
@property
def weights(self):
return self._input_layer_template.weights
@tf_export(v1=['feature_column.linear_model'])
def linear_model(features,
feature_columns,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
cols_to_vars=None):
"""Returns a linear prediction `Tensor` based on given `feature_columns`.
This function generates a weighted sum based on output dimension `units`.
Weighted sum refers to logits in classification problems. It refers to the
prediction itself for linear regression problems.
Note on supported columns: `linear_model` treats categorical columns as
  `indicator_column`s. To be specific, assume the input `SparseTensor` looks
like:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
  `linear_model` assigns weights for the presence of "a", "b", "c" implicitly,
just like `indicator_column`, while `input_layer` explicitly requires wrapping
each of categorical columns with an `embedding_column` or an
`indicator_column`.
Example of usage:
```python
price = numeric_column('price')
price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
keywords = categorical_column_with_hash_bucket("keywords", 10K)
  keywords_price = crossed_column(['keywords', price_buckets], ...)
  columns = [price_buckets, keywords, keywords_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
prediction = linear_model(features, columns)
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values are `Tensor` or `SparseTensor` depending on
corresponding `_FeatureColumn`.
feature_columns: An iterable containing the FeatureColumns to use as inputs
to your model. All items should be instances of classes derived from
`_FeatureColumn`s.
units: An integer, dimensionality of the output space. Default value is 1.
sparse_combiner: A string specifying how to reduce if a categorical column
      is multivalent. Except for `numeric_column`, almost all columns passed to
      `linear_model` are treated as categorical columns. It combines each
categorical column independently. Currently "mean", "sqrtn" and "sum" are
supported, with "sum" the default for linear model. "sqrtn" often achieves
good accuracy, in particular with bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For example, for two features represented as the categorical columns:
```python
# Feature 1
shape = [2, 2]
{
[0, 0]: "a"
[0, 1]: "b"
[1, 0]: "c"
}
# Feature 2
shape = [2, 3]
{
[0, 0]: "d"
[1, 0]: "e"
[1, 1]: "f"
[1, 2]: "f"
}
```
    with `sparse_combiner` as "mean", the resulting linear model outputs are:
```
y_0 = 1.0 / 2.0 * ( w_a + w_b ) + w_d + b
y_1 = w_c + 1.0 / 3.0 * ( w_e + 2.0 * w_f ) + b
```
where `y_i` is the output, `b` is the bias, and `w_x` is the weight
assigned to the presence of `x` in the input features.
weight_collections: A list of collection names to which the Variable will be
      added. Note that variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
cols_to_vars: If not `None`, must be a dictionary that will be filled with a
mapping from `_FeatureColumn` to associated list of `Variable`s. For
example, after the call, we might have cols_to_vars = {
_NumericColumn(
          key='numeric_feature1', shape=(1,)):
[<tf.Variable 'linear_model/price2/weights:0' shape=(1, 1)>],
'bias': [<tf.Variable 'linear_model/bias_weights:0' shape=(1,)>],
_NumericColumn(
key='numeric_feature2', shape=(2,)):
[<tf.Variable 'linear_model/price1/weights:0' shape=(2, 1)>]}
If a column creates no variables, its value will be an empty list. Note
that cols_to_vars will also contain a string key 'bias' that maps to a
list of Variables.
Returns:
A `Tensor` which represents predictions/logits of a linear model. Its shape
is (batch_size, units) and its dtype is `float32`.
Raises:
ValueError: if an item in `feature_columns` is neither a `_DenseColumn`
nor `_CategoricalColumn`.
"""
with variable_scope.variable_scope(None, 'linear_model') as vs:
model_name = _strip_leading_slashes(vs.name)
linear_model_layer = _LinearModel(
feature_columns=feature_columns,
units=units,
sparse_combiner=sparse_combiner,
weight_collections=weight_collections,
trainable=trainable,
name=model_name)
retval = linear_model_layer(features) # pylint: disable=not-callable
if cols_to_vars is not None:
cols_to_vars.update(linear_model_layer.cols_to_vars())
return retval
def _add_to_collections(var, weight_collections):
"""Adds a var to the list of weight_collections provided.
Handles the case for partitioned and non-partitioned variables.
Args:
var: A variable or Partitioned Variable.
weight_collections: List of collections to add variable to.
"""
for weight_collection in weight_collections:
# The layer self.add_variable call already adds it to GLOBAL_VARIABLES.
if weight_collection == ops.GraphKeys.GLOBAL_VARIABLES:
continue
# TODO(rohanj): Explore adding a _get_variable_list method on `Variable`
# so that we don't have to do this check.
if isinstance(var, variables.PartitionedVariable):
for constituent_var in list(var):
ops.add_to_collection(weight_collection, constituent_var)
else:
ops.add_to_collection(weight_collection, var)
class _FCLinearWrapper(base.Layer):
"""Wraps a _FeatureColumn in a layer for use in a linear model.
See `linear_model` above.
"""
def __init__(self,
feature_column,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
name=None,
**kwargs):
super(_FCLinearWrapper, self).__init__(
trainable=trainable, name=name, **kwargs)
self._feature_column = feature_column
self._units = units
self._sparse_combiner = sparse_combiner
self._weight_collections = weight_collections
def build(self, _):
if isinstance(self._feature_column, _CategoricalColumn):
weight = self.add_variable(
name='weights',
shape=(self._feature_column._num_buckets, self._units), # pylint: disable=protected-access
initializer=init_ops.zeros_initializer(),
trainable=self.trainable)
else:
num_elements = self._feature_column._variable_shape.num_elements() # pylint: disable=protected-access
weight = self.add_variable(
name='weights',
shape=[num_elements, self._units],
initializer=init_ops.zeros_initializer(),
trainable=self.trainable)
_add_to_collections(weight, self._weight_collections)
self._weight_var = weight
self.built = True
def call(self, builder):
weighted_sum = _create_weighted_sum(
column=self._feature_column,
builder=builder,
units=self._units,
sparse_combiner=self._sparse_combiner,
weight_collections=self._weight_collections,
trainable=self.trainable,
weight_var=self._weight_var)
return weighted_sum
class _BiasLayer(base.Layer):
"""A layer for the bias term.
"""
def __init__(self,
units=1,
trainable=True,
weight_collections=None,
name=None,
**kwargs):
super(_BiasLayer, self).__init__(trainable=trainable, name=name, **kwargs)
self._units = units
self._weight_collections = weight_collections
def build(self, _):
self._bias_variable = self.add_variable(
'bias_weights',
shape=[self._units],
initializer=init_ops.zeros_initializer(),
trainable=self.trainable)
_add_to_collections(self._bias_variable, self._weight_collections)
self.built = True
def call(self, _):
return self._bias_variable
def _get_expanded_variable_list(variable):
if (isinstance(variable, variables.Variable) or
resource_variable_ops.is_resource_variable(variable)):
return [variable] # Single variable case.
else: # Must be a PartitionedVariable, so convert into a list.
return list(variable)
def _strip_leading_slashes(name):
return name.rsplit('/', 1)[-1]
class _LinearModel(training.Model):
"""Creates a linear model using feature columns.
See `linear_model` for details.
"""
def __init__(self,
feature_columns,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
name=None,
**kwargs):
super(_LinearModel, self).__init__(name=name, **kwargs)
self._feature_columns = _normalize_feature_columns(
feature_columns)
self._weight_collections = list(weight_collections or [])
if ops.GraphKeys.GLOBAL_VARIABLES not in self._weight_collections:
self._weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
if ops.GraphKeys.MODEL_VARIABLES not in self._weight_collections:
self._weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
column_layers = {}
for column in sorted(self._feature_columns, key=lambda x: x.name):
with variable_scope.variable_scope(
None, default_name=column._var_scope_name) as vs: # pylint: disable=protected-access
# Having the fully expressed variable scope name ends up doubly
# expressing the outer scope (scope with which this method was called)
# in the name of the variable that would get created.
column_name = _strip_leading_slashes(vs.name)
column_layer = _FCLinearWrapper(column, units, sparse_combiner,
self._weight_collections, trainable,
column_name, **kwargs)
column_layers[column_name] = column_layer
self._column_layers = self._add_layers(column_layers)
self._bias_layer = _BiasLayer(
units=units,
trainable=trainable,
weight_collections=self._weight_collections,
name='bias_layer',
**kwargs)
self._cols_to_vars = {}
def cols_to_vars(self):
"""Returns a dict mapping _FeatureColumns to variables.
See `linear_model` for more information.
    This is not populated until `call` is invoked, i.e. until the layer is built.
"""
return self._cols_to_vars
def call(self, features):
with variable_scope.variable_scope(self.name):
for column in self._feature_columns:
if not isinstance(column, (_DenseColumn, _CategoricalColumn)):
raise ValueError(
'Items of feature_columns must be either a '
'_DenseColumn or _CategoricalColumn. Given: {}'.format(column))
weighted_sums = []
ordered_columns = []
builder = _LazyBuilder(features)
for layer in sorted(self._column_layers.values(), key=lambda x: x.name):
column = layer._feature_column # pylint: disable=protected-access
ordered_columns.append(column)
weighted_sum = layer(builder)
weighted_sums.append(weighted_sum)
self._cols_to_vars[column] = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES, scope=layer.scope_name)
_verify_static_batch_size_equality(weighted_sums, ordered_columns)
predictions_no_bias = math_ops.add_n(
weighted_sums, name='weighted_sum_no_bias')
predictions = nn_ops.bias_add(
predictions_no_bias,
self._bias_layer( # pylint: disable=not-callable
builder,
scope=variable_scope.get_variable_scope()), # pylint: disable=not-callable
name='weighted_sum')
bias = self._bias_layer.variables[0]
self._cols_to_vars['bias'] = _get_expanded_variable_list(bias)
return predictions
def _add_layers(self, layers):
# "Magic" required for keras.Model classes to track all the variables in
# a list of layers.Layer objects.
# TODO(ashankar): Figure out API so user code doesn't have to do this.
for name, layer in layers.items():
setattr(self, 'layer-%s' % name, layer)
return layers
def _transform_features(features, feature_columns):
"""Returns transformed features based on features columns passed in.
Please note that most probably you would not need to use this function. Please
check `input_layer` and `linear_model` to see whether they will
satisfy your use case or not.
Example:
```python
# Define features and transformations
crosses_a_x_b = crossed_column(
columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
price_buckets = bucketized_column(
source_column=numeric_column("price"), boundaries=[...])
columns = [crosses_a_x_b, price_buckets]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
transformed = transform_features(features=features, feature_columns=columns)
assertCountEqual(columns, transformed.keys())
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
      this dict. Values can be a `SparseTensor` or a `Tensor` depending on
corresponding `_FeatureColumn`.
feature_columns: An iterable containing all the `_FeatureColumn`s.
Returns:
A `dict` mapping `_FeatureColumn` to `Tensor` and `SparseTensor` values.
"""
feature_columns = _normalize_feature_columns(feature_columns)
outputs = {}
with ops.name_scope(
None, default_name='transform_features', values=features.values()):
builder = _LazyBuilder(features)
for column in sorted(feature_columns, key=lambda x: x.name):
with ops.name_scope(None, default_name=column.name):
outputs[column] = builder.get(column)
return outputs
@tf_export(v1=['feature_column.make_parse_example_spec'])
def make_parse_example_spec(feature_columns):
"""Creates parsing spec dictionary from input feature_columns.
The returned dictionary can be used as arg 'features' in `tf.parse_example`.
Typical usage example:
```python
# Define features and transformations
feature_a = categorical_column_with_vocabulary_file(...)
feature_b = numeric_column(...)
feature_c_bucketized = bucketized_column(numeric_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=["feature_a", feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
features = tf.parse_example(
serialized=serialized_examples,
features=make_parse_example_spec(feature_columns))
```
For the above example, make_parse_example_spec would return the dict:
```python
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
```
Args:
feature_columns: An iterable containing all feature columns. All items
should be instances of classes derived from `_FeatureColumn`.
Returns:
A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
value.
Raises:
ValueError: If any of the given `feature_columns` is not a `_FeatureColumn`
instance.
"""
result = {}
for column in feature_columns:
if not isinstance(column, _FeatureColumn):
raise ValueError(
'All feature_columns must be _FeatureColumn instances. '
'Given: {}'.format(column))
config = column._parse_example_spec # pylint: disable=protected-access
for key, value in six.iteritems(config):
if key in result and value != result[key]:
raise ValueError(
'feature_columns contain different parse_spec for key '
'{}. Given {} and {}'.format(key, value, result[key]))
result.update(config)
return result
def _embedding_column(categorical_column,
dimension,
combiner='mean',
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""`_DenseColumn` that converts from sparse, categorical input.
Use this when your inputs are sparse, but you want to convert them to a dense
representation (e.g., to feed to a DNN).
Inputs must be a `_CategoricalColumn` created by any of the
  `categorical_column_*` functions. Here is an example of using
`embedding_column` with `DNNClassifier`:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `embedding_column` with model_fn:
```python
def model_fn(features, ...):
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
Args:
categorical_column: A `_CategoricalColumn` created by a
`categorical_column_with_*` function. This column produces the sparse IDs
that are inputs to the embedding lookup.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, embedding values are l2-normalized to this value.
trainable: Whether or not the embedding is trainable. Default is True.
Returns:
`_DenseColumn` that converts from sparse input.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: If eager execution is enabled.
"""
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
embedding_shape = categorical_column._num_buckets, dimension # pylint: disable=protected-access
def _creator(weight_collections, scope):
embedding_column_layer = _EmbeddingColumnLayer(
embedding_shape=embedding_shape,
initializer=initializer,
weight_collections=weight_collections,
trainable=trainable,
name='embedding_column_layer')
return embedding_column_layer(None, scope=scope) # pylint: disable=not-callable
return _EmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
layer_creator=_creator,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable)
def _numeric_column(key,
shape=(1,),
default_value=None,
dtype=dtypes.float32,
normalizer_fn=None):
"""Represents real valued or numerical features.
Example:
```python
price = numeric_column('price')
columns = [price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
# or
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
    shape: An iterable of integers specifying the shape of the `Tensor`. An
      integer can be given, which means a single-dimension `Tensor` with the
      given width. The `Tensor` representing the column will have the shape of
[batch_size] + `shape`.
default_value: A single value compatible with `dtype` or an iterable of
values compatible with `dtype` which the column takes on during
`tf.Example` parsing if data is missing. A default value of `None` will
cause `tf.parse_example` to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every item. If an iterable of values is provided,
the shape of the `default_value` should be equal to the given `shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
Normalizer function takes the input `Tensor` as its argument, and returns
the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that
      even though the most common use case of this function is normalization, it
      can be used for any kind of TensorFlow transformation.
Returns:
A `_NumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int
ValueError: if any dimension in shape is not a positive integer
TypeError: if `default_value` is an iterable but not compatible with `shape`
TypeError: if `default_value` is not compatible with `dtype`.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = _check_shape(shape, key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
default_value = fc_utils.check_default_value(
shape, default_value, dtype, key)
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
fc_utils.assert_key_is_string(key)
return _NumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
def _bucketized_column(source_column, boundaries):
"""Represents discretized dense input.
Buckets include the left boundary, and exclude the right boundary. Namely,
`boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
`[1., 2.)`, and `[2., +inf)`.
For example, if the inputs are
```python
boundaries = [0, 10, 100]
input tensor = [[-5, 10000]
[150, 10]
[5, 100]]
```
then the output will be
```python
output = [[0, 3]
[3, 2]
[1, 3]]
```
Example:
```python
price = numeric_column('price')
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
`bucketized_column` can also be crossed with another categorical column using
`crossed_column`:
```python
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
# 'keywords' is a string feature.
price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)
columns = [price_x_keywords, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
source_column: A one-dimensional dense column which is generated with
`numeric_column`.
boundaries: A sorted list or tuple of floats specifying the boundaries.
Returns:
A `_BucketizedColumn`.
Raises:
ValueError: If `source_column` is not a numeric column, or if it is not
one-dimensional.
ValueError: If `boundaries` is not a sorted list or tuple.
"""
if not isinstance(source_column, _NumericColumn):
raise ValueError(
'source_column must be a column generated with numeric_column(). '
'Given: {}'.format(source_column))
if len(source_column.shape) > 1:
raise ValueError(
'source_column must be one-dimensional column. '
'Given: {}'.format(source_column))
if (not boundaries or
not (isinstance(boundaries, list) or isinstance(boundaries, tuple))):
raise ValueError('boundaries must be a sorted list.')
for i in range(len(boundaries) - 1):
if boundaries[i] >= boundaries[i + 1]:
raise ValueError('boundaries must be a sorted list.')
return _BucketizedColumn(source_column, tuple(boundaries))
def _categorical_column_with_hash_bucket(key,
hash_bucket_size,
dtype=dtypes.string):
"""Represents sparse feature where ids are set by hashing.
Use this when your sparse features are in string or integer format, and you
want to distribute your inputs into a finite number of buckets by hashing.
output_id = Hash(input_feature_string) % bucket_size for string type input.
For int type input, the value is converted to its string representation first
and then hashed by the same formula.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example:
```python
keywords = categorical_column_with_hash_bucket("keywords", 10K)
columns = [keywords, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
keywords_embedded = embedding_column(keywords, 16)
columns = [keywords_embedded, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_HashedCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not greater than 1.
ValueError: `dtype` is neither string nor integer.
"""
if hash_bucket_size is None:
raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))
if hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be at least 1. '
'hash_bucket_size: {}, key: {}'.format(
hash_bucket_size, key))
fc_utils.assert_key_is_string(key)
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return _HashedCategoricalColumn(key, hash_bucket_size, dtype)
def _categorical_column_with_vocabulary_file(key,
vocabulary_file,
vocabulary_size=None,
num_oov_buckets=0,
default_value=None,
dtype=dtypes.string):
"""A `_CategoricalColumn` with a vocabulary file.
Use this when your inputs are in string or integer format, and you have a
vocabulary file that maps each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state
  abbreviation. All inputs with values in that file are assigned an ID 0-49,
  corresponding to their line number. All other values are hashed and assigned an
ID 50-54.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
columns = [states, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Example with `default_value`:
File '/us/states.txt' contains 51 lines - the first line is 'XX', and the
other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX'
in input, and other values missing from the file, will be assigned ID 0. All
others are assigned the corresponding line number 1-50.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
default_value=0)
columns = [states, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(states, 3),...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later
      values are ignored. If None, it is set to the length of `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_CategoricalColumn` with a vocabulary file.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
if not vocabulary_file:
raise ValueError('Missing vocabulary_file in {}.'.format(key))
if vocabulary_size is None:
if not gfile.Exists(vocabulary_file):
raise ValueError('vocabulary_file in {} does not exist.'.format(key))
with gfile.GFile(vocabulary_file) as f:
vocabulary_size = sum(1 for _ in f)
logging.info(
'vocabulary_size = %d in %s is inferred from the number of elements '
'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
# `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
if vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size in {}.'.format(key))
if num_oov_buckets:
if default_value is not None:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
fc_utils.assert_key_is_string(key)
return _VocabularyFileCategoricalColumn(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,
default_value=-1 if default_value is None else default_value,
dtype=dtype)
def _categorical_column_with_vocabulary_list(key,
vocabulary_list,
dtype=None,
default_value=-1,
num_oov_buckets=0):
"""A `_CategoricalColumn` with in-memory vocabulary.
Use this when your inputs are in string or integer format, and you have an
in-memory vocabulary mapping each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-3 corresponding to its index (e.g., input 'B' produces output 2). All other
inputs are hashed and assigned an ID 4-5.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
columns = [colors, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Example with `default_value`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-4 corresponding to its index (e.g., input 'B' produces output 3). All other
inputs are assigned `default_value` 0.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
columns = [colors, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(colors, 3),...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported.
If `None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `_CategoricalColumn` with in-memory vocabulary.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
if (vocabulary_list is None) or (len(vocabulary_list) < 1):
raise ValueError(
'vocabulary_list {} must be non-empty, column_name: {}'.format(
vocabulary_list, key))
if len(set(vocabulary_list)) != len(vocabulary_list):
raise ValueError(
'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
vocabulary_list, key))
vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
if num_oov_buckets:
if default_value != -1:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
fc_utils.assert_string_or_int(
vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
if dtype is None:
dtype = vocabulary_dtype
elif dtype.is_integer != vocabulary_dtype.is_integer:
raise ValueError(
'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
dtype, vocabulary_dtype, key))
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
fc_utils.assert_key_is_string(key)
return _VocabularyListCategoricalColumn(
key=key, vocabulary_list=tuple(vocabulary_list), dtype=dtype,
default_value=default_value, num_oov_buckets=num_oov_buckets)
def _categorical_column_with_identity(key, num_buckets, default_value=None):
"""A `_CategoricalColumn` that returns identity values.
Use this when your inputs are integers in the range `[0, num_buckets)`, and
you want to use the input value itself as the categorical ID. Values outside
this range will result in `default_value` if specified, otherwise it will
fail.
Typically, this is used for contiguous ranges of integer indexes, but
  it doesn't have to be. This might be inefficient, however, if many IDs
  are unused. Consider `categorical_column_with_hash_bucket` in that case.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
  In the following examples, each input in the range `[0, 1000000)` is
  assigned its own value as the ID. All other inputs are assigned
  `default_value` 0. Note that a
literal 0 in inputs will result in the same default ID.
Linear model:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [video_id, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Embedding for a DNN model:
```python
columns = [embedding_column(video_id, 9),...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range
      `[0, num_buckets)`, and out-of-range inputs will be replaced with it.
Returns:
A `_CategoricalColumn` that returns identity values.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
if num_buckets < 1:
raise ValueError(
'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
if (default_value is not None) and (
(default_value < 0) or (default_value >= num_buckets)):
raise ValueError(
'default_value {} not in range [0, {}), column_name {}'.format(
default_value, num_buckets, key))
fc_utils.assert_key_is_string(key)
return _IdentityCategoricalColumn(
key=key, num_buckets=num_buckets, default_value=default_value)
def _indicator_column(categorical_column):
"""Represents multi-hot representation of given categorical column.
- For DNN model, `indicator_column` can be used to wrap any
    `categorical_column_*` (e.g., to feed to DNN). Consider using
    `embedding_column` if the number of buckets/unique values is large.
- For Wide (aka linear) model, `indicator_column` is the internal
representation for categorical column when passing categorical column
directly (as any element in feature_columns) to `linear_model`. See
`linear_model` for details.
```python
name = indicator_column(categorical_column_with_vocabulary_list(
      'name', ['bob', 'george', 'wanda']))
columns = [name, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"]
dense_tensor == [[1, 0, 1]] # If "name" bytes_list is ["bob", "wanda"]
dense_tensor == [[2, 0, 0]] # If "name" bytes_list is ["bob", "bob"]
```
Args:
categorical_column: A `_CategoricalColumn` which is created by
`categorical_column_with_*` or `crossed_column` functions.
Returns:
An `_IndicatorColumn`.
"""
return _IndicatorColumn(categorical_column)
def _weighted_categorical_column(categorical_column,
weight_feature_key,
dtype=dtypes.float32):
"""Applies weight values to a `_CategoricalColumn`.
Use this when each of your sparse inputs has both an ID and a value. For
example, if you're representing text documents as a collection of word
frequencies, you can provide 2 parallel sparse input features ('terms' and
'frequencies' below).
Example:
Input `tf.Example` objects:
```proto
[
features {
feature {
key: "terms"
value {bytes_list {value: "very" value: "model"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.3 value: 0.1}}
}
},
features {
feature {
key: "terms"
value {bytes_list {value: "when" value: "course" value: "human"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.4 value: 0.1 value: 0.2}}
}
}
]
```
```python
categorical_column = categorical_column_with_hash_bucket(
column_name='terms', hash_bucket_size=1000)
weighted_column = weighted_categorical_column(
categorical_column=categorical_column, weight_feature_key='frequencies')
columns = [weighted_column, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
This assumes the input dictionary contains a `SparseTensor` for key
'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have
the same indices and dense shape.
Args:
categorical_column: A `_CategoricalColumn` created by
`categorical_column_with_*` functions.
weight_feature_key: String key for weight values.
dtype: Type of weights, such as `tf.float32`. Only float and integer weights
are supported.
Returns:
A `_CategoricalColumn` composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if `dtype` is not convertible to float.
"""
if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype {} is not convertible to float.'.format(dtype))
return _WeightedCategoricalColumn(
categorical_column=categorical_column,
weight_feature_key=weight_feature_key,
dtype=dtype)
def _crossed_column(keys, hash_bucket_size, hash_key=None):
"""Returns a column for performing crosses of categorical features.
Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
the transformation can be thought of as:
Hash(cartesian product of features) % `hash_bucket_size`
For example, if the input features are:
* SparseTensor referred by first key:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
* SparseTensor referred by second key:
```python
shape = [2, 1]
{
[0, 0]: "d"
[1, 0]: "e"
}
```
then crossed feature will look like:
```python
shape = [2, 2]
{
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
}
```
Here is an example to create a linear model with crosses of string features:
```python
keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
You could also use vocabulary lookup before crossing:
```python
keywords = categorical_column_with_vocabulary_file(
'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
If an input feature is of numeric type, you can use
`categorical_column_with_identity`, or `bucketized_column`, as in the example:
```python
# vertical_id is an integer categorical feature.
vertical_id = categorical_column_with_identity('vertical_id', 10K)
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
columns = [vertical_id_x_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
To use a crossed column in a DNN model, you need to wrap it in an embedding column
as in this example:
```python
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
```
Args:
keys: An iterable identifying the features to be crossed. Each element can
be either:
* string: Will use the corresponding feature which must be of string type.
* `_CategoricalColumn`: Will use the transformed tensor produced by this
column. Does not support hashed categorical column.
hash_bucket_size: An int > 1. The number of buckets.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseCrossOp (optional).
Returns:
A `_CrossedColumn`.
Raises:
ValueError: If `len(keys) < 2`.
ValueError: If any of the keys is neither a string nor `_CategoricalColumn`.
ValueError: If any of the keys is `_HashedCategoricalColumn`.
ValueError: If `hash_bucket_size < 1`.
"""
if not hash_bucket_size or hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be > 1. '
'hash_bucket_size: {}'.format(hash_bucket_size))
if not keys or len(keys) < 2:
raise ValueError(
'keys must be a list with length > 1. Given: {}'.format(keys))
for key in keys:
if (not isinstance(key, six.string_types) and
not isinstance(key, _CategoricalColumn)):
raise ValueError(
'Unsupported key type. All keys must be either string, or '
'categorical column except _HashedCategoricalColumn. '
'Given: {}'.format(key))
if isinstance(key, _HashedCategoricalColumn):
raise ValueError(
'categorical_column_with_hash_bucket is not supported for crossing. '
'Hashing before crossing will increase probability of collision. '
'Instead, use the feature name as a string. Given: {}'.format(key))
return _CrossedColumn(
keys=tuple(keys), hash_bucket_size=hash_bucket_size,
hash_key=hash_key)
class _EmbeddingColumnLayer(base.Layer):
"""A layer that stores all the state required for a embedding column."""
def __init__(self,
embedding_shape,
initializer,
weight_collections=None,
trainable=True,
name=None,
**kwargs):
"""Constructor.
Args:
embedding_shape: Shape of the embedding variable used for lookup.
initializer: A variable initializer function to be used in embedding
variable initialization.
weight_collections: A list of collection names to which the Variable will
be added. Note that, variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: Name of the layer
**kwargs: keyword named properties.
"""
super(_EmbeddingColumnLayer, self).__init__(
trainable=trainable, name=name, **kwargs)
self._embedding_shape = embedding_shape
self._initializer = initializer
self._weight_collections = weight_collections
def set_weight_collections(self, weight_collections):
"""Sets the weight collections for the layer.
Args:
weight_collections: A list of collection names to which the Variable will
be added.
"""
self._weight_collections = weight_collections
def build(self, _):
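# The input shape argument is ignored: the only state this layer creates is
# the embedding weight variable below.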
self._embedding_weight_var = self.add_variable(
name='embedding_weights',
shape=self._embedding_shape,
dtype=dtypes.float32,
initializer=self._initializer,
trainable=self.trainable)
if self._weight_collections and not context.executing_eagerly():
_add_to_collections(self._embedding_weight_var, self._weight_collections)
self.built = True
def call(self, _):
return self._embedding_weight_var
@six.add_metaclass(abc.ABCMeta)
class _FeatureColumn(object):
"""Represents a feature column abstraction.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
To distinguish the concept of a feature family and a specific binary feature
within a family, we refer to a feature family like "country" as a feature
column. Following is an example feature in a `tf.Example` format:
{key: "country", value: [ "US" ]}
In this example the value of the feature is "US" and "country" refers to the
feature column.
This class is abstract. Users should not create instances of it.
"""
@abc.abstractproperty
def name(self):
"""Returns string. Used for naming and for name_scope."""
pass
@property
def _var_scope_name(self):
"""Returns string. Used for variable_scope. Defaults to self.name."""
return self.name
@abc.abstractmethod
def _transform_feature(self, inputs):
"""Returns intermediate representation (usually a `Tensor`).
Uses `inputs` to create an intermediate representation (usually a `Tensor`)
that other feature columns can use.
Example usage of `inputs`:
Let's say a Feature column depends on raw feature ('raw') and another
`_FeatureColumn` (input_fc). To access corresponding `Tensor`s, inputs will
be used as follows:
```python
raw_tensor = inputs.get('raw')
fc_tensor = inputs.get(input_fc)
```
Args:
inputs: A `_LazyBuilder` object to access inputs.
Returns:
Transformed feature `Tensor`.
"""
pass
@abc.abstractproperty
def _parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict.
It is used to generate the parsing spec for `tf.parse_example`. The returned spec is a
dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
supported objects. Please check documentation of `tf.parse_example` for all
supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
`_FeatureColumn` (input_fc). One possible implementation of
_parse_example_spec is as follows:
```python
spec = {'raw': tf.FixedLenFeature(...)}
spec.update(input_fc._parse_example_spec)
return spec
```
"""
pass
def _reset_config(self):
"""Resets the configuration in the column.
Some feature columns, e.g. embedding or shared embedding columns, might
have state that occasionally needs to be reset. Use this method
in that scenario.
"""
class _DenseColumn(_FeatureColumn):
"""Represents a column which can be represented as `Tensor`.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
Some examples of this type are: numeric_column, embedding_column,
indicator_column.
"""
@abc.abstractproperty
def _variable_shape(self):
"""`TensorShape` of `_get_dense_tensor`, without batch dimension."""
pass
@abc.abstractmethod
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns a `Tensor`.
The output of this function will be used by model-builder-functions. For
example the pseudo code of `input_layer` will be like:
```python
def input_layer(features, feature_columns, ...):
outputs = [fc._get_dense_tensor(...) for fc in feature_columns]
return tf.concat(outputs)
```
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: List of graph collections to which Variables (if any
will be created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
Returns:
`Tensor` of shape [batch_size] + `_variable_shape`.
"""
pass
def _create_weighted_sum(column,
builder,
units,
sparse_combiner,
weight_collections,
trainable,
weight_var=None):
"""Creates a weighted sum for a dense/categorical column for linear_model."""
if isinstance(column, _CategoricalColumn):
return _create_categorical_column_weighted_sum(
column=column,
builder=builder,
units=units,
sparse_combiner=sparse_combiner,
weight_collections=weight_collections,
trainable=trainable,
weight_var=weight_var)
else:
return _create_dense_column_weighted_sum(
column=column,
builder=builder,
units=units,
weight_collections=weight_collections,
trainable=trainable,
weight_var=weight_var)
def _create_dense_column_weighted_sum(column,
builder,
units,
weight_collections,
trainable,
weight_var=None):
"""Create a weighted sum of a dense column for linear_model."""
tensor = column._get_dense_tensor( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access
batch_size = array_ops.shape(tensor)[0]
tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
if weight_var is not None:
weight = weight_var
else:
weight = variable_scope.get_variable(
name='weights',
shape=[num_elements, units],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
return math_ops.matmul(tensor, weight, name='weighted_sum')
class _CategoricalColumn(_FeatureColumn):
"""Represents a categorical feature.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
A categorical feature is typically handled with a `tf.SparseTensor` of IDs.
"""
IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name
'IdWeightPair', ['id_tensor', 'weight_tensor'])
@abc.abstractproperty
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
pass
@abc.abstractmethod
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
"""Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
`SparseTensor` of `float` or `None` to indicate all weights should be
taken to be 1. If specified, `weight_tensor` must have exactly the same
shape and indices as `id_tensor`. The expected `SparseTensor` is the same as the parsing
output of a `VarLenFeature` which is a ragged matrix.
Args:
inputs: A `LazyBuilder` as a cache to get input tensors required to
create `IdWeightPair`.
weight_collections: List of graph collections to which variables (if any
will be created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.get_variable`).
"""
pass
def _create_categorical_column_weighted_sum(column,
builder,
units,
sparse_combiner,
weight_collections,
trainable,
weight_var=None):
# pylint: disable=g-doc-return-or-yield,g-doc-args
"""Create a weighted sum of a categorical column for linear_model.
Note to maintainer: As an implementation detail, the weighted sum is
implemented via embedding_lookup_sparse for efficiency. Mathematically,
the two are the same.
To be specific, conceptually a categorical column can be treated as a multi-hot
vector. Say:
```python
x = [0 0 1] # categorical column input
w = [a b c] # weights
```
The weighted sum is `c` in this case, which is the same as `w[2]`.
Another example is
```python
x = [0 1 1] # categorical column input
w = [a b c] # weights
```
The weighted sum is `b + c` in this case, which is the same as `w[1] + w[2]`.
For both cases, we can implement weighted sum via embedding_lookup with
sparse_combiner = "sum".
"""
sparse_tensors = column._get_sparse_tensors( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
array_ops.shape(sparse_tensors.id_tensor)[0], -1
])
weight_tensor = sparse_tensors.weight_tensor
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(
weight_tensor, [array_ops.shape(weight_tensor)[0], -1])
if weight_var is not None:
weight = weight_var
else:
weight = variable_scope.get_variable(
name='weights',
shape=(column._num_buckets, units), # pylint: disable=protected-access
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
return embedding_ops.safe_embedding_lookup_sparse(
weight,
id_tensor,
sparse_weights=weight_tensor,
combiner=sparse_combiner,
name='weighted_sum')
class _SequenceDenseColumn(_FeatureColumn):
"""Represents dense sequence data."""
TensorSequenceLengthPair = collections.namedtuple( # pylint: disable=invalid-name
'TensorSequenceLengthPair', ['dense_tensor', 'sequence_length'])
@abc.abstractmethod
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
"""Returns a `TensorSequenceLengthPair`."""
pass
class _LazyBuilder(object):
"""Handles caching of transformations while building the model.
`_FeatureColumn` specifies how to digest an input column to the network. Some
feature columns require data transformations. This class caches those
transformations.
Some features may be used in more than one place. For example, one can use a
bucketized feature both by itself and inside a cross. In that case we
should create only one bucketization op instead of creating ops for each
feature column separately. To handle re-use of transformed columns,
`_LazyBuilder` caches all previously transformed columns.
Example:
We're trying to use the following `_FeatureColumn`s:
```python
bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
keywords = fc.categorical_column_with_hash_bucket("keywords", ...)
age_X_keywords = fc.crossed_column([bucketized_age, "keywords"])
... = linear_model(features,
[bucketized_age, keywords, age_X_keywords])
```
If we transform each column independently, then we'll get duplication of
bucketization (one for cross, one for bucketization itself).
The `_LazyBuilder` eliminates this duplication.
"""
def __init__(self, features):
"""Creates a `_LazyBuilder`.
Args:
features: A mapping from feature column to objects that are `Tensor` or
`SparseTensor`, or can be converted to same via
`sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
signifies a base feature (not-transformed). A `_FeatureColumn` key
means that this `Tensor` is the output of an existing `_FeatureColumn`
which can be reused.
"""
self._features = features.copy()
self._feature_tensors = {}
def get(self, key):
"""Returns a `Tensor` for the given key.
A `str` key is used to access a base feature (not-transformed). When a
`_FeatureColumn` is passed, the transformed feature is returned if it
already exists, otherwise the given `_FeatureColumn` is asked to provide its
transformed output, which is then cached.
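For example, assuming `features` holds a raw 'age' tensor and `bucketized_age`
is a `_BucketizedColumn` built from it (both names are illustrative):
```python
builder = _LazyBuilder(features)
age_tensor = builder.get('age')           # raw feature, converted if needed
bucketized = builder.get(bucketized_age)  # transformed once, then cached
```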
Args:
key: a `str` or a `_FeatureColumn`.
Returns:
The transformed `Tensor` corresponding to the `key`.
Raises:
ValueError: if key is not found or a transformed `Tensor` cannot be
computed.
"""
if key in self._feature_tensors:
# FeatureColumn is already transformed or converted.
return self._feature_tensors[key]
if key in self._features:
feature_tensor = self._get_raw_feature_as_tensor(key)
self._feature_tensors[key] = feature_tensor
return feature_tensor
if isinstance(key, six.string_types):
raise ValueError('Feature {} is not in features dictionary.'.format(key))
if not isinstance(key, _FeatureColumn):
raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
'Provided: {}'.format(key))
column = key
logging.debug('Transforming feature_column %s.', column)
transformed = column._transform_feature(self) # pylint: disable=protected-access
if transformed is None:
raise ValueError('Column {} is not supported.'.format(column.name))
self._feature_tensors[column] = transformed
return transformed
def _get_raw_feature_as_tensor(self, key):
"""Gets the raw_feature (keyed by `key`) as `tensor`.
The raw feature is converted to a (sparse) tensor and its rank may be expanded.
For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if
the rank is 1. Dynamic rank is supported as well. A rank-0 raw feature will
error out as it is not supported.
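For example, a rank-1 `Tensor` `[1., 2.]` is expanded to `[[1.], [2.]]`, and a
rank-1 `SparseTensor` is reshaped to shape `[batch_size, 1]`.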
Args:
key: A `str` key to access the raw feature.
Returns:
A `Tensor` or `SparseTensor`.
Raises:
ValueError: if the raw feature has rank 0.
"""
raw_feature = self._features[key]
feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
raw_feature)
def expand_dims(input_tensor):
# Input_tensor must have rank 1.
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return sparse_ops.sparse_reshape(
input_tensor, [array_ops.shape(input_tensor)[0], 1])
else:
return array_ops.expand_dims(input_tensor, -1)
rank = feature_tensor.get_shape().ndims
if rank is not None:
if rank == 0:
raise ValueError(
'Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))
return feature_tensor if rank != 1 else expand_dims(feature_tensor)
# Handle dynamic rank.
with ops.control_dependencies([
check_ops.assert_positive(
array_ops.rank(feature_tensor),
message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))]):
return control_flow_ops.cond(
math_ops.equal(1, array_ops.rank(feature_tensor)),
lambda: expand_dims(feature_tensor),
lambda: feature_tensor)
def _shape_offsets(shape):
"""Returns moving offset for each dimension given shape."""
offsets = []
for dim in reversed(shape):
if offsets:
offsets.append(dim * offsets[-1])
else:
offsets.append(dim)
offsets.reverse()
return offsets
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
"""Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
If `input_tensor` is already a `SparseTensor`, just return it.
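For example, a dense string `Tensor` `[['a', ''], ['', 'b']]` with the default
ignore value `''` becomes a `SparseTensor` with indices `[[0, 0], [1, 1]]`,
values `['a', 'b']` and dense shape `[2, 2]`.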
Args:
input_tensor: A string or integer `Tensor`.
ignore_value: Entries in `input_tensor` equal to this value will be
absent from the resulting `SparseTensor`. If `None`, the default value of
`input_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
Returns:
A `SparseTensor` with the same shape as `input_tensor`.
Raises:
ValueError: when `input_tensor`'s rank is `None`.
"""
input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
input_tensor)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return input_tensor
with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
if ignore_value is None:
if input_tensor.dtype == dtypes.string:
# Strings are special-cased: TF strings map to numpy objects, so the generic
# default below would not apply; use the empty string instead.
ignore_value = ''
elif input_tensor.dtype.is_integer:
ignore_value = -1 # -1 has a special meaning of missing feature
else:
# NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
# constructing a new numpy object of the given type, which yields the
# default value for that type.
ignore_value = input_tensor.dtype.as_numpy_dtype()
ignore_value = math_ops.cast(
ignore_value, input_tensor.dtype, name='ignore_value')
indices = array_ops.where(
math_ops.not_equal(input_tensor, ignore_value), name='indices')
return sparse_tensor_lib.SparseTensor(
indices=indices,
values=array_ops.gather_nd(input_tensor, indices, name='values'),
dense_shape=array_ops.shape(
input_tensor, out_type=dtypes.int64, name='dense_shape'))
def _normalize_feature_columns(feature_columns):
"""Normalizes the `feature_columns` input.
This method converts `feature_columns` to a list where possible. In
addition, it verifies the type and other properties of `feature_columns` that
downstream libraries require.
Args:
feature_columns: The raw feature columns, usually passed by users.
Returns:
The normalized feature column list.
Raises:
ValueError: for any invalid inputs, such as empty, duplicated names, etc.
"""
if isinstance(feature_columns, _FeatureColumn):
feature_columns = [feature_columns]
if isinstance(feature_columns, collections.Iterator):
feature_columns = list(feature_columns)
if isinstance(feature_columns, dict):
raise ValueError('Expected feature_columns to be iterable, found dict.')
for column in feature_columns:
if not isinstance(column, _FeatureColumn):
raise ValueError('Items of feature_columns must be a _FeatureColumn. '
'Given (type {}): {}.'.format(type(column), column))
if not feature_columns:
raise ValueError('feature_columns must not be empty.')
name_to_column = dict()
for column in feature_columns:
if column.name in name_to_column:
raise ValueError('Duplicate feature column name found for columns: {} '
'and {}. This usually means that these columns refer to '
'same base feature. Either one must be discarded or a '
'duplicated but renamed item must be inserted in '
'features dict.'.format(column,
name_to_column[column.name]))
name_to_column[column.name] = column
return feature_columns
class _NumericColumn(_DenseColumn,
collections.namedtuple('_NumericColumn', [
'key', 'shape', 'default_value', 'dtype',
'normalizer_fn'
])):
"""see `numeric_column`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {
self.key:
parsing_ops.FixedLenFeature(self.shape, self.dtype,
self.default_value)
}
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError(
'The corresponding Tensor of numerical column must be a Tensor. '
'SparseTensor is not supported. key: {}'.format(self.key))
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return math_ops.cast(input_tensor, dtypes.float32)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(self.shape)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns dense `Tensor` representing numeric feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: Unused `weight_collections` since no variables are
created in this function.
trainable: Unused `trainable` bool since no variables are created in
this function.
Returns:
Dense `Tensor` created within `_transform_feature`.
"""
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return inputs.get(self)
class _BucketizedColumn(_DenseColumn, _CategoricalColumn,
collections.namedtuple('_BucketizedColumn', [
'source_column', 'boundaries'])):
"""See `bucketized_column`."""
@property
def name(self):
return '{}_bucketized'.format(self.source_column.name)
@property
def _parse_example_spec(self):
return self.source_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
source_tensor = inputs.get(self.source_column)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
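# Illustration for _get_dense_tensor below: with boundaries [0., 1., 2.] there
# are 4 buckets, so a bucket index of 2 becomes the one-hot row [0., 0., 1., 0.].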
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return array_ops.one_hot(
indices=math_ops.cast(input_tensor, dtypes.int64),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
@property
def _num_buckets(self):
# By construction, source_column is always one-dimensional.
return (len(self.boundaries) + 1) * self.source_column.shape[0]
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensor = inputs.get(self)
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]),
(-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = (
array_ops.reshape(input_tensor, (-1,)) +
(len(self.boundaries) + 1) * i2)
indices = math_ops.cast(
array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)
dense_shape = math_ops.cast(
array_ops.stack([batch_size, source_dimension]), dtypes.int64)
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices,
values=bucket_indices,
dense_shape=dense_shape)
return _CategoricalColumn.IdWeightPair(sparse_tensor, None)
class _EmbeddingColumn(
_DenseColumn, _SequenceDenseColumn,
collections.namedtuple(
'_EmbeddingColumn',
('categorical_column', 'dimension', 'combiner', 'layer_creator',
'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'))):
"""See `embedding_column`."""
@property
def name(self):
if not hasattr(self, '_name'):
self._name = '{}_embedding'.format(self.categorical_column.name)
return self._name
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def _variable_shape(self):
if not hasattr(self, '_shape'):
self._shape = tensor_shape.vector(self.dimension)
return self._shape
def _get_dense_tensor_internal(self,
inputs,
weight_collections=None,
trainable=None):
"""Private method that follows the signature of _get_dense_tensor."""
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections=weight_collections, trainable=trainable)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_weights = self.layer_creator(
weight_collections=weight_collections,
scope=variable_scope.get_variable_scope())
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use input_layer, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'sequence_input_layer instead of input_layer. '
'Given (type {}): {}'.format(
self.name, type(self.categorical_column),
self.categorical_column))
return self._get_dense_tensor_internal(
inputs=inputs,
weight_collections=weight_collections,
trainable=trainable)
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
if not isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type _SequenceCategoricalColumn '
'to use sequence_input_layer. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(
self.name, type(self.categorical_column),
self.categorical_column))
dense_tensor = self._get_dense_tensor_internal( # pylint: disable=protected-access
inputs=inputs,
weight_collections=weight_collections,
trainable=trainable)
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return _SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _get_graph_for_variable(var):
if isinstance(var, variables.PartitionedVariable):
return list(var)[0].graph
else:
return var.graph
class _SharedEmbeddingColumn(
_DenseColumn, _SequenceDenseColumn,
collections.namedtuple(
'_SharedEmbeddingColumn',
('categorical_column', 'dimension', 'combiner', 'initializer',
'shared_embedding_collection_name', 'ckpt_to_load_from',
'tensor_name_in_ckpt', 'max_norm', 'trainable'))):
"""See `embedding_column`."""
@property
def name(self):
if not hasattr(self, '_name'):
self._name = '{}_shared_embedding'.format(self.categorical_column.name)
return self._name
@property
def _var_scope_name(self):
return self.shared_embedding_collection_name
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def _variable_shape(self):
if not hasattr(self, '_shape'):
self._shape = tensor_shape.vector(self.dimension)
return self._shape
def _get_dense_tensor_internal(self,
inputs,
weight_collections=None,
trainable=None):
"""Private method that follows the signature of _get_dense_tensor."""
# This method is called from a variable_scope with name _var_scope_name,
# which is shared among all shared embeddings. Open a name_scope here, so
# that the ops for different columns have distinct names.
with ops.name_scope(None, default_name=self.name):
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections=weight_collections, trainable=trainable)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
shared_embedding_collection = ops.get_collection(
self.shared_embedding_collection_name)
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
'Collection {} can only contain one variable. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
'hood.'.format(shared_embedding_collection))
embedding_weights = shared_embedding_collection[0]
if embedding_weights.get_shape() != embedding_shape:
raise ValueError(
'Shared embedding collection {} contains variable {} of '
'unexpected shape {}. Expected shape is {}. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
'hood.'.format(self.shared_embedding_collection_name,
embedding_weights.name,
embedding_weights.get_shape(), embedding_shape))
else:
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
ops.add_to_collection(self.shared_embedding_collection_name,
embedding_weights)
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use input_layer, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'sequence_input_layer instead of input_layer. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
return self._get_dense_tensor_internal(
inputs=inputs,
weight_collections=weight_collections,
trainable=trainable)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
if not isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type _SequenceCategoricalColumn '
'to use sequence_input_layer. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
dense_tensor = self._get_dense_tensor_internal( # pylint: disable=protected-access
inputs=inputs,
weight_collections=weight_collections,
trainable=trainable)
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return _SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _check_shape(shape, key):
"""Returns shape if it's valid, raises error otherwise."""
assert shape is not None
if not nest.is_sequence(shape):
shape = [shape]
shape = tuple(shape)
for dimension in shape:
if not isinstance(dimension, six.integer_types):
raise TypeError('shape dimensions must be integer. '
'shape: {}, key: {}'.format(shape, key))
if dimension < 1:
raise ValueError('shape dimensions must be greater than 0. '
'shape: {}, key: {}'.format(shape, key))
return shape
class _HashedCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_HashedCategoricalColumn',
['key', 'hash_bucket_size', 'dtype'])):
"""see `categorical_column_with_hash_bucket`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
sparse_values = input_tensor.values
else:
sparse_values = string_ops.as_string(input_tensor.values)
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.hash_bucket_size, name='lookup')
return sparse_tensor_lib.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _VocabularyFileCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_VocabularyFileCategoricalColumn', (
'key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'dtype',
'default_value'
))):
"""See `categorical_column_with_vocabulary_file`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_file` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.cast(input_tensor, dtypes.int64)
return lookup_ops.index_table_from_file(
vocabulary_file=self.vocabulary_file,
num_oov_buckets=self.num_oov_buckets,
vocab_size=self.vocabulary_size,
default_value=self.default_value,
key_dtype=key_dtype,
name='{}_lookup'.format(self.key)).lookup(input_tensor)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.vocabulary_size + self.num_oov_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _VocabularyListCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_VocabularyListCategoricalColumn', (
'key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'
))):
"""See `categorical_column_with_vocabulary_list`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_tensor` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.cast(input_tensor, dtypes.int64)
return lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self.vocabulary_list),
default_value=self.default_value,
num_oov_buckets=self.num_oov_buckets,
dtype=key_dtype,
name='{}_lookup'.format(self.key)).lookup(input_tensor)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return len(self.vocabulary_list) + self.num_oov_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _IdentityCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_IdentityCategoricalColumn', (
'key', 'num_buckets', 'default_value'
))):
"""See `categorical_column_with_identity`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
if not input_tensor.dtype.is_integer:
raise ValueError(
'Invalid input, not integer. key: {} dtype: {}'.format(
self.key, input_tensor.dtype))
values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')
num_buckets = math_ops.cast(
self.num_buckets, dtypes.int64, name='num_buckets')
zero = math_ops.cast(0, dtypes.int64, name='zero')
if self.default_value is None:
# Fail if values are out-of-range.
assert_less = check_ops.assert_less(
values, num_buckets, data=(values, num_buckets),
name='assert_less_than_num_buckets')
assert_greater = check_ops.assert_greater_equal(
values, zero, data=(values,),
name='assert_greater_or_equal_0')
with ops.control_dependencies((assert_less, assert_greater)):
values = array_ops.identity(values)
else:
# Assign default for out-of-range values.
values = array_ops.where(
math_ops.logical_or(
values < zero, values >= num_buckets, name='out_of_range'),
array_ops.fill(
dims=array_ops.shape(values),
value=math_ops.cast(self.default_value, dtypes.int64),
name='default_values'), values)
return sparse_tensor_lib.SparseTensor(
indices=input_tensor.indices,
values=values,
dense_shape=input_tensor.dense_shape)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.num_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _WeightedCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_WeightedCategoricalColumn', (
'categorical_column', 'weight_feature_key', 'dtype'
))):
"""See `weighted_categorical_column`."""
@property
def name(self):
return '{}_weighted_by_{}'.format(
self.categorical_column.name, self.weight_feature_key)
@property
def _parse_example_spec(self):
config = self.categorical_column._parse_example_spec # pylint: disable=protected-access
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _transform_feature(self, inputs):
weight_tensor = inputs.get(self.weight_feature_key)
if weight_tensor is None:
raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
weight_tensor)
if self.dtype != weight_tensor.dtype.base_dtype:
raise ValueError('Bad dtype, expected {}, but got {}.'.format(
self.dtype, weight_tensor.dtype))
if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
# The weight tensor can be a regular Tensor. In this case, sparsify it.
weight_tensor = _to_sparse_input_and_drop_ignore_values(
weight_tensor, ignore_value=0.0)
if not weight_tensor.dtype.is_floating:
weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
return (inputs.get(self.categorical_column), weight_tensor)
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
tensors = inputs.get(self)
return _CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
class _CrossedColumn(
_CategoricalColumn,
collections.namedtuple('_CrossedColumn',
['keys', 'hash_bucket_size', 'hash_key'])):
"""See `crossed_column`."""
@property
def name(self):
feature_names = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, _FeatureColumn):
feature_names.append(key.name)
else: # key must be a string
feature_names.append(key)
return '_X_'.join(sorted(feature_names))
@property
def _parse_example_spec(self):
config = {}
for key in self.keys:
if isinstance(key, _FeatureColumn):
config.update(key._parse_example_spec) # pylint: disable=protected-access
else: # key must be a string
config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
return config
def _transform_feature(self, inputs):
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(inputs.get(key))
elif isinstance(key, _CategoricalColumn):
ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops.sparse_cross_hashed(
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
def _collect_leaf_level_keys(cross):
"""Collects base keys by expanding all nested crosses.
Args:
cross: A `_CrossedColumn`.
Returns:
A list of strings or `_CategoricalColumn` instances.
"""
leaf_level_keys = []
for k in cross.keys:
if isinstance(k, _CrossedColumn):
leaf_level_keys.extend(_collect_leaf_level_keys(k))
else:
leaf_level_keys.append(k)
return leaf_level_keys
class _IndicatorColumn(_DenseColumn, _SequenceDenseColumn,
collections.namedtuple('_IndicatorColumn',
['categorical_column'])):
"""Represents a one-hot column for use in deep networks.
Args:
categorical_column: A `_CategoricalColumn` which is created by
`categorical_column_with_*` function.
"""
@property
def name(self):
return '{}_indicator'.format(self.categorical_column.name)
def _transform_feature(self, inputs):
"""Returns dense `Tensor` representing feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
Returns:
Transformed feature `Tensor`.
Raises:
ValueError: if input rank is not known at graph building time.
"""
id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
id_tensor = id_weight_pair.id_tensor
weight_tensor = id_weight_pair.weight_tensor
# If the underlying column is weighted, return the input as a dense tensor.
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(
sp_ids=id_tensor,
sp_values=weight_tensor,
vocab_size=int(self._variable_shape[-1]))
# Remove (?, -1) index.
weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
weighted_column.dense_shape)
# Use scatter_nd to merge duplicated indices if they exist,
# instead of sparse_tensor_to_dense.
return array_ops.scatter_nd(weighted_column.indices,
weighted_column.values,
weighted_column.dense_shape)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
id_tensor, default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor,
depth=self._variable_shape[-1],
on_value=1.0,
off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
@property
def _variable_shape(self):
"""Returns a `TensorShape` representing the shape of the dense `Tensor`."""
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns dense `Tensor` representing feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: Unused `weight_collections` since no variables are
created in this function.
trainable: Unused `trainable` bool since no variables are created in
this function.
Returns:
Dense `Tensor` created within `_transform_feature`.
Raises:
ValueError: If `categorical_column` is a `_SequenceCategoricalColumn`.
"""
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
if isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use input_layer, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'sequence_input_layer instead of input_layer. '
'Given (type {}): {}'.format(
self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return inputs.get(self)
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
if not isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must be of type _SequenceCategoricalColumn '
'to use sequence_input_layer. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(
self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
dense_tensor = inputs.get(self)
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return _SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _verify_static_batch_size_equality(tensors, columns):
"""Validates that the first dim (batch size) of all tensors are equal or None.
Args:
tensors: list of tensors to check.
columns: list of feature columns matching tensors. Will be used for error
messaging.
Raises:
ValueError: if one of the tensors has a mismatched batch size
"""
# batch_size is a tf.Dimension object.
expected_batch_size = None
for i in range(0, len(tensors)):
if tensors[i].shape.dims[0].value is not None:
if expected_batch_size is None:
batch_size_column_index = i
expected_batch_size = tensors[i].shape.dims[0]
elif not expected_batch_size.is_compatible_with(tensors[i].shape.dims[0]):
raise ValueError(
'Batch size (first dimension) of each feature must be same. '
'Batch size of columns ({}, {}): ({}, {})'.format(
columns[batch_size_column_index].name, columns[i].name,
expected_batch_size, tensors[i].shape.dims[0]))
class _SequenceCategoricalColumn(
_CategoricalColumn,
collections.namedtuple(
'_SequenceCategoricalColumn', ['categorical_column'])):
"""Represents sequences of categorical data."""
@property
def name(self):
return self.categorical_column.name
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
return self.categorical_column._transform_feature(inputs) # pylint: disable=protected-access
@property
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
id_tensor = sparse_tensors.id_tensor
weight_tensor = sparse_tensors.weight_tensor
# Expands third dimension, if necessary so that embeddings are not
# combined during embedding lookup. If the tensor is already 3D, leave
# as-is.
shape = array_ops.shape(id_tensor)
# Compute the third dimension explicitly instead of setting it to -1, as
# that doesn't work for dynamically shaped tensors with 0-length at runtime.
# This happens for empty sequences.
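# For example, a [batch_size, max_seq_length] id tensor is reshaped to
# [batch_size, max_seq_length, 1].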
target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]
id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape)
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape)
return _CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from resource_management.core.exceptions import ClientComponentHasNoStatus
from resource_management.core.resources.system import Execute
from resource_management.core.logger import Logger
from resource_management.core import shell
from setup_spark import setup_spark
class SparkClient(Script):
def install(self, env):
self.install_packages(env)
self.configure(env)
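# Symlink the HDFS client jar into Spark's jars directory when the source jar
# exists and the link has not been created yet.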
Execute(('ln','-sf', format('/usr/lib/hadoop-hdfs/hadoop-hdfs-client.jar'),'/usr/lib/spark/jars/hadoop-hdfs-client.jar'),
not_if=format("ls /usr/lib/spark/jars/hadoop-hdfs-client.jar"),
only_if=format("ls /usr/lib/hadoop-hdfs/hadoop-hdfs-client.jar"),
sudo=True)
def configure(self, env, upgrade_type=None, config_dir=None):
import params
env.set_params(params)
setup_spark(env, 'client', upgrade_type=upgrade_type, action = 'config')
def status(self, env):
raise ClientComponentHasNoStatus()
def get_component_name(self):
return "spark2-client"
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
Logger.info("Executing Spark2 Client Stack Upgrade pre-restart")
conf_select.select(params.stack_name, "spark", params.version)
stack_select.select("spark2-client", params.version)
if __name__ == "__main__":
SparkClient().execute()
|
import android,time
import random
droid = android.Android()
class Game:
"Generic game class."
def __init__(self,name):
self.name=name
def play(self):
pass
class GlobalThermonuclearWar(Game):
"Implementation of global thermonuclear war."
strategies = ["U.S. First Strike",
"U.S.S.R. First Strike",
"NATO / Warsaw Pact",
"Far East Strategy",
"U.S. / U.S.S.R. escalation",
"Middle East War",
"U.S.S.R. / China Attack",
"India / Pakistan War",
"Mediterranean War",
"Hong Kong Variant",
"SEATO Decapitating",
"Cuban Provocation",
"Inadvertent",
"Atlantic Heavy",
"Cuban Paramilitary",
"Nicaraguan Pre-emptive",
"Pacific Territorial",
"Burmese Theaterwide",
"Turkish Decoy",
"Argentina Escalation",
"Iceland Maximum",
"Arabian Theaterwide",
"U.S. Subversion",
"Australian Maneuver",
"Sudan Surprise",
"NATO Territorial",
"Zaire Alliance",
"Iceland Incident",
"English Escalation",
"Zaire Screen",
"Middle East Heavy",
"Mexican Takeover",
"Chad Alert",
"Saudi Maneuver",
"African Territorial",
"Ethiopian Calamity",
"Turkish Heavy",
"NATO Incursion",
"U.S. Defense",
"Cambodian Heavy",
"(Warsaw) Pact Medium",
"Arctic Minimal",
"Mexican Domestic",
"Taiwanese Theaterwide",
"Pacific Maneuver",
"Portugal Revolution",
"Albanian Decoy",
"Palestinian Local",
"Moroccan Minimal",
"Czech Option",
"French Alliance",
"Arabian Clandestine",
"Gabon Rebellion",
"Northern Maximum",
"SEATO Takeover",
"Hawaiian Escalation",
"Iranian Maneuver",
"NATO Containment",
"Swiss Incident",
"Cuba Minimal",
"Iceland Escalation",
"Vietnamese Retaliation",
"Syrian Provocation",
"Libyan Local",
"Gabon Takeover",
"Romanian War",
"Middle East Offensive",
"Denmark Massive",
"Chile Confrontation",
"South African Subversion",
"U.S.S.R. Alert",
"Nicaraguan Thrust",
"Greenland Domestic",
"Iceland Heavy",
"Kenya Option",
"Pacific Defense",
"Uganda Maximum",
"Thai Subversion",
"Romanian Strike",
"Pakistan Sovereignty",
"Afghan Misdirection",
"Thai Variation",
"Northern Territorial",
"Polish Paramilitary",
"South African Offensive",
"Panama Misdirection",
"Scandinavian Domestic",
"Jordan Pre-emptive",
"English Thrust",
"Burmese Manuever",
"Spain Counter",
"Arabian Offensive",
"Chad Interdiction",
"Taiwan Misdirection",
"Bangladesh Theaterwide",
"Ethiopian Local",
"Italian Takeover",
"Vietnamese Incident",
"English Pre-emptive",
"Denmark Alternate",
"Thai Confrontation",
"Taiwan Surprise",
"Brazilian Strike",
"Venezuela Sudden",
"Malaysian Alert",
"Israel Discretionary",
"Libyan Action",
"Palestinian Tactical",
"NATO Alternate",
"Cyprus Maneuver",
"Egypt Misdirection",
"Bangladesh Thrust",
"Kenya Defense",
"Bangladesh Containment",
"Vietnamese Strike",
"Albanian Containment",
"Gabon Surprise",
"Iraq Sovereignty",
"Vietnamese Sudden",
"Lebanon Interdiction",
"Taiwan Domestic",
"Algerian Sovereignty",
"Arabian Strike",
"Atlantic Sudden",
"Mongolian Thrust",
"Polish Decoy",
"Alaskan Discretionary",
"Canadian Thrust",
"Arabian Light",
"South African Domestic",
"Tunisian Incident",
"Malaysian Maneuver",
"Jamaica Decoy",
"Malaysian Minimal",
"Russian Sovereignty",
"Chad Option",
"Bangladesh War",
"Burmese Containment",
"Asian Theaterwide",
"Bulgarian Clandestine",
"Greenland Incursion",
"Egypt Surgical",
"Czech Heavy",
"Taiwan Confrontation",
"Greenland Maximum",
"Uganda Offensive",
"Caspian Defense"]
def __init__(self):
Game.__init__(self,"Global Thermonuclear War")
def play(self):
play_another_game=self.confirm_game()
if play_another_game is False:
self.learn()
elif play_another_game is True:
GuessTheNumber().play()
def confirm_game(self):
"Suggest a different game."
msg="Wouldn't you prefer a nice game of guess the number?"
droid.dialogCreateAlert(self.name,msg)
droid.dialogSetPositiveButtonText("Yes")
droid.dialogSetNegativeButtonText("Later")
droid.dialogShow()
droid.ttsSpeak(msg)
response=droid.dialogGetResponse().result
droid.dialogDismiss()
return get_dialog_result(response)
def learn(self):
"Learn that you can't win at this game."
droid.dialogCreateHorizontalProgress(self.name,
"Testing strategies…",len(self.strategies))
droid.dialogShow()
for i in range(len(self.strategies)):
droid.log("Strategy: "+self.strategies[i])
time.sleep(0.05)
droid.log("Winner: None")
droid.dialogSetCurrentProgress(i)
droid.dialogDismiss()
self.conclude_unwinnable()
def conclude_unwinnable(self):
msg="A strange game. The only winning move is not to play. \
How about a nice game of guess the number?"
droid.dialogCreateAlert(self.name,msg)
droid.dialogSetPositiveButtonText("Sure")
droid.dialogSetNegativeButtonText("No, thanks")
droid.dialogShow()
droid.ttsSpeak(msg)
response=droid.dialogGetResponse().result
droid.dialogDismiss()
if get_dialog_result(response) is True:
GuessTheNumber().play()
class GuessTheNumber(Game):
"Implementation of choos a number."
number = None
Playing=0
Quit=1
PlayerWon=2
ComputerWon=3
TooLow=-1
Correct=0
TooHigh=1
NotANumber=2
def __init__(self):
Game.__init__(self,"Choose the number")
def play(self):
game_state = {
'the_number': random.randint(1,100),
'guesses_left': 10,
'game_state': self.Playing }
while game_state['game_state'] == self.Playing:
guess = self.get_a_guess(game_state)
game_state = self.handle_guess(game_state,guess)
self.handle_game_end(game_state['game_state'])
def get_a_guess(self,state):
if state.has_key('guess_status'):
status = state['guess_status']
tries_left = state['guesses_left']
error="unknown"
if status == self.TooLow:
error = "too low"
elif status == self.TooHigh:
error = "too high"
elif status == self.NotANumber:
error = "not a number"
droid.ttsSpeak(error)
msg = "Your guess was %s, you have %d tries left." % \
(error, tries_left)
else:
droid.ttsSpeak("I'm thinking of a number…")
msg = "Guess the number between 1 and 100."
return droid.dialogGetInput(self.name, msg).result
def handle_guess(self,state,guess):
droid.log("Handling guess: state=%s, guess=%s" %
(state, guess))
number = state['the_number']
if guess is None:
state['game_state']=self.Quit
else:
try:
guess = int(guess,10)
if guess == number:
state['game_state']=self.PlayerWon
else:
guesses_left = state['guesses_left'] - 1
if guesses_left == 0:
state['game_state']=self.ComputerWon
else:
state['guesses_left']=guesses_left
if guess < number:
state['guess_status']=self.TooLow
else:
state['guess_status']=self.TooHigh
except ValueError:
state['guess_status']=self.NotANumber
return state
def handle_game_end(self,state):
if state==self.PlayerWon:
msg="Congratulations, you won! Would you like to play again?"
elif state==self.ComputerWon:
msg="You lost. Would you like to play again?"
else:
return
droid.dialogCreateAlert(self.name,msg)
droid.dialogSetPositiveButtonText("Yes")
droid.dialogSetNegativeButtonText("No")
droid.dialogShow()
droid.ttsSpeak(msg)
response=droid.dialogGetResponse().result
droid.dialogDismiss()
if get_dialog_result(response) is True:
self.play()
def get_dialog_result(response):
if response.has_key("which"):
result=response["which"]
if result=="positive":
return True
elif result=="negative":
return False
return None
def start_a_game():
""" Allows the player to choose to play a game. """
msg="Greetings Professor Falken. Shall we play a game?"
droid.dialogCreateAlert("Games, Python Edition", msg)
droid.dialogSetPositiveButtonText("Yes")
droid.dialogSetNegativeButtonText("No")
droid.dialogShow()
droid.ttsSpeak(msg)
response=droid.dialogGetResponse().result
droid.dialogDismiss()
return get_dialog_result(response)
def choose_game():
    msg="Choose a game:"
    droid.dialogCreateAlert(msg)
    droid.dialogSetSingleChoiceItems(["Guess the number","Global thermonuclear war"])
    droid.dialogSetPositiveButtonText("OK")
    droid.dialogSetNegativeButtonText("Cancel")
    droid.dialogShow()
    droid.ttsSpeak(msg)
    response=droid.dialogGetResponse().result
    if get_dialog_result(response):
        game=droid.dialogGetSelectedItems().result[0]
        droid.dialogDismiss()
        if game==0:
            return GuessTheNumber()
        elif game==1:
            return GlobalThermonuclearWar()
        return None
    droid.dialogDismiss()
    return None
def main():
if start_a_game():
game=choose_game()
if game is not None:
game.play()
main()
|
'''Model utilities.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import json
import re
from absl import flags
from PIL import Image
import collections
import os
import functools
import numpy as np
import tensorflow as tf
import tensorflow.compat.v2 as tf2
from tensorflow.python.tpu import tpu_function
FLAGS = flags.FLAGS
def build_learning_rate(
initial_lr,
global_step,
steps_per_epoch=None,
lr_decay_type='exponential',
decay_factor=0.97,
decay_epochs=2.4,
total_steps=None,
warmup_epochs=5,
start_from_step=0,
):
'''Build learning rate.'''
lr_step = global_step + start_from_step
if lr_decay_type == 'exponential':
assert steps_per_epoch is not None
decay_steps = steps_per_epoch * decay_epochs
lr = tf.train.exponential_decay(
initial_lr, lr_step, decay_steps, decay_factor, staircase=True)
elif lr_decay_type == 'cosine':
assert total_steps is not None
lr = 0.5 * initial_lr * (
1 + tf.cos(np.pi * tf.cast(lr_step, tf.float32) / total_steps))
elif lr_decay_type == 'constant':
lr = initial_lr
else:
assert False, 'Unknown lr_decay_type : %s' % lr_decay_type
if warmup_epochs:
tf.logging.info('Learning rate warmup_epochs: %d' % warmup_epochs)
warmup_steps = int(warmup_epochs * steps_per_epoch)
warmup_lr = (
initial_lr * tf.cast(lr_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
lr = tf.cond(lr_step < warmup_steps, lambda: warmup_lr, lambda: lr)
return lr
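# Minimal usage sketch (not part of the original module; the concrete values
# below are illustrative assumptions, kept as comments so that importing this
# module stays side-effect free):
#
#   global_step = tf.train.get_or_create_global_step()
#   lr = build_learning_rate(
#       initial_lr=0.016,
#       global_step=global_step,
#       steps_per_epoch=1000,
#       lr_decay_type='exponential')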
def build_optimizer(learning_rate,
optimizer_name='rmsprop',
decay=0.9,
epsilon=0.001,
momentum=0.9):
'''Build optimizer.'''
if optimizer_name == 'sgd':
tf.logging.info('Using SGD optimizer')
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
elif optimizer_name == 'momentum':
tf.logging.info('Using Momentum optimizer')
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum)
elif optimizer_name == 'rmsprop':
tf.logging.info('Using RMSProp optimizer')
optimizer = tf.train.RMSPropOptimizer(learning_rate, decay, momentum,
epsilon)
else:
tf.logging.fatal('Unknown optimizer:', optimizer_name)
return optimizer
class TpuBatchNormalization(tf.layers.BatchNormalization):
'''Cross replica batch normalization.'''
def __init__(self, fused=False, **kwargs):
if fused in (True, None):
raise ValueError('TpuBatchNormalization does not support fused=True.')
super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs)
def _cross_replica_average(self, t, num_shards_per_group):
'''Calculates the average value of input tensor across TPU replicas.'''
num_shards = tpu_function.get_tpu_context().number_of_shards
group_assignment = None
if num_shards_per_group > 1:
if num_shards % num_shards_per_group != 0:
raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0'
% (num_shards, num_shards_per_group))
num_groups = num_shards // num_shards_per_group
group_assignment = [[
x for x in range(num_shards) if x // num_shards_per_group == y
] for y in range(num_groups)]
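      # Example: 16 shards with num_shards_per_group=8 gives
      # group_assignment = [[0, ..., 7], [8, ..., 15]], so each replica only
      # averages with the other replicas in its own group.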
return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
num_shards_per_group, t.dtype)
else:
      tf.logging.info('TpuBatchNormalization: no sub-grouping, averaging across all shards')
return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
num_shards, t.dtype)
def _moments(self, inputs, reduction_axes, keep_dims):
'''Compute the mean and variance: it overrides the original _moments.'''
shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
inputs, reduction_axes, keep_dims=keep_dims)
num_shards = tpu_function.get_tpu_context().number_of_shards or 1
if FLAGS.num_shards_per_group != -1:
num_shards_per_group = FLAGS.num_shards_per_group
else:
if num_shards <= 8: # Skip cross_replica for 2x2 or smaller slices.
num_shards_per_group = 1
else:
num_shards_per_group = max(8, num_shards // 8)
tf.logging.info('TpuBatchNormalization with num_shards_per_group %s',
num_shards_per_group)
if num_shards_per_group > 1 or num_shards_per_group == -2:
# Compute variance using: Var[X]= E[X^2] - E[X]^2.
shard_square_of_mean = tf.math.square(shard_mean)
shard_mean_of_square = shard_variance + shard_square_of_mean
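      # Average E[X] and E[X^2] over the replicas in the group, then recover the
      # group variance from Var[X] = E[X^2] - E[X]^2.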
group_mean = self._cross_replica_average(
shard_mean, num_shards_per_group)
group_mean_of_square = self._cross_replica_average(
shard_mean_of_square, num_shards_per_group)
group_variance = group_mean_of_square - tf.math.square(group_mean)
return (group_mean, group_variance)
else:
return (shard_mean, shard_variance)
def stochastic_depth(inputs, is_training, stochastic_depth_rate):
'''Apply stochastic depth.'''
if not is_training:
return inputs
# Compute keep_prob
# TODO(tanmingxing): add support for training progress.
keep_prob = 1.0 - stochastic_depth_rate
# Compute stochastic_depth tensor
batch_size = tf.shape(inputs)[0]
random_tensor = keep_prob
random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
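  # random_tensor is uniform in [keep_prob, 1 + keep_prob), so flooring it yields
  # a per-example 0/1 mask that is 1 with probability keep_prob; dividing by
  # keep_prob keeps the expected value of the output unchanged.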
binary_tensor = tf.floor(random_tensor)
output = tf.div(inputs, keep_prob) * binary_tensor
return output
def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):
'''Archive a checkpoint if the metric is better.'''
ckpt_dir, ckpt_name = os.path.split(ckpt_path)
saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')
saved_objective = float('-inf')
if tf.gfile.Exists(saved_objective_path):
with tf.gfile.GFile(saved_objective_path, 'r') as f:
saved_objective = float(f.read())
if saved_objective > ckpt_objective:
tf.logging.info('Ckpt %s is worse than %s', ckpt_objective, saved_objective)
return False
filenames = tf.gfile.Glob(ckpt_path + '.*')
  if not filenames:
tf.logging.info('No files to copy for checkpoint %s', ckpt_path)
return False
# Clear the old folder.
dst_dir = os.path.join(ckpt_dir, 'archive')
if tf.gfile.Exists(dst_dir):
tf.gfile.DeleteRecursively(dst_dir)
tf.gfile.MakeDirs(dst_dir)
# Write checkpoints.
for f in filenames:
dest = os.path.join(dst_dir, os.path.basename(f))
tf.gfile.Copy(f, dest, overwrite=True)
ckpt_state = tf.train.generate_checkpoint_state_proto(
dst_dir,
model_checkpoint_path=ckpt_name,
all_model_checkpoint_paths=[ckpt_name])
with tf.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:
f.write(str(ckpt_state))
with tf.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:
f.write('%s' % ckpt_eval)
# Update the best objective.
with tf.gfile.GFile(saved_objective_path, 'w') as f:
f.write('%f' % ckpt_objective)
tf.logging.info('Copying checkpoint %s to %s', ckpt_path, dst_dir)
return True
class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, tf.layers.Layer):
'''Wrap keras DepthwiseConv2D to tf.layers.'''
pass
def save_pic(uint8_arr, filename, log=True):
if log:
tf.logging.info('saving {}'.format(filename))
img = Image.fromarray(uint8_arr)
with tf.gfile.Open(filename, 'wb') as ouf:
img.save(ouf, subsampling=0, quality=100)
def int64_feature(value):
'''Wrapper for inserting int64 features into Example proto.'''
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def float_feature(value):
'''Wrapper for inserting float features into Example proto.'''
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def bytes_feature(value):
'''Wrapper for inserting bytes features into Example proto.'''
if six.PY3 and isinstance(value, six.text_type):
value = six.binary_type(value, encoding='utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
class ImageCoder(object):
'''Helper class that provides TensorFlow image coding utilities.'''
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
self._encode_jpeg_data = tf.placeholder(dtype=tf.uint8)
self._encode_jpeg = tf.image.encode_jpeg(self._encode_jpeg_data,
format='rgb', quality=100)
def encode_jpeg(self, image):
image_data = self._sess.run(self._encode_jpeg,
feed_dict={self._encode_jpeg_data: image})
return image_data
def iterate_through_dataset(dst):
  iterator = dst.make_initializable_iterator()
  elem = iterator.get_next()
  with tf.Session() as sess:
    sess.run(iterator.initializer)
    try:
      while True:
        features = sess.run(elem)
        yield features
    except tf.errors.OutOfRangeError:
      pass
def get_assignment_map_from_checkpoint(vars_list, init_checkpoint, only_teacher_model=False):
graph_to_ckpt_map = {}
assignment_map = {}
for var in vars_list:
ori_name = var.name
ckpt_name = ori_name[:-len(':0')]
if 'global_step' in ori_name:
continue
if only_teacher_model:
# only initialize the teacher model
if 'teacher_model' not in ori_name:
continue
ckpt_name = ckpt_name[len('teacher_model/'):]
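      # Map each variable to its ExponentialMovingAverage shadow name in the
      # checkpoint, unless the name already refers to an optimizer slot or an
      # EMA variable.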
if 'RMSProp' not in ckpt_name and 'ExponentialMovingAverage' not in ckpt_name:
ckpt_name += '/ExponentialMovingAverage'
graph_to_ckpt_map[ori_name] = ckpt_name
assignment_map[ckpt_name] = var
init_vars = tf.train.list_variables(init_checkpoint)
initialized_variable = {}
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in assignment_map:
continue
initialized_variable[name] = True
new_assignment_map = {}
for ckpt_name in assignment_map:
if ckpt_name not in initialized_variable:
block_name = ckpt_name.split('/')[1]
assert False, ckpt_name + ' not found'
else:
new_assignment_map[ckpt_name] = assignment_map[ckpt_name]
return new_assignment_map, graph_to_ckpt_map
def construct_scalar_host_call(
metric_dict,
):
metric_names = list(metric_dict.keys())
def host_call_fn(gs, *args):
gs = gs[0]
# Host call fns are executed FLAGS.iterations_per_loop times after one
# TPU loop is finished, setting max_queue value to the same as number of
# iterations will make the summary writer only flush the data to storage
# once per loop.
with tf2.summary.create_file_writer(
FLAGS.model_dir, max_queue=FLAGS.iterations_per_loop).as_default():
with tf2.summary.record_if(tf.math.equal(tf.math.floormod(gs, FLAGS.iterations_per_loop), 0)):
for i, name in enumerate(metric_names):
scalar = args[i][0]
# with tf.contrib.summary.record_summaries_every_n_global_steps(100, gs):
tf2.summary.scalar(name, scalar, step=gs)
return tf.summary.all_v2_summary_ops()
global_step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1])
other_tensors = [tf.reshape(metric_dict[key], [1]) for key in metric_names]
host_call = (host_call_fn, [global_step_tensor] + other_tensors)
return host_call
def get_all_variable():
var_list = tf.trainable_variables() + tf.get_collection('moving_vars')
for v in tf.global_variables():
# We maintain mva for batch norm moving mean and variance as well.
if 'moving_mean' in v.name or 'moving_variance' in v.name:
var_list.append(v)
var_list = list(set(var_list))
var_list = sorted(var_list, key=lambda var: var.name)
return var_list
def init_from_ckpt(scaffold_fn):
all_var_list = get_all_variable()
all_var_list = sorted(all_var_list, key=lambda var: var.name)
if FLAGS.teacher_model_name:
init_ckpt = FLAGS.teacher_model_path
else:
init_ckpt = FLAGS.init_model_path
assignment_map, graph_to_ckpt_map = get_assignment_map_from_checkpoint(
all_var_list, init_ckpt, FLAGS.teacher_model_name is not None)
if FLAGS.use_tpu:
def tpu_scaffold():
tf.logging.info('initializing from {}'.format(init_ckpt))
tf.train.init_from_checkpoint(init_ckpt, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_ckpt, assignment_map)
tf.logging.info('**** Variables ****')
for var in all_var_list:
init_string = ''
if var.name in graph_to_ckpt_map:
init_string = ', *INIT_FROM_CKPT* <== {}'.format(
graph_to_ckpt_map[var.name])
tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape,
init_string)
return scaffold_fn
def get_filename(data_dir, file_prefix, shard_id, num_shards):
filename = os.path.join(
data_dir,
'%s-%05d-of-%05d' % (file_prefix, shard_id, num_shards))
tf.logging.info('processing %s', filename)
return filename
def get_dst_from_filename(filename, data_type, total_replicas=1, worker_id=0, get_label=False):
input_files = [filename]
if FLAGS.data_type == 'tfrecord':
buffer_size = 8 * 1024 * 1024
dst = tf.data.TFRecordDataset(input_files, buffer_size=buffer_size)
dst = dst.shard(total_replicas, worker_id)
dst = dst.map(parse_tfrecord, num_parallel_calls=16)
else:
assert False
return dst
def parse_tfrecord(encoded_example):
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string),
}
parsed = tf.parse_single_example(encoded_example, keys_to_features)
return parsed
def decode_raw_image(contents, channels=0):
'''Decodes an image, ensuring that the result is height x width x channels.'''
image = tf.image.decode_image(contents, channels)
# Note: GIFs are decoded with 4 dimensions [num_frames, height, width, 3]
image = tf.cond(
tf.equal(tf.rank(image), 4),
lambda: image[0, :], # Extract first frame
lambda: image)
image_channel_shape = tf.shape(image)[2]
image = tf.cond(
tf.equal(image_channel_shape, 1),
lambda: tf.image.grayscale_to_rgb(image), lambda: image)
image.set_shape([None, None, 3])
return image
def get_reassign_filename(data_dir, file_prefix, shard_id, num_shards, worker_id):
filename = os.path.join(
data_dir,
'%s-%d-%05d-of-%05d' % (file_prefix, worker_id, shard_id, num_shards))
tf.logging.info('writing to %s', filename)
return filename
def get_uid_list():
# get the mapping from class index to class name
return [str(i) for i in range(FLAGS.num_label_classes)]
def label_dataset(worker_id, prediction_dir, shard_id, num_shards):
def label_dst_parser(value):
keys_to_features = {
'probabilities': tf.FixedLenFeature([FLAGS.num_label_classes], tf.float32),
'classes': tf.FixedLenFeature([], tf.int64),
}
parsed = tf.parse_single_example(value, keys_to_features)
features = {}
features['probabilities'] = tf.cast(
tf.reshape(parsed['probabilities'], shape=[FLAGS.num_label_classes]), dtype=tf.float32)
features['classes'] = tf.cast(
tf.reshape(parsed['classes'], shape=[]), dtype=tf.int32)
return features
input_file = os.path.join(
prediction_dir,
'train-info-%.5d-of-%.5d-%.5d' % (shard_id, num_shards, worker_id))
dst = tf.data.Dataset.list_files(input_file)
def fetch_dataset(filename):
buffer_size = 8 * 1024 * 1024
dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
dst = dst.apply(
tf.data.experimental.parallel_interleave(
fetch_dataset, cycle_length=1))
dst = dst.apply(
tf.data.experimental.map_and_batch(
label_dst_parser, batch_size=1,
num_parallel_batches=16))
dst = dst.prefetch(tf.data.experimental.AUTOTUNE)
return dst
|
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.tir.compiler import lower_to_tir
def test_lower_to_tir():
data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
conv = relay.nn.conv2d(
data,
weight,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
multiply = relay.multiply(relay.const(-22, dtype="int32"), p2)
tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
subtract = relay.subtract(conv, tile)
func = subtract
expr = relay.Function(relay.analysis.free_vars(func), func)
mod = tvm.IRModule.from_expr(expr)
mod = relay.transform.InferType()(mod)
lower_to_tir(mod["main"])
if __name__ == "__main__":
pytest.main([__file__])
|
from requests import get
from urllib import quote
from sys import argv
from multiprocessing import Pool
import webbrowser
import os
import unicodedata
from lockfile import FileLock
import re
glbl_port = None
def dl_pg(ip, timeout=1):
try:
url = 'http://'+ip+':'+str(glbl_port)
print "[*] Downloading html page from: "+url
url_attr_regex = "((?<=src=[\"'])|(?<=href=.))(?!(http(s|)(:|%3[Aa])))([0-9A-Za-z%?&#_=+./~])*(?=['\"])"
data = unicodedata.normalize('NFKD', get(url, timeout=timeout).text).encode('ascii','ignore')
data = re.sub(r'[^\x00-\x7F]+', '', data)
data = re.sub(url_attr_regex, lambda mtch: ( (url+mtch.group()) if mtch.group().startswith('/') else (url+'/'+mtch.group()) ), data )
return data
    except Exception as e:
return '<!DOCTYPE html><html><head><meta charset="utf-8"></head><body><h1>Failed to retrieve page (%s).</h1></body></html>' % str(e)
def html2iframe(html):
ret = '<iframe width="800" height="600" src="data:text/html;charset=utf-8,%s"></iframe>\n' % quote(html)
return ret
def append_entry(ip):
src = dl_pg(ip)
lock = FileLock("output.html")
lock.acquire()
with open("output.html", 'a') as f:
f.write('<h5>%s</h5><br>\n' % ip)
f.write(html2iframe(src)+'<br>')
lock.release()
def gen_report(ip_list, num_procs=50):
with open('output.html', 'w') as f:
f.write('<!DOCTYPE html><html><head><meta charset="utf-8"></head><body>\n')
pool = Pool(num_procs)
pool.map(append_entry, ip_list)
with open('output.html', 'a') as f:
f.write('</body></html>')
def main(args):
if len(args)!=4:
print "Usage: %s [list of hosts] [port] [number of concurrent tasks]" % args[0]
return 1
global glbl_port
glbl_port = int(args[2])
ip_list = []
with open(args[1], 'r') as f:
ip_list = f.read().split('\n')
gen_report(ip_list, num_procs=int(args[3]))
webbrowser.open_new_tab('file:///'+os.path.abspath('output.html'))
return 0
if __name__ == '__main__':
main(argv)
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from ..models import Feedback
class CreateViewTestCase(TestCase):
url = reverse('feedback:create')
def _setup_session(self):
# Setup the test client session
# Based on snippets from: https://code.djangoproject.com/ticket/10899
self.client.cookies[settings.SESSION_COOKIE_NAME] = 'fake'
session = self.client.session
session['deleted_email'] = 'test@domain.net'
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
def test_template_used(self):
self._setup_session()
resp = self.client.get(self.url)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, 'feedback/create.html')
def test_feedback_creation(self):
self._setup_session()
data = {
'question_1': 'on',
'question_2': 'on',
'question_3': '',
'question_4': 'on',
'question_5': '',
'comments': 'A nice comment',
}
next_url = reverse('feedback:create')
resp = self.client.post('{}?next={}'.format(self.url, next_url), data)
self.assertRedirects(resp, next_url)
self.assertEqual(Feedback.objects.count(), 1)
feedback = Feedback.objects.get()
self.assertEqual(feedback.email, 'test@domain.net')
self.assertTrue(feedback.question_1)
self.assertTrue(feedback.question_2)
self.assertFalse(feedback.question_3)
self.assertTrue(feedback.question_4)
self.assertFalse(feedback.question_5)
self.assertEqual(feedback.comments, 'A nice comment')
self.assertNotIn('deleted_email', self.client.session,
'Session wasn\'t properly cleaned')
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import prefix_limit
class ipv6_labeled_unicast(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: IPv6 Labeled Unicast configuration options
"""
__slots__ = ("_path_helper", "_extmethods", "__prefix_limit")
_yang_name = "ipv6-labeled-unicast"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__prefix_limit = YANGDynClass(
base=prefix_limit.prefix_limit,
is_container="container",
yang_name="prefix-limit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"neighbors",
"neighbor",
"afi-safis",
"afi-safi",
"ipv6-labeled-unicast",
]
def _get_prefix_limit(self):
"""
Getter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_labeled_unicast/prefix_limit (container)
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
return self.__prefix_limit
def _set_prefix_limit(self, v, load=False):
"""
Setter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_labeled_unicast/prefix_limit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix_limit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix_limit() directly.
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=prefix_limit.prefix_limit,
is_container="container",
yang_name="prefix-limit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """prefix_limit must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=prefix_limit.prefix_limit, is_container='container', yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__prefix_limit = t
if hasattr(self, "_set"):
self._set()
def _unset_prefix_limit(self):
self.__prefix_limit = YANGDynClass(
base=prefix_limit.prefix_limit,
is_container="container",
yang_name="prefix-limit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
prefix_limit = __builtin__.property(_get_prefix_limit, _set_prefix_limit)
_pyangbind_elements = OrderedDict([("prefix_limit", prefix_limit)])
from . import prefix_limit
class ipv6_labeled_unicast(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: IPv6 Labeled Unicast configuration options
"""
__slots__ = ("_path_helper", "_extmethods", "__prefix_limit")
_yang_name = "ipv6-labeled-unicast"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__prefix_limit = YANGDynClass(
base=prefix_limit.prefix_limit,
is_container="container",
yang_name="prefix-limit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"neighbors",
"neighbor",
"afi-safis",
"afi-safi",
"ipv6-labeled-unicast",
]
def _get_prefix_limit(self):
"""
Getter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_labeled_unicast/prefix_limit (container)
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
return self.__prefix_limit
def _set_prefix_limit(self, v, load=False):
"""
Setter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_labeled_unicast/prefix_limit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix_limit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix_limit() directly.
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=prefix_limit.prefix_limit,
is_container="container",
yang_name="prefix-limit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """prefix_limit must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=prefix_limit.prefix_limit, is_container='container', yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__prefix_limit = t
if hasattr(self, "_set"):
self._set()
def _unset_prefix_limit(self):
self.__prefix_limit = YANGDynClass(
base=prefix_limit.prefix_limit,
is_container="container",
yang_name="prefix-limit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
prefix_limit = __builtin__.property(_get_prefix_limit, _set_prefix_limit)
_pyangbind_elements = OrderedDict([("prefix_limit", prefix_limit)])
|
from sqlalchemy import sql, types, util
from sqlalchemy.engine import default
from sqlalchemy.sql import compiler
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy_hana import types as hana_types
RESERVED_WORDS = {
'all', 'alter', 'as', 'before', 'begin', 'both', 'case', 'char',
'condition', 'connect', 'cross', 'cube', 'current_connection',
'current_date', 'current_schema', 'current_time', 'current_timestamp',
'current_transaction_isolation_level', 'current_user', 'current_utcdate',
'current_utctime', 'current_utctimestamp', 'currval', 'cursor',
'declare', 'distinct', 'else', 'elseif', 'end', 'except', 'exception',
'exec', 'false', 'for', 'from', 'full', 'group', 'having', 'if', 'in',
'inner', 'inout', 'intersect', 'into', 'is', 'join', 'leading', 'left',
'limit', 'loop', 'minus', 'natural', 'nchar', 'nextval', 'null', 'on',
'order', 'out', 'prior', 'return', 'returns', 'reverse', 'right',
'rollup', 'rowid', 'select', 'session_user', 'set', 'sql', 'start',
'sysuuid', 'tablesample', 'top', 'trailing', 'true', 'union', 'unknown',
'using', 'utctimestamp', 'values', 'when', 'where', 'while', 'with'
}
class HANAIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class HANAStatementCompiler(compiler.SQLCompiler):
def visit_sequence(self, seq):
return self.dialect.identifier_preparer.format_sequence(seq) + ".NEXTVAL"
def default_from(self):
return " FROM DUMMY"
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
                # Dirty hack, but HANA only supports OFFSET in combination with LIMIT <integer>
text += "\n LIMIT 999999"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
def for_update_clause(self, select, **kw):
tmp = " FOR UPDATE"
if select._for_update_arg.of:
tmp += " OF " + ", ".join(
self.process(elem, **kw) for elem
in select._for_update_arg.of
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
return tmp
class HANATypeCompiler(compiler.GenericTypeCompiler):
def visit_boolean(self, type_):
return self.visit_TINYINT(type_)
def visit_NUMERIC(self, type_):
return self.visit_DECIMAL(type_)
def visit_TINYINT(self, type_):
return "TINYINT"
def visit_DOUBLE(self, type_):
return "DOUBLE"
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
return self.visit_CLOB(type_, **kw)
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_unicode_text(self, type_, **kw):
return self.visit_NCLOB(type_, **kw)
class HANADDLCompiler(compiler.DDLCompiler):
def visit_check_constraint(self, constraint):
"""HANA doesn't support check constraints."""
return None
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE (%s)" % (
', '.join(self.preparer.quote(c.name)
for c in constraint))
text += self.define_constraint_deferrability(constraint)
return text
def visit_create_table(self, create):
table = create.element
# The table._prefixes list outlives the current compilation, meaning changing the list
# will change it globally. To prevent adding the same prefix multiple times, it is
# removed again after the super-class'es visit_create_table call, which consumes the
# table prefixes.
table_type = table.kwargs.get('hana_table_type')
appended_index = None
if table_type:
appended_index = len(table._prefixes)
table._prefixes.append(table_type.upper())
result = super(HANADDLCompiler, self).visit_create_table(create)
if appended_index is not None:
table._prefixes.pop(appended_index)
return result
class HANAExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
seq = self.dialect.identifier_preparer.format_sequence(seq)
return self._execute_scalar("SELECT %s.NEXTVAL FROM DUMMY" % seq, type_)
class HANABaseDialect(default.DefaultDialect):
name = "hana"
default_paramstyle = 'format'
statement_compiler = HANAStatementCompiler
type_compiler = HANATypeCompiler
ddl_compiler = HANADDLCompiler
preparer = HANAIdentifierPreparer
execution_ctx_cls = HANAExecutionContext
encoding = "cesu-8"
convert_unicode = True
supports_unicode_statements = True
supports_unicode_binds = True
requires_name_normalize = True
supports_sequences = True
supports_native_decimal = True
    # ischema_names can be an empty dict; it is not needed for reading LOBs.
    ischema_names = {}
colspecs = {
types.Boolean: hana_types.BOOLEAN,
types.Date: hana_types.DATE,
types.Time: hana_types.TIME,
types.DateTime: hana_types.TIMESTAMP,
types.LargeBinary: hana_types.HanaBinary,
types.Text: hana_types.HanaText,
types.UnicodeText: hana_types.HanaUnicodeText
}
postfetch_lastrowid = False
implicit_returning = False
supports_empty_insert = False
supports_native_boolean = False
supports_default_values = False
supports_sane_multi_rowcount = False
def __init__(self, auto_convert_lobs=True, **kwargs):
super(HANABaseDialect, self).__init__(**kwargs)
self.auto_convert_lobs = auto_convert_lobs
def on_connect(self):
return None
def _get_server_version_info(self, connection):
pass
def _get_default_schema_name(self, connection):
return self.normalize_name(connection.engine.url.username.upper())
def _check_unicode_returns(self, connection):
return True
def _check_unicode_description(self, connection):
return True
def normalize_name(self, name):
if name is None:
return None
if name.upper() == name and not \
self.identifier_preparer._requires_quotes(name.lower()):
name = name.lower()
elif name.lower() == name:
return quoted_name(name, quote=True)
return name
def denormalize_name(self, name):
if name is None:
return None
if name.lower() == name and not \
self.identifier_preparer._requires_quotes(name.lower()):
name = name.upper()
return name
def has_table(self, connection, table_name, schema=None):
schema = schema or self.default_schema_name
result = connection.execute(
sql.text(
"SELECT 1 FROM TABLES "
"WHERE SCHEMA_NAME=:schema AND TABLE_NAME=:table",
).bindparams(
schema=self.denormalize_name(schema),
table=self.denormalize_name(table_name)
)
)
return bool(result.first())
def has_sequence(self, connection, sequence_name, schema=None):
schema = schema or self.default_schema_name
result = connection.execute(
sql.text(
"SELECT 1 FROM SEQUENCES "
"WHERE SCHEMA_NAME=:schema AND SEQUENCE_NAME=:sequence",
).bindparams(
schema=self.denormalize_name(schema),
sequence=self.denormalize_name(sequence_name)
)
)
return bool(result.first())
def get_schema_names(self, connection, **kwargs):
result = connection.execute(
sql.text("SELECT SCHEMA_NAME FROM SCHEMAS")
)
return list([
self.normalize_name(name) for name, in result.fetchall()
])
def get_table_names(self, connection, schema=None, **kwargs):
schema = schema or self.default_schema_name
result = connection.execute(
sql.text(
"SELECT TABLE_NAME FROM TABLES WHERE SCHEMA_NAME=:schema",
).bindparams(
schema=self.denormalize_name(schema),
)
)
tables = list([
self.normalize_name(row[0]) for row in result.fetchall()
])
return tables
def get_view_names(self, connection, schema=None, **kwargs):
schema = schema or self.default_schema_name
result = connection.execute(
sql.text(
"SELECT VIEW_NAME FROM VIEWS WHERE SCHEMA_NAME=:schema",
).bindparams(
schema=self.denormalize_name(schema),
)
)
views = list([
self.normalize_name(row[0]) for row in result.fetchall()
])
return views
def get_view_definition(self, connection, view_name, schema=None, **kwargs):
schema = schema or self.default_schema_name
return connection.execute(
sql.text(
"SELECT DEFINITION FROM VIEWS WHERE VIEW_NAME=:view_name AND SCHEMA_NAME=:schema LIMIT 1",
).bindparams(
view_name=self.denormalize_name(view_name),
schema=self.denormalize_name(schema),
)
).scalar()
def get_columns(self, connection, table_name, schema=None, **kwargs):
schema = schema or self.default_schema_name
result = connection.execute(
sql.text(
"""SELECT COLUMN_NAME, DATA_TYPE_NAME, DEFAULT_VALUE, IS_NULLABLE, LENGTH, SCALE FROM (
SELECT SCHEMA_NAME, TABLE_NAME, COLUMN_NAME, POSITION, DATA_TYPE_NAME, DEFAULT_VALUE, IS_NULLABLE, LENGTH, SCALE FROM SYS.TABLE_COLUMNS
UNION ALL
SELECT SCHEMA_NAME, VIEW_NAME AS TABLE_NAME, COLUMN_NAME, POSITION, DATA_TYPE_NAME, DEFAULT_VALUE, IS_NULLABLE, LENGTH, SCALE FROM SYS.VIEW_COLUMNS
) AS COLUMS
WHERE SCHEMA_NAME=:schema AND TABLE_NAME=:table
ORDER BY POSITION"""
).bindparams(
schema=self.denormalize_name(schema),
table=self.denormalize_name(table_name)
)
)
columns = []
for row in result.fetchall():
column = {
'name': self.normalize_name(row[0]),
'default': row[2],
'nullable': row[3] == "TRUE"
}
if hasattr(types, row[1]):
column['type'] = getattr(types, row[1])
elif hasattr(hana_types, row[1]):
column['type'] = getattr(hana_types, row[1])
else:
util.warn("Did not recognize type '%s' of column '%s'" % (
row[1], column['name']
))
column['type'] = types.NULLTYPE
if column['type'] == types.DECIMAL:
column['type'] = types.DECIMAL(row[4], row[5])
elif column['type'] == types.VARCHAR:
column['type'] = types.VARCHAR(row[4])
columns.append(column)
return columns
def get_foreign_keys(self, connection, table_name, schema=None, **kwargs):
lookup_schema = schema or self.default_schema_name
result = connection.execute(
sql.text(
"SELECT COLUMN_NAME, REFERENCED_SCHEMA_NAME, "
"REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME "
"FROM REFERENTIAL_CONSTRAINTS "
"WHERE SCHEMA_NAME=:schema AND TABLE_NAME=:table "
"ORDER BY CONSTRAINT_NAME, POSITION"
).bindparams(
schema=self.denormalize_name(lookup_schema),
table=self.denormalize_name(table_name)
)
)
foreign_keys = []
for row in result:
foreign_key = {
"name": None, # No named foreign key support
"constrained_columns": [self.normalize_name(row[0])],
"referred_schema": None,
"referred_table": self.normalize_name(row[2]),
"referred_columns": [self.normalize_name(row[3])],
}
if row[1] != self.denormalize_name(self.default_schema_name):
foreign_key["referred_schema"] = self.normalize_name(row[1])
foreign_keys.append(foreign_key)
return foreign_keys
def get_indexes(self, connection, table_name, schema=None, **kwargs):
schema = schema or self.default_schema_name
result = connection.execute(
sql.text(
'SELECT "INDEX_NAME", "COLUMN_NAME", "CONSTRAINT" '
"FROM INDEX_COLUMNS "
"WHERE SCHEMA_NAME=:schema AND TABLE_NAME=:table "
"ORDER BY POSITION"
).bindparams(
schema=self.denormalize_name(schema),
table=self.denormalize_name(table_name)
)
)
indexes = {}
for name, column, constraint in result.fetchall():
if name.startswith("_SYS"):
continue
name = self.normalize_name(name)
column = self.normalize_name(column)
if name not in indexes:
indexes[name] = {
"name": name,
"unique": False,
"column_names": [column]
}
if constraint is not None:
indexes[name]["unique"] = "UNIQUE" in constraint.upper()
else:
indexes[name]["column_names"].append(column)
return list(indexes.values())
def get_pk_constraint(self, connection, table_name, schema=None, **kwargs):
schema = schema or self.default_schema_name
result = connection.execute(
sql.text(
"SELECT CONSTRAINT_NAME, COLUMN_NAME FROM CONSTRAINTS "
"WHERE SCHEMA_NAME=:schema AND TABLE_NAME=:table AND "
"IS_PRIMARY_KEY='TRUE' "
"ORDER BY POSITION"
).bindparams(
schema=self.denormalize_name(schema),
table=self.denormalize_name(table_name)
)
)
constraint_name = None
constrained_columns = []
for row in result.fetchall():
constraint_name = row[0]
constrained_columns.append(self.normalize_name(row[1]))
return {
"name": self.normalize_name(constraint_name),
"constrained_columns": constrained_columns
}
def get_unique_constraints(self, connection, table_name, schema=None, **kwargs):
schema = schema or self.default_schema_name
result = connection.execute(
sql.text(
"SELECT CONSTRAINT_NAME, COLUMN_NAME FROM CONSTRAINTS "
"WHERE SCHEMA_NAME=:schema AND TABLE_NAME=:table AND "
"IS_UNIQUE_KEY='TRUE' "
"ORDER BY CONSTRAINT_NAME, POSITION"
).bindparams(
schema=self.denormalize_name(schema),
table=self.denormalize_name(table_name)
)
)
constraints = []
parsing_constraint = None
for constraint_name, column_name in result.fetchall():
if parsing_constraint != constraint_name:
# Start with new constraint
parsing_constraint = constraint_name
constraint = {'name': None, 'column_names': []}
if not constraint_name.startswith('_SYS'):
# Constraint has user-defined name
constraint['name'] = self.normalize_name(constraint_name)
constraints.append(constraint)
constraint['column_names'].append(self.normalize_name(column_name))
return constraints
class HANAPyHDBDialect(HANABaseDialect):
driver = 'pyhdb'
default_paramstyle = 'qmark'
@classmethod
def dbapi(cls):
import pyhdb
pyhdb.paramstyle = cls.default_paramstyle
return pyhdb
def create_connect_args(self, url):
kwargs = url.translate_connect_args(username="user")
kwargs.setdefault("port", 30015)
return (), kwargs
def is_disconnect(self, error, connection, cursor):
if connection is None:
return True
return connection.closed
class HANAHDBCLIDialect(HANABaseDialect):
driver = 'hdbcli'
default_paramstyle = 'qmark'
@classmethod
def dbapi(cls):
import hdbcli.dbapi
hdbcli.dbapi.paramstyle = cls.default_paramstyle
return hdbcli.dbapi
def create_connect_args(self, url):
kwargs = url.translate_connect_args(host="address", username="user")
kwargs.setdefault("port", 30015)
return (), kwargs
def connect(self, *args, **kwargs):
connection = super(HANAHDBCLIDialect, self).connect(*args, **kwargs)
connection.setautocommit(False)
return connection
def is_disconnect(self, error, connection, cursor):
if connection:
return not connection.isconnected()
if isinstance(error, self.dbapi.Error):
if error.errorcode == -10709:
return True
return super(HANAHDBCLIDialect, self).is_disconnect(error, connection, cursor)
|
from abc import ABCMeta
from mycroft.plugin.util import load_class, load_plugin
from mycroft.util import log
class MustOverride(NotImplementedError):
pass
class OptionPlugin:
"""
>>> # --- mycroft/fruits/fruit_plugin.py ---
>>> from abc import abstractmethod
>>> from mycroft.plugin.base_plugin import BasePlugin
>>>
>>> class FruitPlugin(BasePlugin):
... def __init__(self, rt, side_dish):
... super().__init__(rt)
... self.side_dish = side_dish
... @abstractmethod
... def eat(self):
... pass
...
>>> # --- mycroft/fruits/apple.py ---
>>> class AppleFruit(FruitPlugin):
... def eat(self):
... print('Eating apple with', self.side_dish)
...
>>> # --- mycroft/services/fruit_service.py ---
>>> from mycroft.services.service_plugin import ServicePlugin
>>> class FruitService(
... ServicePlugin, OptionPlugin, metaclass=OptionMeta, base=FruitPlugin,
... package='mycroft.fruits', suffix='_fruit', default='apple'
... ):
... def __init__(self, rt):
... ServicePlugin.__init__(self, rt)
... OptionPlugin.__init__(self, rt, 'curry')
... # Functions are filled in with functions from base class
...
>>> # --- ~/.mycroft/skills/eat_fruit_skill.py ---
>>> from mycroft_core import MycroftSkill, intent_handler
>>> class EatFruitSkill(MycroftSkill):
... @intent_handler('eat.fruit')
... def eat_fruit(self):
... self.rt.fruit.eat()
"""
def __init__(self, *args, __module__, **kwargs):
self._plugin = None
if not isinstance(type(self), OptionMeta):
raise RuntimeError('{} must have OptionMeta as a metaclass'.format(
self.__class__.__name__
))
self._class, self._plugin = self.__load_plugin(__module__, args, kwargs)
self.__copy_functions(self._plugin)
if getattr(self, '_plugin_path', '').count('.') > 1:
log.debug('Loaded {} as {}.'.format(self._class.__name__, self.__class__.__name__))
def __load_plugin(self, module, args, kwargs):
package, suffix, default = self._package_, self._suffix_, self._default_
plugin_path = getattr(self, '_plugin_path', '')
cls = load_class(package, suffix, module, plugin_path)
plugin = load_plugin(cls, args, kwargs)
if not plugin and module != default:
cls = load_class(package, suffix, default, plugin_path)
plugin = load_plugin(cls, args, kwargs)
if not plugin:
raise RuntimeError('Both modules failed to load for {}'.format(self.__class__.__name__))
return cls, plugin
def __copy_functions(self, obj):
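        # Bind every public callable of the loaded plugin directly onto this
        # wrapper so normal attribute access reaches the plugin's methods without
        # falling through to __getattr__ on each call.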
for name in dir(obj):
if not name.startswith('_'):
value = getattr(obj, name)
if callable(value):
setattr(self, name, value)
def __getattr__(self, item):
if item.startswith('_'):
raise AttributeError(item)
return getattr(self._plugin, item)
    def __str__(self):
        return self._base_.__name__ + ': ' + str(self._plugin)
    def __repr__(self):
        return self._base_.__name__ + ': ' + repr(self._plugin)
class OptionMeta(ABCMeta):
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
if not hasattr(obj, '_plugin'):
raise RuntimeError('OptionPlugin.__init__(<args>) never called in {}.__init__'.format(
cls.__name__
))
return obj
def __new__(mcs, name, bases, namespace, base, package, suffix, default):
return super().__new__(mcs, name, bases, {
'_base_': base,
'_package_': package,
'_suffix_': suffix,
'_default_': default,
**namespace
})
def __init__(cls, name, bases, namespace, *_, **__):
super().__init__(name, bases, namespace)
|
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import ftrl
from tensorflow.python.training import training_util
_LEARNING_RATE = 0.2
def _get_default_optimizer(feature_columns):
learning_rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns)))
return ftrl.FtrlOptimizer(learning_rate=learning_rate)
def _linear_model_fn(features, labels, mode, params, config):
"""A model_fn for linear models that use a gradient-based optimizer.
Args:
features: Dict of `Tensor`.
labels: `Tensor` of shape `[batch_size, logits_dimension]`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, will use a FTRL optimizer.
config: `RunConfig` object to configure the runtime settings.
Returns:
An `EstimatorSpec` instance.
Raises:
ValueError: If mode or params are invalid.
"""
head = params['head']
feature_columns = tuple(params['feature_columns'])
optimizer = optimizers.get_optimizer_instance(
params.get('optimizer') or _get_default_optimizer(feature_columns),
learning_rate=_LEARNING_RATE)
num_ps_replicas = config.num_ps_replicas if config else 0
partitioner = params.get('partitioner') or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
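  # min_slice_size of 64 << 20 bytes (64 MB) means only large linear weight
  # variables get partitioned across the parameter servers; smaller variables
  # are left unpartitioned.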
with variable_scope.variable_scope(
'linear',
values=tuple(six.itervalues(features)),
partitioner=partitioner):
logits = feature_column_lib.linear_model(
features=features,
feature_columns=feature_columns,
units=head.logits_dimension)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizer.minimize(
loss,
global_step=training_util.get_global_step())
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
class LinearClassifier(estimator.Estimator):
"""Linear classifier model.
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
# Estimator using the default optimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Or estimator using the FTRL optimizer with regularization.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
def input_fn_train: # returns x, y (where y represents label's class index).
...
def input_fn_eval: # returns x, y (where y represents label's class index).
...
estimator.train(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column` is not `None`, a feature with
`key=weight_column` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss is calculated by using softmax cross entropy.
"""
def __init__(self,
feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Ftrl',
config=None,
partitioner=None):
"""Construct a `LinearClassifier` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
Note that class labels are integers representing the class index (i.e.
values from 0 to n_classes-1). For arbitrary label values (e.g. string
labels), convert to class indices first.
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
label_vocabulary: A list of strings represents possible label values. If
given, labels must be string type and have any value in
`label_vocabulary`. If it is not given, that means labels are
already encoded as integer or float within [0, 1] for `n_classes=2` and
encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .
Also there will be errors if vocabulary is not provided and labels are
string.
optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
to FTRL optimizer.
config: `RunConfig` object to configure the runtime settings.
partitioner: Optional. Partitioner for input layer.
Returns:
A `LinearClassifier` estimator.
Raises:
ValueError: if n_classes < 2.
"""
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access
weight_column=weight_column,
label_vocabulary=label_vocabulary)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
n_classes, weight_column=weight_column,
label_vocabulary=label_vocabulary)
super(LinearClassifier, self).__init__(
model_fn=_linear_model_fn,
model_dir=model_dir,
config=config,
params={
'head': head,
'feature_columns': feature_columns,
'optimizer': optimizer,
'partitioner': partitioner,
})
class LinearRegressor(estimator.Estimator):
"""An estimator for TensorFlow Linear regression problems.
Train a linear regression model to predict label value given observation of
feature values.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearRegressor(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Input builders
  def input_fn_train():  # returns x, y
    ...
  def input_fn_eval():  # returns x, y
    ...
estimator.train(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(input_fn=input_fn_predict)
```
  Input of `train` and `evaluate` should have the following features,
  otherwise there will be a `KeyError`:
* if `weight_column` is not `None`:
key=weight_column, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
Loss is calculated by using mean squared error.
"""
def __init__(self,
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Ftrl',
config=None,
partitioner=None):
"""Initializes a `LinearRegressor` instance.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
to FTRL optimizer.
config: `RunConfig` object to configure the runtime settings.
partitioner: Optional. Partitioner for input layer.
"""
super(LinearRegressor, self).__init__(
model_fn=_linear_model_fn,
model_dir=model_dir,
config=config,
params={
# pylint: disable=protected-access
'head':
head_lib._regression_head_with_mean_squared_error_loss(
label_dimension=label_dimension,
weight_column=weight_column),
# pylint: enable=protected-access
'feature_columns':
feature_columns,
'optimizer':
optimizer,
'partitioner':
partitioner,
})
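# A minimal end-to-end sketch for the canned estimators above. This is a
# hedged illustration, not part of the library: it assumes a TensorFlow 1.x
# runtime where `tf.feature_column.numeric_column` and
# `tf.estimator.inputs.numpy_input_fn` are available, and uses synthetic data.
if __name__ == '__main__':
  import numpy as np
  import tensorflow as tf

  x_train = np.random.rand(100).astype(np.float32)
  y_train = 2.0 * x_train + 1.0
  # Feed the single numeric feature 'x' in shuffled mini-batches.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': x_train}, y=y_train, batch_size=16, num_epochs=None, shuffle=True)
  regressor = LinearRegressor(
      feature_columns=[tf.feature_column.numeric_column('x')])
  regressor.train(input_fn=train_input_fn, steps=200)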
|
from insights.parsers.blkid import BlockIDInfo
from insights.tests import context_wrap
BLKID_INFO = """
/dev/sda1: UUID="3676157d-f2f5-465c-a4c3-3c2a52c8d3f4" TYPE="xfs"
/dev/sda2: UUID="UVTk76-UWOc-vk7s-galL-dxIP-4UXO-0jG4MH" TYPE="LVM2_member"
/dev/mapper/rhel_hp--dl160g8--3-root: UUID="11124c1d-990b-4277-9f74-c5a34eb2cd04" TYPE="xfs"
/dev/mapper/rhel_hp--dl160g8--3-swap: UUID="c7c45f2d-1d1b-4cf0-9d51-e2b0046682f8" TYPE="swap"
/dev/mapper/rhel_hp--dl160g8--3-home: UUID="c7116820-f2de-4aee-8ea6-0b23c6491598" TYPE="xfs"
/dev/mapper/rhel_hp--dl160g8--3-lv_test: UUID="d403bcbd-0eea-4bff-95b9-2237740f5c8b" TYPE="ext4"
/dev/cciss/c0d1p3: LABEL="/u02" UUID="004d0ca3-373f-4d44-a085-c19c47da8b5e" TYPE="ext3"
/dev/loop0: LABEL="Satellite-5.6.0 x86_64 Disc 0" TYPE="iso9660"
/dev/block/253:1: UUID="f8508c37-eeb1-4598-b084-5364d489031f" TYPE="ext2"
""".strip()
EXPECTED_RESULTS = [{'NAME': "/dev/sda1",
'UUID': "3676157d-f2f5-465c-a4c3-3c2a52c8d3f4",
'TYPE': "xfs"},
{'NAME': "/dev/sda2",
'UUID': "UVTk76-UWOc-vk7s-galL-dxIP-4UXO-0jG4MH",
'TYPE': "LVM2_member"},
{'NAME': "/dev/mapper/rhel_hp--dl160g8--3-root",
'UUID': "11124c1d-990b-4277-9f74-c5a34eb2cd04",
'TYPE': "xfs"},
{'NAME': "/dev/mapper/rhel_hp--dl160g8--3-swap",
'UUID': "c7c45f2d-1d1b-4cf0-9d51-e2b0046682f8",
'TYPE': "swap"},
{'NAME': "/dev/mapper/rhel_hp--dl160g8--3-home",
'UUID': "c7116820-f2de-4aee-8ea6-0b23c6491598",
'TYPE': "xfs"},
{'NAME': "/dev/mapper/rhel_hp--dl160g8--3-lv_test",
'UUID': "d403bcbd-0eea-4bff-95b9-2237740f5c8b",
'TYPE': "ext4"},
{'NAME': "/dev/block/253:1",
'UUID': "f8508c37-eeb1-4598-b084-5364d489031f",
'TYPE': "ext2"},
{'NAME': "/dev/cciss/c0d1p3",
'LABEL': "/u02",
'UUID': "004d0ca3-373f-4d44-a085-c19c47da8b5e",
'TYPE': "ext3"},
{'NAME': "/dev/loop0",
'LABEL': "Satellite-5.6.0 x86_64 Disc 0",
'TYPE': "iso9660"}]
class TestBLKID():
def test_get_blkid_info(self):
blkid_info = BlockIDInfo(context_wrap(BLKID_INFO))
expected_list = dict((r['NAME'], r) for r in EXPECTED_RESULTS)
assert len(blkid_info.data) == 9
for result in blkid_info.data:
assert result == expected_list[result['NAME']]
ext4_only = blkid_info.filter_by_type("ext4")
assert len(ext4_only) == 1
assert ext4_only[0] == {'NAME': "/dev/mapper/rhel_hp--dl160g8--3-lv_test",
'UUID': "d403bcbd-0eea-4bff-95b9-2237740f5c8b",
'TYPE': "ext4"}
xfs_only = blkid_info.filter_by_type("xfs")
expected_list = dict((r['NAME'], r) for r in EXPECTED_RESULTS if r['TYPE'] == "xfs")
assert len(xfs_only) == 3
for result in xfs_only:
assert result == expected_list[result['NAME']]
ext2_only = blkid_info.filter_by_type("ext2")
ext2_only[0]["NAME"] == "/dev/block/253:1"
|
def infmem():
# Allocates unbounded amount of memory.
x = 0
m = {}
total = 0
threshold = 1000
while True:
m[x] = "*" * x
total += x
x = x+1
if total > threshold:
print("%d bytes allocated" % total)
threshold *= 2
|
import logging
from mako.template import Template
from textwrap import TextWrapper
from plugin import Plugin
class TextResume(Plugin):
template_file_extension = 'mako'
def __init__ (self, template_file, resume_data, skip):
self.skip = skip
self.resume_data = resume_data
self.template_filename = template_file
self.indent_spaces = 2
def render(self, output_filename):
with open(output_filename, "w") as output_file:
txt = self._get_rendered_text()
output_file.write(txt)
def _get_rendered_text(self):
with open(self.template_filename) as tmpl_file:
tmpl_text = tmpl_file.read()
tmpl = Template(tmpl_text)
txt = tmpl.render(d=self.resume_data, s=self)
logging.debug(txt)
return txt
def _wrap(self, indent, s, width=70):
indent_str = " " * indent
t = TextWrapper( width=width, subsequent_indent = indent_str)
return '\n'.join(t.wrap(s))
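# A minimal usage sketch for the plugin above. File names, template contents
# and the resume data are hypothetical: the template receives the resume data
# as `d` and this plugin instance as `s`, so it can call helpers such as
# `${s._wrap(2, d['summary'])}`.
if __name__ == '__main__':
    sample_data = {'summary': 'Engineer with experience rendering plain-text resumes.'}
    resume = TextResume('resume.mako', sample_data, skip=None)
    resume.render('resume.txt')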
|
from modelos import extraer_tipo_desde_codigo_factura, extraer_codigo_desde_codigo_factura
from user_data import extraer_datos_factura_desde_access_db, marcar_factura_enviada_access_db
from os import listdir
from os.path import isfile, join, abspath
import pyodbc
DBfile = 'resources/factusol_database.mdb'
conn = pyodbc.connect('DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=' + DBfile)
cursor = conn.cursor()
archivos_facturas = [(f.split(".")[0], abspath(join("facturas", f))) for f in listdir("facturas") if isfile(join("facturas", f))]
for (numero, ruta_pdf) in archivos_facturas:
print numero
tipo = extraer_tipo_desde_codigo_factura(numero)
codigo = extraer_codigo_desde_codigo_factura(numero)
factura = extraer_datos_factura_desde_access_db(cursor, numero, ruta_pdf)
print factura
marcar_factura_enviada_access_db(conn, cursor, factura)
factura = extraer_datos_factura_desde_access_db(cursor, numero, ruta_pdf)
print factura
cursor.close()
conn.close()
|
import pytest
import lluvia as ll
def test_load_library():
session = ll.createSession(enableDebug=True, loadNodeLibrary = False)
session.loadLibrary('lluvia/cpp/core/test/nodes/test_node_library.zip')
desc = session.createComputeNodeDescriptor('nodes/Assign')
    assert desc is not None
    program = session.getProgram('nodes/Assign.comp')
    assert program is not None
    node = session.createComputeNode("nodes/Assign")
    assert node is not None
    assert not session.hasReceivedVulkanWarningMessages()
if __name__ == "__main__":
raise SystemExit(pytest.main([__file__]))
|
import pytest
from testlink.testlinkerrors import TLResponseError
from testlink.testlinkargs import registerMethod, getArgsForMethod
from testlink.testlinkdecorators import decoApiCallAddAttachment,\
decoApiCallAddDevKey, decoApiCallWithoutArgs, \
decoMakerApiCallReplaceTLResponseError, decoMakerApiCallWithArgs, \
decoMakerApiCallChangePosToOptArg
class dummy_api_testlinkdecorator(object):
""" class simulating testlink api client with required attributes for
testlinkdecorators_test """
devKey = '007'
def _getAttachmentArgs(self, attachmentfile):
# simulation of TestlinkAPIGeneric._getAttachmentArgs()
# needed in test_decoApiCallAddAttachment
return {'filename': 'name %s' % attachmentfile,
'filetype': 'type %s' % attachmentfile,
'content' : 'content %s' % attachmentfile}
@pytest.fixture()
def dummy_api():
""" simulates testlink api client with required attributes devKey and
_getAttachmentArgs
"""
return dummy_api_testlinkdecorator()
def test_noWrapperName_decoApiCallWithoutArgs():
" decorator test: original function name should be unchanged "
@decoApiCallWithoutArgs
def orig_funcname1(a_api):
"orig doc string1"
return 'noArgs'
assert 'orig_funcname1' == orig_funcname1.__name__
assert 'orig doc string1' == orig_funcname1.__doc__
assert 'testlinkdecorators_test' in orig_funcname1.__module__
def test_decoApiCallWithArgs():
" decorator test: positional and optional arguments should be registered "
from testlink.testlinkargs import getMethodsWithPositionalArgs
@decoMakerApiCallWithArgs(['Uno', 'due', 'tre'], ['quad'])
def DummyMethod(a_api):
"a dummy api method with 3 positional args and 1 optional arg"
pass
posArgs = getMethodsWithPositionalArgs()
assert ['Uno', 'due', 'tre'] == posArgs['DummyMethod']
def test_noWrapperName_decoApiCallWithArgs():
" decorator test: original function name should be unchanged "
@decoMakerApiCallWithArgs()
def orig_funcname2(a_api):
"orig doc string2"
return 'noArgs'
assert 'orig_funcname2' == orig_funcname2.__name__
assert 'orig doc string2' == orig_funcname2.__doc__
assert 'testlinkdecorators_test' in orig_funcname2.__module__
def test_decoApiCallAddDevKey(dummy_api):
" decorator test: argsOptional should be extended with devKey"
registerMethod('a_func')
@decoApiCallAddDevKey
def a_func(a_api, *argsPositional, **argsOptional):
return argsPositional, argsOptional
# check method argument definition
allArgs = getArgsForMethod('a_func')
assert (['devKey'], []) == allArgs
# check call arguments
response = a_func(dummy_api)
assert {'devKey' : dummy_api.devKey} == response[1]
def test_noWrapperName_decoApiCallAddDevKey():
" decorator test: original function name should be unchanged "
registerMethod('orig_funcname3')
@decoApiCallAddDevKey
def orig_funcname3(a_api, *argsPositional, **argsOptional):
"orig doc string3"
return argsPositional, argsOptional
assert 'orig_funcname3' == orig_funcname3.__name__
assert 'orig doc string3' == orig_funcname3.__doc__
assert 'testlinkdecorators_test' in orig_funcname3.__module__
def test_decoApiCallReplaceTLResponseError_NoCodeError():
" decorator test: TLResponseError (code=None) should be handled "
@decoMakerApiCallReplaceTLResponseError()
def a_func(a_api, *argsPositional, **argsOptional):
raise TLResponseError('DummyMethod',
argsOptional, 'Empty Response! ')
response = a_func('dummy_api')
assert [] == response
def test_decoApiCallReplaceTLResponseError_CodeError():
" decorator test: TLResponseError (code=777) should be raised "
@decoMakerApiCallReplaceTLResponseError()
def a_func(a_api, *argsPositional, **argsOptional):
raise TLResponseError('DummyMethod',
argsOptional, 'Empty Response! ', 777)
with pytest.raises(TLResponseError, match='777.*Empty'):
a_func('dummy_api')
def test_decoApiCallReplaceTLResponseError_CodeErrorOk():
" decorator test: TLResponseError (code=777) should be handled "
@decoMakerApiCallReplaceTLResponseError(777)
def a_func(a_api, *argsPositional, **argsOptional):
raise TLResponseError('DummyMethod',
argsOptional, 'Empty Response! ', 777)
response = a_func('dummy_api')
assert [] == response
def test_decoApiCallReplaceTLResponseError_NoError():
" decorator test: response without TLResponseError should be passed "
@decoMakerApiCallReplaceTLResponseError(777)
def a_func(a_api, *argsPositional, **argsOptional):
return argsOptional
response = a_func('dummy_api', name='BigBird')
assert {'name' : 'BigBird'} == response
def test_decoApiCallReplaceTLResponseError_replaceValue():
" decorator test: TLResponseError should be replaced with {}"
@decoMakerApiCallReplaceTLResponseError(replaceValue={})
def a_func(a_api, *argsPositional, **argsOptional):
raise TLResponseError('DummyMethod',
argsOptional, 'Empty Response! ')
response = a_func('dummy_api')
assert {} == response
def test_noWrapperName_decoApiCallReplaceTLResponseError():
" decorator test: original function name should be unchanged "
@decoMakerApiCallReplaceTLResponseError()
def orig_funcname4(a_api, *argsPositional, **argsOptional):
"orig doc string4"
return argsPositional, argsOptional
assert 'orig_funcname4' == orig_funcname4.__name__
assert 'orig doc string4' == orig_funcname4.__doc__
assert 'testlinkdecorators_test' in orig_funcname4.__module__
def test_decoApiCallAddAttachment(dummy_api):
" decorator test: argsOptional should be extended attachment file infos"
registerMethod('func_addAttachment')
@decoApiCallAddAttachment
def func_addAttachment(a_api, *argsPositional, **argsOptional):
return argsPositional, argsOptional
# check method argument definition
allArgs = getArgsForMethod('func_addAttachment')
assert (['devKey'], ['attachmentfile']) == allArgs
# check call arguments
response = func_addAttachment(dummy_api, 'a_file')
assert response[1] == {'devKey' : dummy_api.devKey,
'filename': 'name a_file',
'filetype': 'type a_file',
'content' : 'content a_file'}
def test_noWrapperName_decoApiCallAddAttachment():
" decorator test: original function name should be unchanged "
registerMethod('orig_funcname5')
@decoApiCallAddAttachment
def orig_funcname5(a_api):
"orig doc string5"
return 'noArgs'
assert 'orig_funcname5' == orig_funcname5.__name__
assert 'orig doc string5' == orig_funcname5.__doc__
assert 'testlinkdecorators_test' in orig_funcname5.__module__
def test_noWrapperName_decoApiCallChangePosToOptArg():
" decorator test: original function name should be unchanged "
@decoMakerApiCallChangePosToOptArg(2, 'optArgName')
def orig_funcname6(*argsPositional, **argsOptional):
"orig doc string6"
return argsPositional, argsOptional
assert 'orig_funcname6' == orig_funcname6.__name__
assert 'orig doc string6' == orig_funcname6.__doc__
assert 'testlinkdecorators_test' in orig_funcname6.__module__
def test_decoApiCallChangePosToOptArg_posArg2():
" decorator test: change posArg 2"
@decoMakerApiCallChangePosToOptArg(2, 'due')
def a_func(a_api, *argsPositional, **argsOptional):
return argsPositional, argsOptional
#'Uno', 'due', 'tre', 'quad', 'cinque'
# 2 posArgs 2optArgs -> 1posArg, 3optArg
(posArgs, optArgs) = a_func('dummy_api', 1, 2, tre = 3, quad = 4 )
assert (1,) == posArgs
assert {'due' : 2, 'tre' : 3, 'quad' : 4 } == optArgs
# 3 posArgs 2optArgs -> 2posArg, 2optArg
(posArgs, optArgs) = a_func('dummy_api', 1, 2, 3, quad = 4 , due = 5)
assert (1,3) == posArgs
assert {'due' : 2, 'quad' : 4 } == optArgs
# 1 posArgs 2optArgs -> 1posArg, 2optArg
(posArgs, optArgs) = a_func('dummy_api', 1, due = 2, tre = 3)
assert (1,) == posArgs
assert {'due' : 2, 'tre' : 3 } == optArgs
# 0 posArgs 2optArgs -> 0posArg, 2optArg
(posArgs, optArgs) = a_func('dummy_api', uno = 1, due = 2)
assert () == posArgs
assert {'uno' : 1, 'due' :2} == optArgs
def test_decoApiCallChangePosToOptArg_posArg3():
" decorator test: change posArg 3"
@decoMakerApiCallChangePosToOptArg(3, 'tre')
def a_func(a_api, *argsPositional, **argsOptional):
return argsPositional, argsOptional
# 3 posArgs 0optArgs -> 2posArg, 1optArg
(posArgs, optArgs) = a_func('dummy_api', 1, 2, 3 )
assert (1,2) == posArgs
assert {'tre' : 3} == optArgs
# 2 posArgs 0optArgs -> 2posArg, 0optArg
(posArgs, optArgs) = a_func('dummy_api', 1, 2 )
assert (1,2) == posArgs
assert {} == optArgs
def test_decoApiCallChangePosToOptArg_posArgNeg1():
" decorator test: change posArg -1"
@decoMakerApiCallChangePosToOptArg(-1, 'last')
def a_func(a_api, *argsPositional, **argsOptional):
return argsPositional, argsOptional
# 3 posArgs 0optArgs -> 2posArg, 1optArg
(posArgs, optArgs) = a_func('dummy_api', 1, 2, 3 )
assert (1,2,3) == posArgs
assert {} == optArgs
# 1 posArgs 0optArgs -> 0posArg, 1optArg
(posArgs, optArgs) = a_func('dummy_api', 1 )
assert (1,) == posArgs
assert {} == optArgs
|
APPNAME = "Telechat-Py"
VERSION = "0.1"
BASE_DIR = "." # TODO
MSG_DIR = BASE_DIR + "/msg"
ACCT_FILENAME = "acct.db"
ACCT_PASSWORDS = "pwd.db"
LOG_FILENAME = "log"
PRELOGIN_FILENAME = "prelogin.msg"
POSTLOGIN_FILENAME = "postlogin.msg"
WELCOME_HEADER = APPNAME + ' v' + VERSION + """
Copyright (C) 2010 Skye Nott, F4 Systems
"""
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
def oozie(is_server=False # TODO: see if we can remove this
):
import params
if is_server:
params.HdfsDirectory(params.oozie_hdfs_user_dir,
action="create",
owner=params.oozie_user,
mode=params.oozie_hdfs_user_mode
)
Directory( params.conf_dir,
recursive = True,
owner = params.oozie_user,
group = params.user_group
)
XmlConfig( "oozie-site.xml",
conf_dir = params.conf_dir,
configurations = params.config['configurations']['oozie-site'],
configuration_attributes=params.config['configuration_attributes']['oozie-site'],
owner = params.oozie_user,
group = params.user_group,
mode = 0664
)
File(format("{conf_dir}/oozie-env.sh"),
owner=params.oozie_user,
content=InlineTemplate(params.oozie_env_sh_template)
)
if params.security_enabled:
tomcat_conf_dir = format("{tomcat_conf_secure}")
else:
tomcat_conf_dir = format("{tomcat_conf}")
File(format("{tomcat_conf_dir}/catalina.properties"),
content = Template("catalina.properties.j2"),
owner = params.oozie_user,
group = params.user_group,
mode = 0755
)
if (params.log4j_props != None):
File(format("{params.conf_dir}/oozie-log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.oozie_user,
content=params.log4j_props
)
elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
File(format("{params.conf_dir}/oozie-log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.oozie_user
)
environment = {
"no_proxy": format("{ambari_server_hostname}")
}
if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
params.jdbc_driver_name == "org.postgresql.Driver" or \
params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
curl -kf -x \"\" \
--retry 5 {jdk_location}{check_db_connection_jar_name}\
-o {check_db_connection_jar_name}'"),
not_if = format("[ -f {check_db_connection_jar} ]"),
environment=environment
)
oozie_ownership( )
if is_server:
oozie_server_specific( )
def oozie_ownership(
):
import params
File ( format("{conf_dir}/adminusers.txt"),
owner = params.oozie_user,
group = params.user_group
)
File ( format("{conf_dir}/hadoop-config.xml"),
owner = params.oozie_user,
group = params.user_group
)
File ( format("{conf_dir}/oozie-default.xml"),
owner = params.oozie_user,
group = params.user_group
)
Directory ( format("{conf_dir}/action-conf"),
owner = params.oozie_user,
group = params.user_group
)
File ( format("{conf_dir}/action-conf/hive.xml"),
owner = params.oozie_user,
group = params.user_group
)
def oozie_server_specific(
):
import params
File(params.pid_file,
action="delete",
not_if="ls {pid_file} >/dev/null 2>&1 && !(ps `cat {pid_file}` >/dev/null 2>&1)"
)
  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir,
                              params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir,
                              params.oozie_webapps_conf_dir, params.oozie_server_dir]
  Directory( oozie_server_directories,
owner = params.oozie_user,
mode = 0755,
recursive = True
)
cmd1 = "sh"
if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
cmd1 += format(" && cp {jdbc_driver_jar} {oozie_lib_dir}")
no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
Execute( [cmd1],
not_if = no_op_test
)
def correct_hadoop_auth_jar_files():
hadoop_auth_jar_file = "/usr/lib/hadoop/hadoop-auth-2.4.1.jar"
if not os.path.exists(hadoop_auth_jar_file):
raise Fail("Could not find %s" % (hadoop_auth_jar_file))
commands = ' '.join(
(
"if [ -f /usr/lib/oozie/lib/hadoop-auth-2.0.2-alpha.jar ];",
"then",
"rm -rf /usr/lib/oozie/lib/hadoop-auth-2.0.2-alpha.jar;",
"cp " + hadoop_auth_jar_file + " /usr/lib/oozie/lib;",
"fi"
)
)
Execute(commands)
commands = ' '.join(
(
"if [ -f /usr/lib/oozie/libtools/hadoop-auth-2.0.2-alpha.jar ];",
"then",
"rm -rf /usr/lib/oozie/libtools/hadoop-auth-2.0.2-alpha.jar;",
"cp " + hadoop_auth_jar_file + " /usr/lib/oozie/libtools;",
"fi"
)
)
Execute(commands)
|
__author__ = "Satish Palaniappan"
'''
Copyright 2015 Satish Palaniappan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys,traceback
from sentiments import Sentiments
from sentiments.ttypes import *
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import socket
sys.path.append("../")
from SentiHandlers.SentiMaster import SentiHandle
class SentimentHandler:
def __init__(self):
self.log = {}
self.S = SentiHandle()
def ping(self):
print ("Ping Success !! :D")
return
def getSentimentScore(self,obj):
'''
Arguments List:
general -> mainText,textType = "general"
microblogs -> mainText, textType = "microblogs"
comments -> mainText, textType = "comments"
        reviews -> mainText, textType = "reviews", title = "" <optional>,
                   topDomain, subDomain = "" <depends, not always optional; refer to the list in config.py>
        blogs_news -> mainText <or first paragraph>, title, textType = "blogs_news",
                      lastPara = "" <optional last paragraph>,
                      middleParas = [] <optional middle paragraphs; separate each paragraph with a newline in the string>
'''
try:
S = self.S.getSentimentScore(obj.mainText,obj.textType,obj.title,obj.middleParas,obj.lastPara,obj.topDomain,obj.subDomain)
print ("The Text : " + obj.mainText + " ||| SentimentScore[-5 to 5]: " + str(S))
return S
except Exception as err:
print(traceback.format_exc())
print(sys.exc_info()[0])
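# A hypothetical client-side sketch (never invoked here). It follows the
# standard Thrift client pattern and assumes the server started below is
# reachable on port 8002; only ping() is shown because the request struct for
# getSentimentScore is generated in sentiments.ttypes.
def _example_client_ping(host="localhost", port=8002):
    transport = TTransport.TBufferedTransport(TSocket.TSocket(host, port))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Sentiments.Client(protocol)
    transport.open()
    client.ping()  # the handler above logs "Ping Success !! :D"
    transport.close()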
handler = SentimentHandler()
processor = Sentiments.Processor(handler)
transport = TSocket.TServerSocket(port=8002)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
print ("Python sentiment server running...")
server.serve()
|
import datetime
import json
import os
from xml.dom.minidom import getDOMImplementation
from ipf.data import Data, Representation
from ipf.dt import *
from ipf.sysinfo import SiteName
from ipf.step import Step
from .entity import *
class AdminDomainStep(Step):
def __init__(self):
Step.__init__(self)
self.description = "a domain containing a number of services"
self._acceptParameter("admin_domain",
"An AdminDomain as a dictionary. See AdminDomain.fromJson() for the keys and values.",
True)
self.time_out = 5
self.produces = [AdminDomain]
def run(self):
try:
doc = self.params["admin_domain"]
except KeyError:
raise StepError("admin_domain not specified")
domain = AdminDomain()
domain.fromJson(doc)
self._output(domain)
class Domain(Entity):
DEFAULT_VALIDITY = 60*60*24 # seconds
def __init__(self):
Entity.__init__(self)
self.Description = None # string
self.WWW = None # URL
self.ContactID = [] # Contact
self.LocationID = None # Location
class DomainOgfJson(EntityOgfJson):
data_cls = Domain
def __init__(self, data):
EntityOgfJson.__init__(self,data)
def get(self):
return json.dumps(self.toJson(),sort_keys=True,indent=4)
def toJson(self):
        doc = EntityOgfJson.toJson(self)
        # The serializable fields live on the wrapped entity (self.data).
        domain = self.data
if domain.Description is not None:
doc["Description"] = domain.Description
if domain.WWW is not None:
doc["WWW"] = domain.WWW
associations = {}
if len(domain.ContactID) > 0:
associations["ContactID"] = domain.ContactID
associations["LocationID"] = domain.LocationID
doc["Associations"] = associations
return doc
class AdminDomain(Domain):
def __init__(self):
Domain.__init__(self)
self.Distributed = None # geographically-distributed resources (boolean)
self.Owner = None # person/entity that owns the resources (string)
self.ServiceID = [] # services managed by this domain (id)
self.ChildDomainID = [] # this domain aggregates others (id)
self.ParentDomainID = None # this domain is part of another
self.ComputingServiceID = [] # (id)
self.StorageServiceID = [] # (id)
class AdminDomainOgfJson(DomainOgfJson):
data_cls = AdminDomain
def __init__(self, data):
DomainOgfJson.__init__(self,data)
def get(self):
return json.dumps(self.toJson(),sort_keys=True,indent=4)
def toJson(self):
        doc = DomainOgfJson.toJson(self)
        # The serializable fields live on the wrapped entity (self.data).
        domain = self.data
# AdminDomain
if domain.Distributed is not None:
doc["Distributed"] = domain.Distributed
if domain.Owner is not None:
doc["Owner"] = domain.Owner
associations = {}
if len(domain.ServiceID) > 0:
associations["ServiceID"] = domain.ServiceID
if len(domain.ChildDomainID) > 0:
associations["ChildDomainID"] = domain.ChildDomainID
associations["ParentDomainID"] = domain.ParentDomainID
if len(domain.ComputingServiceID) > 0:
associations["ComputingServiceID"] = domain.ComputingServiceID
if len(domain.StorageServiceID) > 0:
associations["StorageServiceID"] = domain.StorageServiceID
doc["Associations"] = associations
return doc
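# A small serialization sketch for the classes above, guarded so it only runs
# when this module is executed directly. Field values are illustrative and it
# assumes Entity/EntityOgfJson (imported above) supply the remaining base
# fields of the JSON document.
if __name__ == "__main__":
    _domain = AdminDomain()
    _domain.Description = "Example administrative domain"
    _domain.Owner = "example-org"
    print(AdminDomainOgfJson(_domain).get())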
|
import os
import re
import pwd
import shutil
import socket
import array
import struct
import fcntl
import time
import azurelinuxagent.logger as logger
import azurelinuxagent.utils.fileutil as fileutil
import azurelinuxagent.utils.shellutil as shellutil
import azurelinuxagent.utils.textutil as textutil
from azurelinuxagent.distro.default.osutil import DefaultOSUtil
class Ubuntu14OSUtil(DefaultOSUtil):
def __init__(self):
super(Ubuntu14OSUtil, self).__init__()
def start_network(self):
return shellutil.run("service networking start", chk_err=False)
def stop_agent_service(self):
return shellutil.run("service walinuxagent stop", chk_err=False)
def start_agent_service(self):
return shellutil.run("service walinuxagent start", chk_err=False)
class Ubuntu12OSUtil(Ubuntu14OSUtil):
def __init__(self):
super(Ubuntu12OSUtil, self).__init__()
#Override
def get_dhcp_pid(self):
ret= shellutil.run_get_output("pidof dhclient3")
return ret[1] if ret[0] == 0 else None
class UbuntuOSUtil(Ubuntu14OSUtil):
def __init__(self):
super(UbuntuOSUtil, self).__init__()
def register_agent_service(self):
return shellutil.run("systemctl unmask walinuxagent", chk_err=False)
def unregister_agent_service(self):
return shellutil.run("systemctl mask walinuxagent", chk_err=False)
class UbuntuSnappyOSUtil(Ubuntu14OSUtil):
def __init__(self):
super(UbuntuSnappyOSUtil, self).__init__()
self.conf_file_path = '/apps/walinuxagent/current/waagent.conf'
def remove_rules_files(self, rules_files=""):
pass
def restore_rules_files(self, rules_files=""):
pass
|
"""System for assigning and displaying ratings of explorations.
"""
__author__ = 'Jacob Davis'
import datetime
from core.domain import exp_services
from core.platform import models
import feconf
(exp_models, user_models,) = models.Registry.import_models([
models.NAMES.exploration, models.NAMES.user])
transaction_services = models.Registry.import_transaction_services()
def assign_rating_to_exploration(user_id, exploration_id, new_rating):
"""Records the rating awarded by the user to the exploration in both the
user-specific data and exploration summary.
It validates the exploration id but not the user id.
- 'new_rating' should be an integer between 1 and 5
"""
if not isinstance(new_rating, int):
raise ValueError(
'Expected the rating to be an integer, received %s' % new_rating)
ALLOWED_RATINGS = [1, 2, 3, 4, 5]
if new_rating not in ALLOWED_RATINGS:
raise ValueError('Expected a rating 1-5, received %s.' % new_rating)
try:
exp_services.get_exploration_by_id(exploration_id)
    except Exception:
raise Exception('Invalid exploration id %s' % exploration_id)
def _update_user_rating():
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
if exp_user_data_model:
old_rating = exp_user_data_model.rating
else:
old_rating = None
exp_user_data_model = user_models.ExplorationUserDataModel.create(
user_id, exploration_id)
exp_user_data_model.rating = new_rating
exp_user_data_model.rated_on = datetime.datetime.utcnow()
exp_user_data_model.put()
return old_rating
old_rating = transaction_services.run_in_transaction(_update_user_rating)
exploration_summary = exp_services.get_exploration_summary_by_id(
exploration_id)
if not exploration_summary.ratings:
exploration_summary.ratings = feconf.get_empty_ratings()
exploration_summary.ratings[str(new_rating)] += 1
if old_rating:
exploration_summary.ratings[str(old_rating)] -= 1
exp_services.save_exploration_summary(exploration_summary)
def get_user_specific_rating_for_exploration(user_id, exploration_id):
"""
Returns:
An integer 1-5, or None if there is no rating of this exploration by
this user.
"""
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
return exp_user_data_model.rating if exp_user_data_model else None
def get_when_exploration_rated(user_id, exploration_id):
"""Returns the date-time the exploration was lasted rated by this user, or
None if no such rating has been awarded.
Currently this function is only used for testing since the times ratings
were awarded are not used for anything.
"""
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
return exp_user_data_model.rated_on if exp_user_data_model else None
def get_overall_ratings_for_exploration(exploration_id):
exp_summary = exp_services.get_exploration_summary_by_id(exploration_id)
return exp_summary.ratings
|
import os
import time
from io import BytesIO
from zope.interface import implementer
from twisted.internet.endpoints import UNIXClientEndpoint
from twisted.web.iweb import IAgentEndpointFactory
from twisted.web.client import Agent, readBody, FileBodyProducer
from twisted.internet import reactor
from twisted.web.http_headers import Headers
import json
from json import dumps
from twisted.trial import unittest
import subprocess
from sh import cat
from sh import kill
from config.setupcfg import getdefaultconfig, setup_logging
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
CONFIG_FILE = '/etc/hpedockerplugin/hpe.conf'
CONFIG = ['--config-file', CONFIG_FILE]
TEST_DIR = os.path.abspath('../')
TWISTD_PID = TEST_DIR + '/twistd.pid'
hpe_sock_path = b"/run/docker/plugins/hpe/hpe.sock"
@implementer(IAgentEndpointFactory)
class HPEEndpointFactory(object):
"""
Connect to hpe3's Unix socket.
"""
def __init__(self):
self.reactor = reactor
def endpointForURI(self, uri):
return UNIXClientEndpoint(self.reactor, hpe_sock_path)
class HPEPLUGINTESTS(unittest.TestCase):
def _wait_for_pid_file(self, filename, wait_time):
count = 0
while not os.path.exists(filename):
if count == wait_time:
break
time.sleep(1)
count += 1
if os.path.isfile(filename):
self.twistd_pid = cat(filename)
print 'self.twistd_pid: ', self.twistd_pid
else:
raise ValueError("%s isn't a file!" % filename)
def checkResponse(self, response, exp_result):
# TODO: convert to log messages
"""
print 'Response version:', response.version
print 'Response code:', response.code
print 'Response phrase:', response.phrase
print 'Response headers:'
print pformat(list(response.headers.getAllRawHeaders()))
"""
"""
LOG.debug("Response Body %s", str(response.version))
LOG.debug("Response Body %s", str(response.code))
LOG.debug("Response Body %s", str(response.phrase))
LOG.debug("Response Body %s",
str(list(response.headers.getAllRawHeaders())))
LOG.debug("Expected Results %s", str(exp_result))
"""
d = readBody(response)
d.addCallback(self.assertResponse, exp_result)
return d
def getResponse(self, response):
# TODO: convert to log messages
"""
print 'Response version:', response.version
print 'Response code:', response.code
print 'Response phrase:', response.phrase
print 'Response headers:'
print pformat(list(response.headers.getAllRawHeaders()))
"""
"""
LOG.debug("Response Body %s", str(response.version))
LOG.debug("Response Body %s", str(response.code))
LOG.debug("Response Body %s", str(response.phrase))
LOG.debug("Response Body %s",
str(list(response.headers.getAllRawHeaders())))
LOG.debug("Expected Results %s", str(exp_result))
"""
d = readBody(response)
return d
def assertResponse(self, body, exp_result):
LOG.debug("Response Body %s", str(body))
LOG.debug("Expected Results %s", str(exp_result))
self.assertEqual(body, exp_result)
def cbFailed(self, failure):
LOG.error("Test Failed %s", str(failure))
self.fail(msg='Test Failed')
"""
Connect to hpe3's Unix socket.
"""
def setUp(self):
# Setup Test Logging
# Set Logging level
# Setup the default, hpe3parconfig, and hpelefthandconfig
# configuration objects.
hpedefaultconfig = getdefaultconfig(CONFIG)
logging_level = hpedefaultconfig.logging
setup_logging('test_hpe_plugin', logging_level)
# Start HPE Docker Plugin
bashcommand = "/usr/bin/twistd hpe_plugin_service"
try:
subprocess.check_output(['sh', '-c', bashcommand], cwd=TEST_DIR)
        except Exception:
            LOG.error("Test Setup Failed: could not start the HPE Docker plugin")
            self.fail(msg='Test Failed')
self._wait_for_pid_file(TWISTD_PID, 5)
def tearDown(self):
# Stop HPE Docker Plugin
kill(str(self.twistd_pid))
is_running = os.path.exists("/proc/%s" % str(self.twistd_pid))
while is_running:
is_running = os.path.exists("/proc/%s" % str(self.twistd_pid))
time.sleep(0.25)
def test_hpe_activate(self):
path = b"/Plugin.Activate"
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path)
d.addCallback(self.checkResponse, json.dumps({u"Implements":
[u"VolumeDriver"]}))
d.addErrback(self.cbFailed)
return d
def test_hpe_create_volume(self):
name = 'test-create-volume'
path = b"/VolumeDriver.Create"
body = {u"Name": name,
u"Opts": None}
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
d.addErrback(self.cbFailed)
return d
def test_hpe_create_volume_size_option(self):
name = 'test-create-volume'
path = b"/VolumeDriver.Create"
body = {u"Name": name,
u"Opts": {u"size": u"50"}}
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
d.addCallback(self._remove_volume_callback, name)
d.addErrback(self.cbFailed)
return d
def test_hpe_create_volume_provisioning_option(self):
name = 'test-create-volume'
path = b"/VolumeDriver.Create"
body = {u"Name": name,
u"Opts": {u"provisioning": u"full"}}
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
d.addCallback(self._remove_volume_callback, name)
d.addErrback(self.cbFailed)
return d
def test_hpe_create_volume_invalid_provisioning_option(self):
name = 'test-create-volume-fake'
path = b"/VolumeDriver.Create"
body = {u"Name": name,
u"Opts": {u"provisioning": u"fake"}}
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({
u"Err": "Invalid input received: Must specify a valid " +
"provisioning type ['thin', 'full', " +
"'dedup'], value 'fake' is invalid."}))
d.addCallback(self._remove_volume_callback, name)
d.addErrback(self.cbFailed)
return d
def test_hpe_create_volume_invalid_option(self):
name = 'test-create-volume-fake'
path = b"/VolumeDriver.Create"
body = {u"Name": name,
u"Opts": {u"fake": u"fake"}}
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({
u"Err": "create volume failed, error is: fake is not a valid "
"option. Valid options are: ['size', 'provisioning', "
"'flash-cache']"}))
d.addCallback(self._remove_volume_callback, name)
d.addErrback(self.cbFailed)
return d
def _remove_volume_callback(self, body, name):
# NOTE: body arg is the result from last deferred call.
# Python complains about parameter mis-match if you don't include it
return self._remove_volume(name)
def _remove_volume(self, name):
path = b"/VolumeDriver.Remove"
body = {u"Name": name}
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
d.addErrback(self.cbFailed)
return d
def test_hpe_remove_volume(self):
name = 'test-create-volume'
return self._remove_volume(name)
def _get_volume_mount_path(self, body, name):
# NOTE: body arg is the result from last deferred call.
# Python complains about parameter mis-match if you don't include it
# In this test, we need it to compare expected results with Path
# request
# Compare path returned by mount (body) with Get Path request
path = b"/VolumeDriver.Path"
newbody = {u"Name": name}
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(newbody)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, body)
d.addErrback(self.cbFailed)
return d
def _mount_the_volume(self, body, name):
# NOTE: body arg is the result from last deferred call.
# Python complains about parameter mis-match if you don't include it
# Mount the previously created volume
path = b"/VolumeDriver.Mount"
newbody = {u"Name": name}
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(newbody)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.getResponse)
# If we get a valid response from Path request then we assume
# the mount passed.
# TODO: Add additonal logic to verify the mountpath
d.addCallback(self._get_volume_mount_path, name)
return d
def _unmount_the_volume(self, body, name):
# NOTE: body arg is the result from last deferred call.
# Python complains about parameter mis-match if you don't include it
path = b"/VolumeDriver.Unmount"
newbody = {u"Name": name}
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(newbody)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
d.addErrback(self.cbFailed)
return d
def broken_test_hpe_mount_umount_volume(self):
name = 'test-mount-volume'
path = b"/VolumeDriver.Create"
body = {u"Name": name}
# Create a volume to be mounted
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
d.addErrback(self.cbFailed)
# Mount the previously created volume
d.addCallback(self._mount_the_volume, name)
# UMount the previously created volume
d.addCallback(self._unmount_the_volume, name)
# Remove the previously created volume
d.addCallback(self._remove_volume_callback, name)
return d
def test_hpe_get_volume(self):
name = 'test-get-volume'
path = b"/VolumeDriver.Create"
body = {u"Name": name}
# Create a volume to be mounted
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
d.addErrback(self.cbFailed)
# Get the previously created volume
expected = {u"Volume": {u"Status": {},
u"Mountpoint": '',
u"Name": name},
u"Err": ''}
d.addCallback(self._get_volume, name, expected)
# Remove the previously created volume
d.addCallback(self._remove_volume_callback, name)
return d
def test_hpe_get_non_existent_volume(self):
name = 'test-get-volume'
# Get the previously created volume
expected = {u"Err": ''}
d = self._get_volume({}, name, expected)
return d
def _get_volume(self, body, name, expected):
path = b"/VolumeDriver.Get"
body = {u"Name": name}
# Get a volume
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps(expected))
d.addErrback(self.cbFailed)
return d
def broken_test_hpe_list_volume(self):
name = 'test-list-volume'
path = b"/VolumeDriver.Create"
body = {u"Name": name}
# Create a volume to be mounted
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
d.addErrback(self.cbFailed)
# List volumes
expected = {u"Err": '',
u"Volumes": [{u"Mountpoint": '',
u"Name": name}]}
d.addCallback(self._list_volumes, name, expected)
# Remove the previously created volume
d.addCallback(self._remove_volume_callback, name)
return d
def broken_test_hpe_list_volume_no_volumes(self):
path = b"/VolumeDriver.List"
        # List volumes when none have been created
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps({})))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps({u"Err": '',
u"Volumes": []}))
d.addErrback(self.cbFailed)
return d
def _list_volumes(self, body, name, expected):
path = b"/VolumeDriver.List"
body = {u"Name": name}
        # List volumes
headers = Headers({b"content-type": [b"application/json"]})
body_producer = FileBodyProducer(BytesIO(dumps(body)))
agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
body_producer)
d.addCallback(self.checkResponse, json.dumps(expected))
d.addErrback(self.cbFailed)
return d
|
"""Test forms."""
from werkzeug.datastructures import ImmutableMultiDict as IMultidict
from osi_prototype.public.forms import LoginForm
from osi_prototype.user.forms import EditForm, MessageForm, RegisterForm
class TestRegisterForm:
"""Register form."""
def test_validate_user_already_registered(self, user):
"""Enter username that is already registered."""
form = RegisterForm(firstname='test first name', lastname='test last name',
username=user.username, email='foo@bar.com',
password='example', confirm='example',
usertype='parent')
assert form.validate() is False
assert 'Username already registered' in form.username.errors
def test_validate_email_already_registered(self, user):
"""Enter email that is already registered."""
form = RegisterForm(firstname='test first name', lastname='test last name',
username='unique', email=user.email,
password='example', confirm='example',
usertype='parent')
assert form.validate() is False
assert 'Email already registered' in form.email.errors
def test_validate_success(self, db):
"""Register with success."""
form = RegisterForm(firstname='test first name', lastname='test last name',
username='newusername', email='new@test.test',
password='example', confirm='example',
usertype='parent')
assert form.validate() is True
def test_validate_missing_fields(self, db):
"""Register with failure due to missing fields."""
form = RegisterForm(username='test', email='foo@bar.com',
password='example', confirm='example',
usertype='parent')
assert form.validate() is False
assert any('required' in e for e in form.lastname.errors)
assert any('required' in e for e in form.firstname.errors)
def test_validate_bad_user_type(self, db):
"""Register with failure due to bad user type."""
form = RegisterForm(firstname='first', lastname='last',
username='test', email='foo@bar.com',
password='example', confirm='example',
usertype='bad')
assert form.validate() is False
assert any('valid choice' in e for e in form.usertype.errors)
class TestLoginForm:
"""Login form."""
def test_validate_success(self, user):
"""Login successful."""
user.set_password('example')
user.save()
form = LoginForm(username=user.username, password='example')
assert form.validate() is True
assert form.user == user
def test_validate_unknown_username(self, db):
"""Unknown username."""
form = LoginForm(username='unknown', password='example')
assert form.validate() is False
assert 'Unknown username' in form.username.errors
assert form.user is None
def test_validate_invalid_password(self, user):
"""Invalid password."""
user.set_password('example')
user.save()
form = LoginForm(username=user.username, password='wrongpassword')
assert form.validate() is False
assert 'Invalid password' in form.password.errors
def test_validate_inactive_user(self, user):
"""Inactive user."""
user.active = False
user.set_password('example')
user.save()
# Correct username and password, but user is not activated
form = LoginForm(username=user.username, password='example')
assert form.validate() is False
assert 'User not activated' in form.username.errors
class TestEditForm:
"""Edit form."""
def test_validate_no_edits(self, user):
"""Successful even with no edits."""
form = EditForm()
assert form.validate() is True
def test_validate_bad_numbers(self, db):
"""Unsuccessful with bad numbers."""
data = IMultidict({'num_adults': -5,
'num_children': 500,
'num_capacity': -1})
form = EditForm(data)
assert form.validate() is False
assert len(form.num_adults.errors) > 0
assert len(form.num_children.errors) > 0
        assert len(form.num_capacity.errors) > 0
class TestMessageForm:
"""Message form."""
def test_validate_success(self, user):
"""Successful message sent."""
form = MessageForm(from_username='abc', to_username='def', body='xyz')
assert form.validate() is True
def test_validate_message_too_short(self, user):
"""Message too short."""
form = MessageForm(from_username='abc', to_username='def', body='xy')
assert form.validate() is False
|
import gin
import os
import util
import numpy as np
import gym
def main(log_dir):
logger = util.create_logger(name='data_collection')
solution = util.create_solution(device='cpu:0')
model_file = os.path.join(log_dir, 'model.npz')
solution.load(model_file)
trajectories = []
env = gym.make('AntBulletEnv-v0')
# Collect trajectories from rollouts.
max_ep_cnt = 1000
traj_len = 500
ep_saved = 0
while ep_saved < max_ep_cnt:
ep_reward = 0
ep_steps = 0
obs = env.reset()
prev_act = np.zeros(8)
ep_traj = []
done = False
while not done and ep_steps < traj_len:
act = solution.get_action(obs)
ep_traj.append(np.concatenate([prev_act, obs, act], axis=0))
            obs, reward, done, info = env.step(act)
            prev_act = act  # remember this action so the next record sees the previous action
            ep_reward += reward
            ep_steps += 1
logger.info(
'Episode:{0}, steps:{1}, reward:{2:.2f}'.format(
ep_saved + 1, ep_steps, ep_reward)
)
if ep_steps >= traj_len:
trajectories.append(np.vstack(ep_traj))
ep_saved += 1
else:
logger.info('Trajectory too short, discard.')
trajectories = np.stack(trajectories)
logger.info('trajectories.shape={}'.format(trajectories.shape))
np.savez(os.path.join(log_dir, 'data.npz'), data=trajectories)
if __name__ == '__main__':
model_dir = 'pretrained/ant_mlp'
gin.parse_config_file(os.path.join(model_dir, 'config.gin'))
main(model_dir)
|
try:
import simplejson as json
except ImportError:
import json
import random
from weakref import WeakValueDictionary
from riak.client.operations import RiakClientOperations
from riak.node import RiakNode
from riak.bucket import RiakBucket, BucketType
from riak.mapreduce import RiakMapReduceChain
from riak.resolver import default_resolver
from riak.table import Table
from riak.transports.http import HttpPool
from riak.transports.tcp import TcpPool
from riak.security import SecurityCreds
from riak.util import lazy_property, bytes_to_str, str_to_bytes
from six import string_types, PY2
from riak.client.multi import MultiGetPool, MultiPutPool
def default_encoder(obj):
"""
Default encoder for JSON datatypes, which returns UTF-8 encoded
json instead of the default bloated backslash u XXXX escaped ASCII strings.
"""
if isinstance(obj, bytes):
return json.dumps(bytes_to_str(obj),
ensure_ascii=False).encode("utf-8")
else:
return json.dumps(obj, ensure_ascii=False).encode("utf-8")
def binary_json_encoder(obj):
"""
Default encoder for JSON datatypes, which returns UTF-8 encoded
json instead of the default bloated backslash u XXXX escaped ASCII strings.
"""
if isinstance(obj, bytes):
return json.dumps(bytes_to_str(obj),
ensure_ascii=False).encode("utf-8")
else:
return json.dumps(obj, ensure_ascii=False).encode("utf-8")
def binary_json_decoder(obj):
"""
Default decoder from JSON datatypes.
"""
return json.loads(bytes_to_str(obj))
def binary_encoder_decoder(obj):
"""
Assumes value is already in binary format, so passes unchanged.
"""
return obj
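# A quick, guarded illustration of the encoder/decoder behaviour documented
# above (only runs when this module is executed directly; values are arbitrary):
if __name__ == '__main__':
    # Non-ASCII text comes back as UTF-8 bytes, not backslash-u escaped ASCII.
    print(default_encoder({'greeting': u'\u00e9clair'}))
    # The binary encoder/decoder pair round-trips plain JSON values.
    print(binary_json_decoder(binary_json_encoder([1, 2, 3])))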
class RiakClient(RiakMapReduceChain, RiakClientOperations):
"""
The ``RiakClient`` object holds information necessary to connect
to Riak. Requests can be made to Riak directly through the client
or by using the methods on related objects.
"""
#: The supported protocols
PROTOCOLS = ['http', 'pbc']
def __init__(self, protocol='pbc', transport_options={},
nodes=None, credentials=None,
multiget_pool_size=None, multiput_pool_size=None,
**kwargs):
"""
Construct a new ``RiakClient`` object.
:param protocol: the preferred protocol, defaults to 'pbc'
:type protocol: string
:param nodes: a list of node configurations,
where each configuration is a dict containing the keys
'host', 'http_port', and 'pb_port'
:type nodes: list
:param transport_options: Optional key-value args to pass to
the transport constructor
:type transport_options: dict
:param credentials: optional object of security info
:type credentials: :class:`~riak.security.SecurityCreds` or dict
:param multiget_pool_size: the number of threads to use in
:meth:`multiget` operations. Defaults to a factor of the number of
CPUs in the system
:type multiget_pool_size: int
:param multiput_pool_size: the number of threads to use in
:meth:`multiput` operations. Defaults to a factor of the number of
CPUs in the system
:type multiput_pool_size: int
"""
kwargs = kwargs.copy()
if nodes is None:
self.nodes = [self._create_node(kwargs), ]
else:
self.nodes = [self._create_node(n) for n in nodes]
self._multiget_pool_size = multiget_pool_size
self._multiput_pool_size = multiput_pool_size
self.protocol = protocol or 'pbc'
self._resolver = None
self._credentials = self._create_credentials(credentials)
self._http_pool = HttpPool(self, **transport_options)
self._tcp_pool = TcpPool(self, **transport_options)
self._closed = False
if PY2:
self._encoders = {'application/json': default_encoder,
'text/json': default_encoder,
'text/plain': str}
self._decoders = {'application/json': json.loads,
'text/json': json.loads,
'text/plain': str}
else:
self._encoders = {'application/json': binary_json_encoder,
'text/json': binary_json_encoder,
'text/plain': str_to_bytes,
'binary/octet-stream': binary_encoder_decoder}
self._decoders = {'application/json': binary_json_decoder,
'text/json': binary_json_decoder,
'text/plain': bytes_to_str,
'binary/octet-stream': binary_encoder_decoder}
self._buckets = WeakValueDictionary()
self._bucket_types = WeakValueDictionary()
self._tables = WeakValueDictionary()
def __del__(self):
self.close()
def _get_protocol(self):
return self._protocol
def _set_protocol(self, value):
if value not in self.PROTOCOLS:
raise ValueError("protocol option is invalid, must be one of %s" %
repr(self.PROTOCOLS))
self._protocol = value
protocol = property(_get_protocol, _set_protocol,
doc="""
Which protocol to prefer, one of
:attr:`PROTOCOLS
<riak.client.RiakClient.PROTOCOLS>`. Please
note that when one protocol is selected, the
other protocols MAY NOT attempt to connect.
Changing to another protocol will cause a
connection on the next request.
Some requests are only valid over ``'http'``,
and will always be sent via
those transports, regardless of which protocol
is preferred.
""")
def _get_resolver(self):
return self._resolver or default_resolver
def _set_resolver(self, value):
if value is None or callable(value):
self._resolver = value
else:
raise TypeError("resolver is not a function")
resolver = property(_get_resolver, _set_resolver,
doc=""" The sibling-resolution function for this client.
Defaults to :func:`riak.resolver.default_resolver`.""")
def _get_client_id(self):
with self._transport() as transport:
return transport.client_id
def _set_client_id(self, client_id):
for http in self._http_pool:
http.client_id = client_id
for pb in self._tcp_pool:
pb.client_id = client_id
client_id = property(_get_client_id, _set_client_id,
doc="""The client ID for this client instance""")
def get_encoder(self, content_type):
"""
Get the encoding function for the provided content type.
:param content_type: the requested media type
:type content_type: str
:rtype: function
"""
return self._encoders.get(content_type)
def set_encoder(self, content_type, encoder):
"""
Set the encoding function for the provided content type.
:param content_type: the requested media type
:type content_type: str
:param encoder: an encoding function, takes a single object
argument and returns encoded data
:type encoder: function
"""
self._encoders[content_type] = encoder
def get_decoder(self, content_type):
"""
Get the decoding function for the provided content type.
:param content_type: the requested media type
:type content_type: str
:rtype: function
"""
return self._decoders.get(content_type)
def set_decoder(self, content_type, decoder):
"""
Set the decoding function for the provided content type.
:param content_type: the requested media type
:type content_type: str
:param decoder: a decoding function, takes encoded data and
returns a Python type
:type decoder: function
"""
self._decoders[content_type] = decoder
def bucket(self, name, bucket_type='default'):
"""
Get the bucket by the specified name. Since buckets always exist,
this will always return a
:class:`RiakBucket <riak.bucket.RiakBucket>`.
If you are using a bucket that is contained in a bucket type, it is
preferable to access it from the bucket type object::
# Preferred:
client.bucket_type("foo").bucket("bar")
# Equivalent, but not preferred:
client.bucket("bar", bucket_type="foo")
:param name: the bucket name
:type name: str
:param bucket_type: the parent bucket-type
:type bucket_type: :class:`BucketType <riak.bucket.BucketType>`
or str
:rtype: :class:`RiakBucket <riak.bucket.RiakBucket>`
"""
if not isinstance(name, string_types):
raise TypeError('Bucket name must be a string')
if isinstance(bucket_type, string_types):
bucket_type = self.bucket_type(bucket_type)
elif not isinstance(bucket_type, BucketType):
raise TypeError('bucket_type must be a string '
'or riak.bucket.BucketType')
b = RiakBucket(self, name, bucket_type)
return self._setdefault_handle_none(
self._buckets, (bucket_type, name), b)
def bucket_type(self, name):
"""
Gets the bucket-type by the specified name. Bucket-types do
not always exist (unlike buckets), but this will always return
a :class:`BucketType <riak.bucket.BucketType>` object.
:param name: the bucket-type name
:type name: str
:rtype: :class:`BucketType <riak.bucket.BucketType>`
"""
if not isinstance(name, string_types):
raise TypeError('BucketType name must be a string')
btype = BucketType(self, name)
return self._setdefault_handle_none(
self._bucket_types, name, btype)
def table(self, name):
"""
Gets the table by the specified name. Tables do
not always exist (unlike buckets), but this will always return
a :class:`Table <riak.table.Table>` object.
:param name: the table name
:type name: str
:rtype: :class:`Table <riak.table.Table>`
"""
if not isinstance(name, string_types):
raise TypeError('Table name must be a string')
if name in self._tables:
return self._tables[name]
else:
table = Table(self, name)
self._tables[name] = table
return table
def close(self):
"""
Iterate through all of the connections and close each one.
"""
if not self._closed:
self._closed = True
self._stop_multi_pools()
if self._http_pool is not None:
self._http_pool.clear()
self._http_pool = None
if self._tcp_pool is not None:
self._tcp_pool.clear()
self._tcp_pool = None
def _stop_multi_pools(self):
if self._multiget_pool:
self._multiget_pool.stop()
self._multiget_pool = None
if self._multiput_pool:
self._multiput_pool.stop()
self._multiput_pool = None
def _create_node(self, n):
if isinstance(n, RiakNode):
return n
        elif isinstance(n, tuple) and len(n) == 3:
host, http_port, pb_port = n
return RiakNode(host=host,
http_port=http_port,
pb_port=pb_port)
elif isinstance(n, dict):
return RiakNode(**n)
else:
raise TypeError("%s is not a valid node configuration"
% repr(n))
def _create_credentials(self, n):
"""
Create security credentials, if necessary.
"""
if not n:
return n
elif isinstance(n, SecurityCreds):
return n
elif isinstance(n, dict):
return SecurityCreds(**n)
else:
raise TypeError("%s is not a valid security configuration"
% repr(n))
def _choose_node(self, nodes=None):
"""
Chooses a random node from the list of nodes in the client,
taking into account each node's recent error rate.
        :rtype: RiakNode
"""
if not nodes:
nodes = self.nodes
# Prefer nodes which have gone a reasonable time without
# errors
def _error_rate(node):
return node.error_rate.value()
good = [n for n in nodes if _error_rate(n) < 0.1]
        if len(good) == 0:
# Fall back to a minimally broken node
return min(nodes, key=_error_rate)
else:
return random.choice(good)
def _setdefault_handle_none(self, wvdict, key, value):
# TODO FIXME FUTURE
# This is a workaround for Python issue 19542
# http://bugs.python.org/issue19542
rv = wvdict.setdefault(key, value)
if rv is None:
return value
else:
return rv
@lazy_property
def _multiget_pool(self):
if self._multiget_pool_size:
return MultiGetPool(self._multiget_pool_size)
else:
return None
@lazy_property
def _multiput_pool(self):
if self._multiput_pool_size:
return MultiPutPool(self._multiput_pool_size)
else:
return None
def __hash__(self):
return hash(frozenset([(n.host, n.http_port, n.pb_port)
for n in self.nodes]))
def __eq__(self, other):
if isinstance(other, self.__class__):
return hash(self) == hash(other)
else:
return False
def __ne__(self, other):
if isinstance(other, self.__class__):
return hash(self) != hash(other)
else:
return True
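# A minimal, hedged usage sketch for the client defined above. The node
# address, bucket names, and media type are hypothetical placeholders; only
# methods defined in this module are exercised, and no request is issued.
if __name__ == '__main__':
    client = RiakClient(protocol='pbc',
                        nodes=[{'host': '127.0.0.1', 'pb_port': 8087}])
    # Preferred access pattern: resolve the bucket through its bucket type.
    bucket = client.bucket_type('default').bucket('example-bucket')
    # Register an identity encoder/decoder pair for a made-up media type.
    client.set_encoder('application/x-raw', lambda obj: obj)
    client.set_decoder('application/x-raw', lambda data: data)
    assert client.get_encoder('application/x-raw') is not None
    client.close()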
|
import datetime
import logging
from django.conf import settings
from .. import email
logger = logging.getLogger(__name__)
def sync_daily_email_blasts(blast):
for l in blast.recipient_lists.all():
l.send(blast)
blast.send_completed_on = datetime.datetime.now()
blast.save()
def sync_recipient_list(recipients_list, blast):
for r in recipients_list.recipients.all():
logger.debug('sync_recipient_list: %s %s %s' % (blast, recipients_list,
r))
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
html = blast.render(recipient, recipients_list)
subject = blast.subject
bodies = {'html': html, 'text': 'This email requires HTML'} # TODO: Support plain text rendering
to = (recipient.email,)
from_email = settings.TT_DAILYEMAILBLAST_FROMEMAIL
headers = {} # ???
email.send_email(subject, bodies, from_email, to, headers)
|
def get_key_version_attestation(project_id, location_id, key_ring_id, key_id, version_id):
"""
    Get an HSM-backed key's attestation.
Args:
project_id (string): Google Cloud project ID (e.g. 'my-project').
location_id (string): Cloud KMS location (e.g. 'us-east1').
key_ring_id (string): ID of the Cloud KMS key ring (e.g. 'my-key-ring').
key_id (string): ID of the key to use (e.g. 'my-key').
version_id (string): ID of the version to use (e.g. '1').
Returns:
Attestation: Cloud KMS key attestation.
"""
# Import the client library.
from google.cloud import kms
# Import base64 for printing the attestation.
import base64
# Create the client.
client = kms.KeyManagementServiceClient()
# Build the key version name.
key_version_name = client.crypto_key_version_path(project_id, location_id, key_ring_id, key_id, version_id)
# Call the API.
version = client.get_crypto_key_version(request={'name': key_version_name})
    # Only HSM keys have an attestation. For other key types, the attestation
    # will be None.
    attestation = version.attestation
    if not attestation:
        raise ValueError('no attestation - attestations only exist on HSM keys')
encoded_attestation = base64.b64encode(attestation.content)
print('Got key attestation: {}'.format(encoded_attestation))
return attestation
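# Hedged usage sketch for the helper above. Every identifier is a hypothetical
# placeholder; a real call requires an existing HSM-protected key version and
# Google Cloud credentials with permission to view the key.
if __name__ == '__main__':
    get_key_version_attestation(
        project_id='my-project',
        location_id='us-east1',
        key_ring_id='my-key-ring',
        key_id='my-hsm-key',
        version_id='1')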
|
"""Generates some swift wrapper from some ops description protobuf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import bytes
from functools import reduce
import json
import os
import six
import tensorflow.compat.v1 as tf
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import c_api_util
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'api_def_path',
None,
'path to the api_def directory, e.g. tensorflow/core/api_def/base_api')
flags.DEFINE_string(
'output_path',
None,
'path for the generated swift file')
flags.DEFINE_string(
'dispatching_output_path',
None,
    'path for the generated dispatching swift file')
_WARNING = """// !!! THIS CODE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND !!!
//
"""
_HEADER = """// Copyright 2019 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
"""
_DISPATCHER_TEMPLATE = '''@available(
*, deprecated, renamed: "_Raw",
message:
"""
'Raw' has been renamed to '_Raw' to indicate that it is not a guaranteed/stable API.
"""
)
public typealias Raw = _Raw
{raw_dispatching_enum}
'''
_OUTPUT_FILE = 'RawOpsGenerated.swift'
_RENAMED_KEYWORDS = {
'': 'empty',
'in': 'in_',
'var': 'var_',
'where': 'where_',
'if': 'if_',
'for': 'for_',
'Int': 'Intt',
'while': 'while_',
'switch': 'switch_',
'protocol': 'protocol_',
'init': 'init_'}
_TYPE_PROTOCOLS = [
(set(), 'TensorFlowScalar'),
({types_pb2.DT_UINT8,
types_pb2.DT_UINT16,
types_pb2.DT_UINT32,
types_pb2.DT_UINT64}, 'UnsignedInteger & TensorFlowScalar'),
({types_pb2.DT_INT32,
types_pb2.DT_INT64}, 'TensorFlowIndex'),
({types_pb2.DT_UINT8,
types_pb2.DT_UINT16,
types_pb2.DT_UINT32,
types_pb2.DT_UINT64,
types_pb2.DT_INT8,
types_pb2.DT_INT16,
types_pb2.DT_INT32,
types_pb2.DT_INT64}, 'TensorFlowInteger'),
({types_pb2.DT_FLOAT,
types_pb2.DT_DOUBLE,
types_pb2.DT_HALF,
types_pb2.DT_BFLOAT16}, 'FloatingPoint & TensorFlowScalar'),
({types_pb2.DT_UINT8,
types_pb2.DT_UINT16,
types_pb2.DT_UINT32,
types_pb2.DT_UINT64,
types_pb2.DT_INT8,
types_pb2.DT_INT16,
types_pb2.DT_INT32,
types_pb2.DT_INT64,
types_pb2.DT_FLOAT,
types_pb2.DT_DOUBLE,
types_pb2.DT_HALF,
types_pb2.DT_BFLOAT16}, 'TensorFlowNumeric')]
_SWIFTIFIED_TYPES = {
types_pb2.DT_FLOAT: 'Float',
types_pb2.DT_DOUBLE: 'Double',
types_pb2.DT_INT32: 'Int32',
types_pb2.DT_UINT8: 'UInt8',
types_pb2.DT_INT16: 'Int16',
types_pb2.DT_INT8: 'Int8',
types_pb2.DT_INT64: 'Int64',
types_pb2.DT_BOOL: 'Bool',
types_pb2.DT_UINT16: 'UInt16',
types_pb2.DT_UINT32: 'UInt32',
types_pb2.DT_UINT64: 'UInt64'}
_SWIFTIFIED_ATTR_TYPES = {
'int': 'Int64',
'float': 'Double',
'bool': 'Bool',
'string': 'String',
'type': 'TensorDataType',
'shape': 'TensorShape?',
'list(int)': '[Int32]',
'list(float)': '[Double]',
'list(bool)': '[Bool]',
'list(string)': '[String]',
'list(type)': '[TensorDataType]',
'list(shape)': '[TensorShape?]'}
_OMITTED_PARAMETER_NAMES = {
'x', 'y', 'a', 'b', 'input', 'tensor', 'values'}
_START_COMMENT = '///'
X10_OPS = {
"CanonicalDims", "CheckSameDevice", "CheckSameDevice", "CheckSameDevice",
"CheckSameDevice", "CheckSamePrecision", "CheckSamePrecision",
"CheckSamePrecision", "CheckSamePrecision",
"Abs", "Acos", "Acosh", "AddV2", "All", "Any", "ApproximateEqual", "ArgMax",
"ArgMax", "ArgMin", "Asin", "Asinh", "Atan", "Atanh", "ConvertPadding",
"ConvertPadding2", "ConvertDataFormat", "ConvertDataFormat1",
"ConvertDataFormat4", "ConvertMirrorPadMode", "ReversedPaddings",
"AvgPool", "AvgPool3D", "AvgPool3DGrad", "AvgPoolGrad", "BatchMatMulV2",
"BroadcastGradientArgs", "BroadcastTo", "BroadcastTo", "Cast", "Ceil",
"ClipByValue", "ConcatV2", "Conv2D", "Conv2DBackpropFilter",
"Conv2DBackpropFilter", "Conv2DBackpropInput", "Conv2DBackpropInput",
"Conv3D", "Conv3DBackpropFilterV2", "Conv3DBackpropInputV2", "Cos", "Cosh",
"Cumprod", "Cumsum", "DepthwiseConv2dNative",
"DepthwiseConv2dNativeBackpropFilter", "DepthwiseConv2dNativeBackpropInput",
"DiagPart", "Div", "Elu", "EluGrad", "Equal", "Exp", "ExpandDims", "Expm1",
"Fill", "Fill", "Floor", "Gather", "GatherV2", "Greater", "GreaterEqual",
"InvertPermutation", "IsFinite", "IsInf", "IsNan", "LeakyRelu",
"LeakyReluGrad", "Less", "LessEqual", "LinSpace", "LinSpace", "Log", "Log1p",
"LogSoftmax", "LogicalAnd", "LogicalNot", "LogicalOr",
"MatMul", "Max", "MaxPool3D", "MaxPool3DGrad", "MaxPoolGradV2",
"MaxPoolGradV2", "MaxPoolV2", "MaxPoolV2", "Maximum", "Mean", "Mean", "Min",
"Minimum", "MirrorPad", "MirrorPadGrad", "Mod", "Mul", "Neg", "NotEqual",
"OneHot", "OneHot", "OnesLike", "Pack", "Pad", "PadV2", "PhysicalCast", "Pow",
"Prod", "Qr", "Range", "Rank", "Relu", "Relu6", "Relu6Grad", "ReluGrad",
"Reshape", "Reshape", "ReverseV2", "Round", "Rsqrt", "RsqrtGrad", "Select",
"Selu", "SeluGrad", "Shape", "Sigmoid", "SigmoidGrad", "Sign", "Sin", "Sinh",
"Size", "Slice", "Softmax", "SoftmaxCrossEntropyWithLogits", "Softplus",
"SoftplusGrad", "Softsign", "SoftsignGrad",
"SparseSoftmaxCrossEntropyWithLogits", "Split", "SplitV", "Sqrt", "Square",
"SquaredDifference", "Squeeze", "StatelessMultinomial",
"StatelessRandomNormal", "StatelessRandomNormal", "StatelessRandomUniform",
"StatelessRandomUniform", "StatelessRandomUniformInt",
"StatelessRandomUniformInt", "StatelessTruncatedNormal",
"StatelessTruncatedNormal", "StridedSlice", "StridedSliceGrad", "Sub", "Sum",
"Sum", "Svd", "Tan", "Tanh", "TensorStridedSliceUpdate", "Tile", "ToDevice",
"Transpose", "Unpack", "UnsortedSegmentSum", "Xdivy", "ZerosLike",
"Rand",
}
class UnableToGenerateCodeError(Exception):
def __init__(self, details):
self.details = details
super(UnableToGenerateCodeError, self).__init__()
def __str__(self):
return self.details
class Op(object):
def __init__(self, op_def, api_def, enum_store, string_valued=False):
self.op_def = op_def
self.api_def = api_def
self.enum_store = enum_store
self.string_valued = string_valued
self.inferred_counts = dict()
# Collect all the input and output arguments.
self.input_args = [
Argument(arg_def, op=self)
for arg_def in self.op_def.input_arg]
self.output_args = [
Argument(arg_def, op=self)
for arg_def in self.op_def.output_arg]
# Collect all attributes.
self.attrs = [
Attribute(attr, op=self)
for attr in op_def.attr]
self.type_attrs = [
attr for attr in self.attrs
if attr.is_type_attr]
def swift_function(self):
return '''
{documentation}@inlinable @inline(__always)
public static func {name}{generics}({input_args}
){return_type} {{
{body}
}}'''.format(
documentation=self._swift_documentation(),
name=self._swift_name(),
generics=self._swift_generics(),
input_args=self._swift_input_args(),
return_type=self._swift_return_type(),
body=self._swift_body())
def swift_dispatch_function(self, x10_supported=False):
return '''
{documentation}@inlinable @inline(__always)
public static func {name}{generics}({input_args}
){return_type} {{
{body}
}}'''.format(
documentation=self._swift_documentation(),
name=self._swift_name(),
generics=self._swift_generics(),
input_args=self._swift_input_args(),
return_type=self._swift_return_type(),
body=self._swift_dispatch_body(x10_supported=x10_supported))
def _swift_documentation(self):
def comment_block(text, indent_level):
"""Returns a commented block of text with some specified indentation."""
def indent(line_index):
if indent_level == 0:
return ''
if line_index:
return ' ' * indent_level
return ' ' * (indent_level - 1) + '- '
return ''.join([
(_START_COMMENT + ' ' + indent(line_index) + line + '\n'
if line else _START_COMMENT + '\n')
for line_index, line in enumerate(text.split('\n'))
])
def append_list(doc, args, arg_type):
"""Returns the documentation for lists of inputs/outputs/attributes."""
args = [arg for arg in args if arg.description]
if len(args) == 1:
block = '%s %s: %s' % (arg_type, args[0].name, args[0].description)
doc += _START_COMMENT + '\n'
doc += comment_block(block, indent_level=1)
elif len(args) > 1:
doc += '%s\n%s - %ss:\n' % (_START_COMMENT, _START_COMMENT, arg_type)
for arg in args:
block = '%s: %s' % (arg.name, arg.description)
doc += comment_block(block, indent_level=2)
return doc
doc = ''
if self.api_def.summary:
doc = comment_block(self.api_def.summary, indent_level=0)
if self.api_def.description:
doc += _START_COMMENT + '\n'
doc += comment_block(self.api_def.description, indent_level=0)
doc = append_list(doc, self.api_def.in_arg, 'Parameter')
doc = append_list(doc, self.api_def.attr, 'Attr')
doc = append_list(doc, self.api_def.out_arg, 'Output')
if doc and not doc.endswith('\n'):
doc = doc + '\n'
return doc
def _swift_name(self):
return swift_compatible_identifier(
self.op_def.name[0].lower() + self.op_def.name[1:])
def _swift_generics(self):
constraints = [
attr.generic_constraints(self.string_valued)
for attr in self.attrs]
constraints = [c for c in constraints if c is not None]
if len(constraints) == 1:
return '<' + ', '.join(constraints) + '>'
if len(constraints) > 1:
return '<\n ' + ',\n '.join(constraints) + '\n>'
return ''
def _swift_input_args(self):
args = ''
for arg in self.input_args:
args += '\n %s: %s,' % (arg.swift_arg_name, str(arg.swift_type(self.string_valued)))
for attr in self.attrs:
if not attr.is_inferred_type_attr and not attr.is_inferred_number_attr:
args += '\n %s: %s%s,' % (attr.swift_arg_name, attr.swift_type, attr.swift_default)
if args != '':
args = args[:-1]
return args
def _swift_return_type(self):
return_type = ''
if len(self.output_args) == 1:
return_type = ' -> ' + str(self.output_args[0].swift_type(self.string_valued))
elif len(self.output_args) > 1:
named_types = [
arg.swift_name + ': ' + str(arg.swift_type(self.string_valued))
for arg in self.output_args]
return_type = ' -> (' + ', '.join(named_types) + ')'
return return_type
def _swift_body(self):
setters = []
for attr in self.attrs:
setters.append(attr.swift_setter(self.string_valued))
for arg in self.input_args:
setters.append(arg.swift_setter())
counts = ['Int({})'.format(arg.swift_count) for arg in self.output_args]
if len(self.output_args) == 0:
body = 'let nOutputs = 0'
else:
body = 'let nOutputs = {}'.format(' + '.join(counts))
body += '\n let op = makeOp("{}", nOutputs)\n '.format(self.op_def.name)
body += '\n '.join(setters)
if len(self.output_args) == 0:
return body + '\n op.execute()'
body += '\n return op.execute({})'.format(', '.join(counts))
return body
def _swift_dispatch_body(self, x10_supported=False):
names = []
tensors = []
backends = []
device_source = None
for arg in self.input_args:
names.append(arg.swift_name)
if arg.is_tensor_type(self.string_valued):
tensors.append(arg)
if arg.is_list:
backends.append("commonBackend(" + arg.swift_name + ")")
else:
device_source = arg
backends.append(arg.swift_name + ".handle.backend")
for attr in self.attrs:
if not attr.is_inferred_type_attr and not attr.is_inferred_number_attr:
names.append(attr.swift_name)
names_filtered = []
for name in names:
if name in _OMITTED_PARAMETER_NAMES:
names_filtered.append(name)
else:
names_filtered.append(name + ": " + name)
dispatch = self._swift_name() + "(" + (", ".join(names_filtered)) + ")"
if len(backends) == 0 and x10_supported:
print("x10 unsupported: " + str(self.swift_name()))
def do_conversion(arg):
return ("\n let {name} = {typename}(copying: {name}, to: .defaultTFEager)"
.format(name=arg.swift_name,
typename=str(arg.swift_type(self.string_valued))))
def get_common_backend(x, y):
return "commonBackend({}, {})".format(x, y)
if len(backends) == 0 or (not x10_supported and (len(self.output_args) != 1
or not self.output_args[0].is_tensor_type(self.string_valued) or not device_source)):
return "_RawTFEager." + dispatch
if not x10_supported:
return """switch {backends} {{
case .XLA:
let output_device = {convert_device}.device{convert_tensors}
return {convert_type}(copying: _RawTFEager.{dispatch}, to: output_device)
case .TF_EAGER:
return _RawTFEager.{dispatch}
}}
""".format(dispatch=dispatch,
convert_type=str(self.output_args[0].swift_type(self.string_valued)),
convert_device=str(device_source.swift_name),
convert_tensors = "".join(map(do_conversion, tensors)),
backends=reduce(get_common_backend, backends))
return """switch {backends} {{
case .XLA:
return _RawXLA.{dispatch}
case .TF_EAGER:
return _RawTFEager.{dispatch}
}}
""".format(dispatch=dispatch,
backends=reduce(get_common_backend, backends))
class Argument(object):
def __init__(self, arg_def, op):
self.arg_def = arg_def
self.op = op
    self.is_list = arg_def.number_attr != u'' \
        or arg_def.type_list_attr != u''
@property
def name(self):
return self.arg_def.name
@property
def swift_name(self):
return swift_compatible_identifier(
self.name[0].lower() + self.name[1:])
@property
def swift_arg_name(self):
name = self.swift_name
if name in _OMITTED_PARAMETER_NAMES:
name = '_ ' + name
return name
def swift_type(self, string_valued=False):
return self.type.swift_type(
string_valued=self.allows_string and string_valued)
def swift_setter(self):
if self.is_list:
return 'op.addInputList({})'.format(self.swift_name)
else:
return 'op.addInput({})'.format(self.swift_name)
@property
def swift_count(self):
number_attr = self.arg_def.number_attr
if number_attr and number_attr in self.op.inferred_counts:
return self.op.inferred_counts[number_attr]
if self.arg_def.type_list_attr:
return self.op.inferred_counts[self.arg_def.type_list_attr]
return '1'
@property
def type(self):
number = self.arg_def.number_attr
if self.arg_def.type_attr:
type_attr = next(
attr for attr in self.op.type_attrs
if attr.name == self.arg_def.type_attr)
return Type('Tensor', base_type=type_attr.swift_name, number=number)
if self.arg_def.type_list_attr:
type_attr = next(
attr for attr in self.op.type_attrs
if attr.name == self.arg_def.type_list_attr)
# There are never any numbered type lists.
return Type(type_attr.swift_name)
if self.arg_def.type in _SWIFTIFIED_TYPES:
base_type = _SWIFTIFIED_TYPES[self.arg_def.type]
return Type('Tensor', base_type=base_type, number=number)
if self.arg_def.type == types_pb2.DT_STRING:
return Type('Tensor', base_type='String', number=number)
if self.arg_def.type == types_pb2.DT_RESOURCE:
return Type('ResourceHandle', number=number)
if self.arg_def.type == types_pb2.DT_VARIANT:
return Type('VariantHandle', number=number)
raise UnableToGenerateCodeError(
'Unsupported type for argument "%s".' % self.name)
def is_tensor_type(self, string_valued=False):
return self.type.kind == 'Tensor' and not (
(self.allows_string and string_valued) or self.type.base_type == 'String')
@property
def allows_string(self):
if self.arg_def.type_attr:
type_attr = next(
attr for attr in self.op.type_attrs
if attr.name == self.arg_def.type_attr)
return types_pb2.DT_STRING in type_attr.attr_def.allowed_values.list.type
return False
class Type(object):
def __init__(self, kind, base_type=None, number=None):
self.kind = kind
self.base_type = base_type
self.number = number
@property
def count(self):
return self.number if self.number else 1
def swift_type(self, string_valued=False):
if self.kind == 'Tensor':
if self.base_type == 'String' or string_valued:
name = 'StringTensor'
else:
name = 'Tensor<' + self.base_type + '>'
elif self.kind == 'TensorHandle':
name = 'TensorHandle<' + self.base_type + '>'
elif self.kind == 'ResourceHandle':
name = 'ResourceHandle'
elif self.kind == 'VariantHandle':
name = 'VariantHandle'
else:
name = self.kind
return ('[%s]' % name) if self.number else name
class Attribute(object):
"""Represents information extracted from op `type` and `list(type)` attributes."""
def __init__(self, attr_def, op):
self.attr_def = attr_def
self.op = op
self.is_type_attr = attr_def.type in ['type', 'list(type)']
# Check whether the value of this attribute can be
# inferred automatically (this only applies to
# type-valued attributes).
input_args = list(op.op_def.input_arg)
output_args = list(op.op_def.output_arg)
input_arg_type_attrs = set(
[arg.type_attr for arg in input_args] +
[arg.type_list_attr for arg in input_args])
output_arg_type_attrs = set(
[arg.type_attr for arg in output_args] +
[arg.type_list_attr for arg in output_args])
arg_type_attrs = input_arg_type_attrs.union(output_arg_type_attrs)
self.is_inferred_type_attr = attr_def.name in arg_type_attrs
self.is_output_type_attr = attr_def.name in output_arg_type_attrs
self.is_func_attr = self.attr_def.type == 'func'
# We use this for obtaining the `_typeList` property.
self.input_arg = None
self.is_inferred_number_attr = False
for arg in self.op.input_args:
if self.attr_def.name in [arg.arg_def.type_attr,
arg.arg_def.type_list_attr] or \
self.attr_def.name == arg.arg_def.number_attr:
self.input_arg = arg
self.is_inferred_number_attr = True
break
# The following properties are only relevant for
# non-inferred-type-valued attributes.
self._swift_type = ''
self._use_enum = False
if not self.is_inferred_type_attr and not self.is_func_attr:
if self.attr_def.type not in _SWIFTIFIED_ATTR_TYPES:
raise UnableToGenerateCodeError(
'Unsupported type for attribute "%s".'
% self.attr_def.name)
# Get the arg type.
self._swift_type = _SWIFTIFIED_ATTR_TYPES[self.attr_def.type]
# Check if the arg is an enum type.
self._use_enum = False
if self.attr_def.type == 'string':
allowed_values = tuple(sorted(self.attr_def.allowed_values.list.s))
if allowed_values:
self._swift_type = self.op.enum_store.maybe_add(
allowed_values, self.attr_def.name)
self._use_enum = True
if self.is_func_attr:
input_type = self.swift_name.capitalize() + 'In'
output_type = self.swift_name.capitalize() + 'Out'
self._swift_type = '({}) -> {}'.format(input_type, output_type)
@property
def name(self):
return self.attr_def.name
@property
def swift_name(self):
if self.is_inferred_type_attr:
return swift_compatible_identifier(
self.name, capitalize=True)
return swift_compatible_identifier(
self.name[0].lower() + self.name[1:])
@property
def swift_arg_name(self):
name = self.swift_name
if name in _OMITTED_PARAMETER_NAMES:
name = '_ ' + name
return name
@property
def swift_type(self):
return self._swift_type
@property
def swift_default(self):
def swift_float(f):
if f == float('inf'): return 'Double.infinity'
if f == float('-inf'): return '-Double.infinity'
return '%g' % f
if not self.is_inferred_type_attr and self.attr_def.default_value:
default_value = self.attr_def.default_value
if default_value.HasField('b'):
default_value = str(default_value.b).lower()
elif default_value.HasField('i'):
default_value = str(default_value.i)
elif default_value.HasField('f'):
default_value = swift_float(default_value.f)
elif default_value.HasField('s') and default_value.s:
s = str(default_value.s)
default_value = '.' + swift_compatible_identifier(s.lower()) \
if self._use_enum else json.dumps(s) # '"' + s + '"'
elif default_value.HasField('list'):
if default_value.list.i:
default_values = [str(s) for s in default_value.list.i]
default_value = '[' + ', '.join(default_values) + ']'
elif default_value.list.f:
default_values = [swift_float(s) for s in default_value.list.f]
default_value = '[' + ', '.join(default_values) + ']'
else:
default_value = None
else:
default_value = None
if default_value is not None:
default_value = default_value.replace("\t", "\\t")
return ' = ' + default_value
return ''
def swift_setter(self, string_valued=False):
# Inferred-type-valued attributes.
if self.is_inferred_type_attr:
name = self.swift_name
if self.input_arg is not None:
name = self.input_arg.swift_name
if self.attr_def.type == 'list(type)' or self.is_inferred_number_attr:
self.op.inferred_counts[self.name] = name + '._typeList.count'
if self.attr_def.type == 'list(type)':
return 'op.updateAttribute("{}", {}._typeList)'.format(self.name, name)
if string_valued and self.allows_string:
return 'op.updateAttribute("{}", TensorDataType(TF_STRING))'.format(self.name)
return 'op.updateAttribute("{}", {}.tensorFlowDataType)'.format(self.name, self.swift_name)
if self.is_inferred_number_attr:
# The following is used for inferring the lengths of output lists.
self.op.inferred_counts[self.name] = self.input_arg.swift_name + '.count'
return 'op.updateAttribute("{}", {}.count)'.format(self.name, self.input_arg.swift_name)
if self.attr_def.type == 'int':
# The following is used for inferring the lengths of output lists.
self.op.inferred_counts[self.name] = self.swift_name
# Remaining attributes.
value = self.swift_name + '.cName' if self._use_enum else self.swift_name
return 'op.updateAttribute("{}", {})'.format(self.name, value)
def generic_constraints(self, string_valued):
# We use this for obtaining the `_typeList` property.
input_arg = None
if self.attr_def.type == 'list(type)':
for arg in self.op.input_args:
if self.attr_def.name in [arg.arg_def.type_attr,
arg.arg_def.type_list_attr]:
input_arg = arg
break
if self.is_func_attr:
input_type = self.swift_name.capitalize() + 'In'
output_type = self.swift_name.capitalize() + 'Out'
return '{}: TensorGroup,\n {}: TensorGroup'.format(
input_type, output_type)
if not self.is_inferred_type_attr:
return None
protocol = None
if self.attr_def.type == 'list(type)' and input_arg is None:
protocol = 'TensorGroup'
elif self.attr_def.type == 'list(type)':
protocol = 'TensorArrayProtocol'
elif self.attr_def.type == 'type':
if string_valued and self.allows_string:
return None
protocol = 'TensorFlowScalar'
allowed_types = set(self.attr_def.allowed_values.list.type)
allowed_types &= set(_SWIFTIFIED_TYPES.keys())
for types, protocol_name in _TYPE_PROTOCOLS:
if allowed_types.issubset(types):
protocol = protocol_name
break
if protocol is not None:
return self.swift_name + ': ' + protocol
return None
@property
def allows_string(self):
return types_pb2.DT_STRING in self.attr_def.allowed_values.list.type
def swift_compatible_identifier(s, capitalize=False):
"""Transforms an identifier to be more swift idiomatic."""
if capitalize:
s = s.capitalize()
without_underscores = []
capitalize_next_char = False
for c in s:
if c == '-' or c == '_' or c == '(' or c == ')' or c == '<' or c == '>':
capitalize_next_char = True
elif capitalize_next_char:
capitalize_next_char = False
without_underscores.append(c.upper())
else:
without_underscores.append(c)
s = ''.join(without_underscores)
if s in _RENAMED_KEYWORDS:
return _RENAMED_KEYWORDS[s]
return s
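# Illustrative behaviour of swift_compatible_identifier (worked examples, not
# an exhaustive specification):
#   swift_compatible_identifier('data_format')               -> 'dataFormat'
#   swift_compatible_identifier('list(float)')               -> 'listFloat'
#   swift_compatible_identifier('out_type', capitalize=True) -> 'OutType'
#   swift_compatible_identifier('in')                        -> 'in_'  (renamed keyword)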
class EnumStore(object):
"""Stores details on string attributes represented as swift enums."""
def __init__(self):
self._entries = {}
self._type_names = set()
def enum_codes(self):
"""Generates the swift code for enums."""
codes = []
entries = list(six.iteritems(self._entries))
for allowed_values, type_name in sorted(entries, key=lambda x: x[1]):
allowed_values = [str(a) for a in allowed_values]
codes.append(
# FIXME: Re-add `@_frozen` after SR-9739 is resolved.
# https://bugs.swift.org/browse/SR-9739
# '@_frozen\n' +
' // @_frozen // SR-9739\n' +
' public enum {} {{\n'.format(type_name) +
'\n'.join([' case {}'.format(
swift_compatible_identifier(a.lower()))
for a in allowed_values]) +
'\n\n' +
' @inlinable\n' +
' var cName: String {\n' +
' @inline(__always)\n' +
' get {\n' +
' switch self {\n' +
'\n'.join([' case .{}: return "{}"'.format(
swift_compatible_identifier(a.lower()), a)
for a in allowed_values]) +
'\n' +
' }\n' +
' }\n' +
' }\n' +
' }')
return codes
def enum_codes_forwarding(self):
codes = []
entries = list(six.iteritems(self._entries))
for allowed_values, type_name in sorted(entries, key=lambda x: x[1]):
codes.append(' public typealias {} = _RawTFEager.{}'.format(type_name, type_name))
return codes
def maybe_add(self, allowed_values, attr_def_name):
if allowed_values in self._entries:
return self._entries[allowed_values]
type_name = swift_compatible_identifier(attr_def_name, capitalize=True)
base_typename = type_name
counter = 1
while type_name in self._type_names:
type_name = base_typename + str(counter)
counter += 1
self._type_names.add(type_name)
self._entries[allowed_values] = type_name
return type_name
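# For illustration, maybe_add(('NCHW', 'NHWC'), 'data_format') registers the
# enum type name 'DataFormat'; enum_codes() then emits a Swift enum with
# `case nchw` / `case nhwc` plus a `cName` accessor mapping each case back to
# the original attribute string. (Sketch only; exact formatting is defined above.)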
def main(argv):
del argv # Unused.
if FLAGS.output_path is None:
raise ValueError('No output_path has been set')
api_def_map = c_api_util.ApiDefMap()
op_codes = []
op_codes_forwarding = []
enum_store = EnumStore()
op_names = api_def_map.op_names()
if FLAGS.api_def_path is not None:
for op_name in op_names:
path = os.path.join(FLAGS.api_def_path, 'api_def_%s.pbtxt' % op_name)
if not tf.gfile.Exists(path):
continue
with tf.gfile.Open(path, 'r') as fobj:
data = fobj.read()
try:
api_def_map.put_api_def(data)
except Exception as e:
print('Cannot load api def for %s: %s' % (op_name, str(e)))
num_generated = 0
for op_name in sorted(op_names):
try:
if op_name[0] == '_': continue
op_def = api_def_map.get_op_def(op_name)
if any(a.is_ref for a in op_def.input_arg):
raise UnableToGenerateCodeError('has ref-valued input')
if any(a.is_ref for a in op_def.output_arg):
raise UnableToGenerateCodeError('has ref-valued output')
api_def = api_def_map.get_api_def(bytes(op_name, 'utf8'))
# It would be nicer to handle `StringTensor` in a more
# general way by having `String` conform to `TensorFlowScalar`.
default_op = Op(op_def, api_def, enum_store, string_valued=False)
string_valued_op = Op(op_def, api_def, enum_store, string_valued=True)
default_code = default_op.swift_function()
string_valued_code = string_valued_op.swift_function()
op_codes.append(default_code)
string_valued_op_different = False
if string_valued_code != default_code:
string_valued_op_different = True
op_codes.append(string_valued_code)
default_code = default_op.swift_dispatch_function(x10_supported=op_name in X10_OPS)
string_valued_code = string_valued_op.swift_dispatch_function()
op_codes_forwarding.append(default_code)
if string_valued_op_different:
op_codes_forwarding.append(string_valued_code)
num_generated += 1
except UnableToGenerateCodeError as e:
print('Cannot generate code for %s: %s' % (op_name, e.details))
print('Generated code for %d/%d ops.' % (num_generated, len(op_names)))
version_codes = [
' static let generatedTensorFlowVersion = "%s"' % tf.__version__,
' static let generatedTensorFlowGitVersion = "%s"' % tf.__git_version__]
swift_code = (
_WARNING +
_HEADER +
'import CTensorFlow\n\n' +
'@inlinable @inline(__always)\n' +
'func makeOp(_ name: String, _ nOutputs: Int) -> TFTensorOperation {\n' +
' _ExecutionContext.makeOp(name, nOutputs)\n' +
'}\n'+
'\n\npublic enum _RawTFEager {\n\n' +
'\n'.join(version_codes) +
'\n\n' +
'\n\n'.join(enum_store.enum_codes()) +
'\n\n' +
'\n'.join(op_codes) +
'\n\n}\n')
with tf.gfile.Open(FLAGS.output_path, 'w') as f:
f.write(swift_code)
swift_code = (
_WARNING +
_HEADER +
_DISPATCHER_TEMPLATE.format(raw_dispatching_enum=
'public enum _Raw {\n\n' +
'\n'.join(version_codes) +
'\n\n' +
'\n\n'.join(enum_store.enum_codes_forwarding()) +
'\n\n' +
'\n'.join(op_codes_forwarding) + '\n\n}'))
if FLAGS.dispatching_output_path:
with tf.gfile.Open(FLAGS.dispatching_output_path, 'w') as f:
f.write(swift_code)
if __name__ == '__main__':
tf.app.run(main)
|
'''Basic classes and functions for RVSPath items.'''
import attr
import geopandas as gpd
import pandas as pd
from typing import Any, Dict, Sequence, Tuple
import os
from shapely.geometry.base import BaseGeometry
from shapely.geometry import LineString, Polygon
import util
_Geo_DataFrame_Driver = "GPKG"
VERSION = 0.5
@attr.s
class GeoEntity:
"""Construct a geo entity.
`geo_landmarks` the geo landmarks (start and end points + pivots).
`geo_features` the spatial features of the path.
Dictionary values can be of either type str or int.
`route` the path from the start to end point.
"""
geo_landmarks: Dict[str, gpd.GeoDataFrame] = attr.ib()
geo_features: Dict[str, Any] = attr.ib()
route: gpd.GeoDataFrame = attr.ib()
@classmethod
def add_entity(cls,
route: gpd.GeoDataFrame,
geo_landmarks: Dict[str, gpd.GeoDataFrame],
geo_features: Dict[str, Any]):
geo_entity = GeoEntity({}, geo_features, route)
for landmark_type, landmark in geo_landmarks.items():
geo_landmark = GeoLandmark.create_from_pivot(landmark, landmark_type)
geo_entity.geo_landmarks[geo_landmark.landmark_type] = geo_landmark
return geo_entity
@attr.s
class GeoLandmark:
"""Construct a geo landmark.
`landmark_type` type of landmark.
`osmid` osmid of the landmark.
`geometry` the geometry of the landmark.
`main_tag` the tag of the entity that will appear in the instruction.
`pivot_gdf` the GeoDataFrame format of the entity.
"""
landmark_type: str = attr.ib()
osmid: int = attr.ib()
geometry: BaseGeometry = attr.ib()
main_tag: str = attr.ib()
pivot_gdf: gpd.GeoDataFrame = attr.ib()
def to_rvs_format(self):
"""Reformat a GeoLandmark into an RVS style."""
centroid = util.tuple_from_point(
self.geometry.centroid) if self.geometry else None
return [
self.landmark_type,
self.osmid,
self.main_tag,
centroid
]
@classmethod
def create_from_pivot(cls, pivot: gpd.GeoDataFrame, pivot_type: str):
"""Construct a GeoLandmark."""
return GeoLandmark(
pivot_type,
pivot['osmid'],
pivot['geometry'],
pivot['main_tag'],
pivot
)
@attr.s
class RVSSample:
"""Construct a RVS sample.
`geo_landmarks` the geo landmarks (start and end points + pivots).
`geo_features` the spatial features of the path.
Dictionary values can be of either type str or int.
`route_len` the length of the path from the start to end point.
`instructions` the text instructing how to get from start location to goal location.
`id` the sample id.
  `version` the dataset version.
`entity_span` the entity span in the instruction.
Key: entity name. Value: tuple of start and end of the span.
"""
geo_landmarks: Dict[str, gpd.GeoDataFrame] = attr.ib()
geo_features: Dict[str, Any] = attr.ib()
route_len: int = attr.ib()
instructions: str = attr.ib()
id: int = attr.ib()
version: float = attr.ib()
entity_span: Dict[str, Tuple[int, int]] = attr.ib()
@classmethod
def to_rvs_sample(self,
instructions: str,
id: int,
geo_entity: GeoEntity,
entity_span: Dict[str, Tuple[int, int]]):
"""Construct a RVS sample from GeoEntity."""
landmark_list = {}
for type_landmark, landmark in geo_entity.geo_landmarks.items():
landmark_list[type_landmark]= landmark.to_rvs_format()
route_length = round(util.get_linestring_distance(geo_entity.route))
return RVSSample(
landmark_list,
geo_entity.geo_features,
route_length,
instructions,
id,
VERSION,
entity_span)
def save(entities: Sequence[GeoEntity], path_to_save: str):
path_to_save = os.path.abspath(path_to_save)
landmark_types = entities[0].geo_landmarks.keys()
geo_types_all = {}
empty_gdf = gpd.GeoDataFrame(
columns=['osmid', 'geometry', 'main_tag'])
for landmark_type in landmark_types:
geo_types_all[landmark_type] = empty_gdf
columns = ['geometry'] + list(entities[0].geo_features.keys())
geo_types_all['path_features'] = gpd.GeoDataFrame(columns=columns)
for entity in entities:
for pivot_type, pivot in entity.geo_landmarks.items():
geo_types_all[pivot_type] = geo_types_all[pivot_type].append(pivot.pivot_gdf)
dict_features = {k: [v] for k,v in entity.geo_features.items()}
pd_features = pd.DataFrame(dict_features)
geometry = Polygon(LineString(entity.route['geometry'].tolist()))
features_gdf = gpd.GeoDataFrame(pd_features, geometry=[geometry])
geo_types_all['path_features'] = geo_types_all['path_features'].append(features_gdf)
# Save pivots.
if os.path.exists(path_to_save):
mode = 'a'
else:
mode = 'w'
for geo_type, pivots_gdf in geo_types_all.items():
pivots_gdf.to_file(
path_to_save, layer=geo_type, mode=mode, driver=_Geo_DataFrame_Driver)
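# Hedged construction sketch for the classes above. The landmark rows are
# hypothetical stand-ins for real OSM pivots; only the 'osmid', 'geometry' and
# 'main_tag' fields assumed by GeoLandmark.create_from_pivot are populated.
if __name__ == '__main__':
  from shapely.geometry import Point
  start = pd.Series({'osmid': 1, 'geometry': Point(0.0, 0.0), 'main_tag': 'cafe'})
  end = pd.Series({'osmid': 2, 'geometry': Point(0.0, 0.01), 'main_tag': 'bank'})
  route = gpd.GeoDataFrame({'geometry': [Point(0.0, 0.0), Point(0.0, 0.01)]})
  entity = GeoEntity.add_entity(route=route,
                                geo_landmarks={'start_point': start, 'end_point': end},
                                geo_features={'intersections': 2})
  print(list(entity.geo_landmarks.keys()))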
|
import sys
import unittest
import numpy as np
import pygimli as pg
from pygimli.physics import VESManager, ERTManager
from pygimli.physics.em import VMDTimeDomainModelling
np.random.seed(1337)
class TestManagers(unittest.TestCase):
def test_ERT(self, showProgress=False):
dat = pg.getExampleFile('ert/gallery.dat', load=True, verbose=True)
mesh = pg.meshtools.createParaMesh(dat.sensors(), quality=33.4,
paraDX=0.3, paraMaxCellSize=0.5, paraDepth=8)
#with SR
ert = ERTManager(sr=True, useBert=True, verbose=False, debug=False)
mod = ert.invert(dat, mesh=mesh, maxIter=20, lam=10)
np.testing.assert_approx_equal(ert.inv.chi2(), 1.003, significant=3)
#without SR
ert = ERTManager(sr=False, useBert=True, verbose=False, debug=False)
mod = ert.invert(dat, mesh=mesh, maxIter=20, lam=10)
# np.testing.assert_approx_equal(ert.inv.chi2(), 0.9833, significant=3)
def test_TT(self, showProgress=False):
pass
def test_VMD(self, showProgress=False):
t = np.logspace(-5.5, -2.2, 20)
verbose = False
fop = VMDTimeDomainModelling(times=t, txArea=10000.0, rxArea=10000.0,
verbose=verbose)
# [thick[3], res[4]] nLay=4
vmdMgr = pg.frameworks.MethodManager1d(fop)
synthModel = np.array([25., 5., 100., 150., 1., 10., 4.])
ra = vmdMgr.simulate(synthModel)
err = abs(np.log(t)/2) * 0.01
ra *= 1. + pg.randn(len(ra), seed=1337) * err
model = vmdMgr.invert(ra, err, nLayers=4, layerLimits=[2, 500],
maxIter=50,
showProgress=showProgress, verbose=verbose)
if showProgress is True:
fop.drawModel(ax=vmdMgr.inv.axs[0], model=synthModel, label='Synth')
np.testing.assert_array_less(vmdMgr.fw.chi2(), 1.5)
def test_VES(self, showProgress=False):
"""
"""
thicks = [2., 10.]
res = [100., 5., 30] # Ohm m
phi = [0., 20., 0.] #neg mrad
# model fails
# thicks = [2., 6., 10.]
# res = [100., 500., 20., 800.]
# phi = [0., 20., 50., 0]
synthModel = pg.cat(thicks, res)
ab2 = np.logspace(np.log10(1.5), np.log10(100.), 25)
mgr = VESManager(verbose=False, debug=False)
if showProgress:
mgr.verbose = True
fig, axs = pg.plt.subplots(2, 4, figsize=(12,7))
mgr.inv.axs = [axs[0][0], axs[1][0]]
### Test -- basic
ra, err = mgr.simulate(synthModel, ab2=ab2, mn2=1.0, noiseLevel=0.01, seed=0)
mgr.exportData('synth.ves', ra, err)
mgr.invert(ra, err, nLayers=4, lam=100, layerLimits=False,
showProgress=showProgress)
if showProgress is True:
mgr.fop.drawModel(ax=axs[0][0], model=synthModel, label='Synth')
np.testing.assert_array_less(mgr.fw.chi2(), 1)
### Test -- reinit with new parameter count
if showProgress is True:
mgr.inv.axs = [axs[0][1], axs[1][1]]
mgr.invert(ra, err, nLayers=5, layerLimits=False,
showProgress=showProgress)
if showProgress is True:
mgr.fop.drawModel(ax=axs[0][1], model=synthModel, label='Synth')
# axs[0][1].legend()
np.testing.assert_array_less(mgr.inv.inv.chi2(), 1)
### Test -- reinit with new data basis
ab2_2 = np.logspace(np.log10(1.5), np.log10(50.), 10)
ra, err = mgr.simulate(synthModel, ab2=ab2_2, mn2=1.0, noiseLevel=0.01, seed=0)
if showProgress is True:
mgr.inv.axs = [axs[0][2], axs[1][2]]
mgr.invert(ra, err, nLayers=4, ab2=ab2_2, mn2=1.0, layerLimits=False,
showProgress=showProgress)
if showProgress is True:
mgr.fop.drawModel(ax=axs[0][2], model=synthModel, label='Synth')
# axs[0][2].legend()
# np.testing.assert_approx_equal(mgr.inv.inv.chi2(), 0.5242201187682258,
# significant=3)
### Test -- reinit with complex resistivies
mgr.complex = True
synthModel = pg.cat(synthModel, np.array(phi)*1e-3)
ra, err = mgr.simulate(synthModel, ab2=ab2, mn2=1.0, noiseLevel=0.01, seed=1337)
if showProgress is True:
mgr.inv.axs = [axs[0][3], axs[1][3]]
mgr.invert(ra, err, layerLimits=False, showProgress=showProgress, maxIter=50)
np.testing.assert_array_less(mgr.inv.inv.chi2(), 1.09)
if __name__ == '__main__':
unittest.main()
|
from tempy import Div, Content, B, P
character = Div(klass='chr')(
B()(Content('name')),
P()('height:', Content('height')),
P()('mass:', Content('mass')),
P()('hair_color:', Content('hair_color')),
P()('skin_color:', Content('skin_color')),
P()('eye_color:', Content('eye_color')),
P()('birth_year:', Content('birth_year')),
P()('gender:', Content('gender')),
P()('homeworld:', Content('homeworld')),
P()('created:', Content('created')),
P()('edited:', Content('edited')),
P()('url:', Content('url')),
)
|
"""Data pipeline.
Forked from simclr/tf2 codebase.
"""
import functools
from typing import Optional
from absl import logging
from growneuron.imagenet import data_util
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
def build_input_fn(
builder,
global_batch_size,
topology,
is_training,
image_size = 224):
"""Build input function.
Args:
builder: TFDS builder for specified dataset.
global_batch_size: Global batch size.
topology: An instance of `tf.tpu.experimental.Topology` or None.
is_training: Whether to build in training mode.
image_size: Size of the output images.
Returns:
    A function that accepts a `tf.distribute.InputContext` and returns a
    `tf.data.Dataset` yielding batches of (image, label) pairs.
"""
def _input_fn(input_context):
"""Inner input function."""
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
logging.info('Global batch size: %d', global_batch_size)
logging.info('Per-replica batch size: %d', batch_size)
preprocess_fn = get_preprocess_fn(is_training, image_size)
def map_fn(image, label):
"""Produces multiple transformations of the same batch."""
image = preprocess_fn(image)
return image, label
dataset = builder.as_dataset(
split='train' if is_training else 'validation',
shuffle_files=is_training,
as_supervised=True)
logging.info('num_input_pipelines: %d', input_context.num_input_pipelines)
# The dataset is always sharded by number of hosts.
# num_input_pipelines is the number of hosts rather than number of cores.
if input_context.num_input_pipelines > 1:
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
if is_training:
buffer_multiplier = 50 if image_size <= 32 else 10
dataset = dataset.shuffle(batch_size * buffer_multiplier)
dataset = dataset.repeat(-1)
dataset = dataset.map(
map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=is_training)
prefetch_buffer_size = 2 * topology.num_tpus_per_task if topology else 2
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
return _input_fn
def get_preprocess_fn(is_training, image_size=224):
"""Get function that accepts an image and returns a preprocessed image."""
# Disable test cropping for small images (e.g. CIFAR)
if image_size <= 32:
test_crop = False
else:
test_crop = True
return functools.partial(
data_util.preprocess_image,
image_size=image_size,
is_training=is_training,
test_crop=test_crop)
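# Hedged usage sketch: building and materializing the input function outside a
# distribution strategy. The dataset name and batch size are illustrative, and
# the TFDS data must already be prepared for as_dataset() to succeed.
if __name__ == '__main__':
  builder = tfds.builder('imagenet2012')
  input_fn = build_input_fn(builder, global_batch_size=256, topology=None,
                            is_training=True, image_size=224)
  # A default InputContext stands in for the per-replica context that
  # tf.distribute would normally pass to this function.
  dataset = input_fn(tf.distribute.InputContext())
  print(dataset.element_spec)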
|
import apache.thrift.metadata.lite_types as _fbthrift_metadata
def gen_metadata_service_NullService() -> _fbthrift_metadata.ThriftMetadata:
return _fbthrift_gen_metadata_service_NullService(_fbthrift_metadata.ThriftMetadata(structs={}, enums={}, exceptions={}, services={}))
def _fbthrift_gen_metadata_service_NullService(metadata_struct: _fbthrift_metadata.ThriftMetadata) -> _fbthrift_metadata.ThriftMetadata:
qualified_name = "empty.NullService"
if qualified_name in metadata_struct.services:
return metadata_struct
functions = [
]
service_dict = dict(metadata_struct.services)
service_dict[qualified_name] = _fbthrift_metadata.ThriftService(name=qualified_name, functions=functions, structured_annotations=[
])
new_struct = metadata_struct(services=service_dict)
return new_struct
def getThriftModuleMetadata() -> _fbthrift_metadata.ThriftMetadata:
meta = _fbthrift_metadata.ThriftMetadata(structs={}, enums={}, exceptions={}, services={})
meta = _fbthrift_gen_metadata_service_NullService(meta)
return meta
|
import commands
import logging
import sys
from libs import config_libs
from libs import utils_libs
from libs import verify_libs
def main():
# Run the Testcases:
test = test_gbp_l3p_neg()
if test.test_gbp_l3p_neg_1() == 0:
test.cleanup(tc_name='TESTCASE_GBP_L3P_NEG_1')
if test.test_gbp_l3p_neg_2() == 0:
test.cleanup(tc_name='TESTCASE_GBP_L3P_NEG_2')
if test.test_gbp_l3p_neg_3() == 0:
test.cleanup(tc_name='TESTCASE_GBP_L3P_NEG_3')
if test.test_gbp_l3p_neg_4() == 0:
test.cleanup(tc_name='TESTCASE_GBP_L3P_NEG_4')
if test.test_gbp_l3p_neg_5() == 0:
test.cleanup(tc_name='TESTCASE_GBP_L3P_NEG_5')
test.cleanup()
utils_libs.report_results('test_gbp_l3p_neg', 'test_results.txt')
sys.exit(1)
class test_gbp_l3p_neg(object):
# Initialize logging
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(name)s - %(message)s',
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_l3p_neg.log'
commands.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_l3p_neg.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
_log.addHandler(hdlr)
_log.setLevel(logging.INFO)
_log.setLevel(logging.DEBUG)
def __init__(self):
"""
Init def
"""
self._log.info("\n## START OF GBP L3_POLICY NEGATIVE TESTSUITE\n")
self.gbpcfg = config_libs.Gbp_Config()
self.gbpverify = verify_libs.Gbp_Verify()
self.l3p_name = 'demo_l3p'
def cleanup(self, tc_name=''):
if tc_name != '':
self._log.info('%s: FAILED' % (tc_name))
for obj in ['group', 'l2p', 'l3p']:
self.gbpcfg.gbp_del_all_anyobj(obj)
def test_gbp_l3p_neg_1(self):
self._log.info(
"\n#############################################\n"
"TESTCASE_GBP_L3P_NEG_1: TO CREATE/VERIFY L3POLICY "
"with INVALID IP-POOL\n"
"TEST_STEPS::\n"
"Create L3Policy Object with Invalid IP-Pool\n"
"Invalid IP-Pools: x.y.0.0/24, 0.0.0.0/0,255.255.255.255/32,"
"0.2323.0.0/24\n"
"Verify the create FAILs and config rolls back\n"
"############################################\n")
# Testcase work-flow starts
count = 0
invalid_pools = [
'x.y.0.0/24',
'0.2323.0.0/24',
'0.0.0.0/0',
'255.255.255.255/32']
for pool in invalid_pools:
self._log.info(
"\n## Step 1A: Create L3Policy with Invalid IP-Pool = %s ##" %
(pool))
if self.gbpcfg.gbp_policy_cfg_all(
1, 'l3p', self.l3p_name, ip_pool=pool) != 0:
self._log.info(
"# Step 1A: Create L3Policy with Invalid IP-Pool %s did "
"NOT fail" %
(pool))
self._log.info('# Step 1A: Verify L3Policy did NOT get created')
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'l3p', self.l3p_name) != 0:
self._log.info(
"# Step 1A: L3Policy did NOT fail to create even with "
"Invalid IP-Pool %s" %
(pool))
count += 1
if count > 0:
return 0
else:
self._log.info("\nTESTCASE_GBP_L3P_NEG_1: PASSED")
return 1
def test_gbp_l3p_neg_2(self):
self._log.info(
"\n############################################\n"
"TESTCASE_GBP_L3P_NEG_2: TO CREATE/VERIFY L3POLICY with INVALID "
"SUBNET-PREF-LENGTH\n"
"TEST_STEPS::\n"
"Create L3Policy Object with Invalid Subnet-Prefix-Length\n"
"Invalid Subnet-Prefix-Lengths: 33,'AB','32'\n"
"Verify the create FAILs and config rolls back\n"
"############################################\n")
# Testcase work-flow starts
cnt = 0
invalid_prefix_length = ['33', 'AB', '32']
for prefix in invalid_prefix_length:
self._log.info(
"\n## Step 1A: Create L3Policy with Invalid "
"Prefix-lenght = %s ##" %
(prefix))
if self.gbpcfg.gbp_policy_cfg_all(
1, 'l3p', self.l3p_name, subnet_prefix_length=prefix) != 0:
self._log.info(
"# Step 1A: Create L3Policy with Invalid IP-Pool %s "
"did NOT fail" %
(prefix))
self._log.info('# Step 1A: Verify L3Policy did NOT get created')
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'l3p', self.l3p_name) != 0:
self._log.info(
"# Step 1A: L3Policy did NOT fail to create even with "
"Invalid IP-Pool %s" %
(prefix))
cnt += 1
if cnt > 0:
return 0
else:
self._log.info("\nTESTCASE_GBP_L3P_NEG_2: PASSED")
return 1
def test_gbp_l3p_neg_3(self):
self._log.info(
"\n############################################\n"
"TESTCASE_GBP_L3P_NEG_3: TO CREATE/VERIFY L3POLICY with mix "
"of VALID & INVALID ATTRs\n"
"TEST_STEPS::\n"
"Create L3Policy with a mix of Valid IP-Pool and Invalid "
"Subnet-Prefix-Length & Vice-versa\n"
"Invalid IP-Pool: x.y.0.0/24,Valid Subnet-Pref-Len: 30\n"
"Valid IP-Pool: 20.20.20.0/24, Invalid Subnet-Pref-Len: 32\n"
"Verify the create FAILs and config rolls back\n"
"############################################\n")
# Testcase work-flow starts
mix_attr = {'x.y.0.0/24': '30', '20.20.20.0/24': '32'}
_pass = 0
for ip, pref in mix_attr.iteritems():
self._log.info(
"\n## Step 1A: Create L3Policy with IP-Pool = %s & "
"Subnet-Pref-Len = %s ##" %
(ip, pref))
if self.gbpcfg.gbp_policy_cfg_all(
1,
'l3p',
self.l3p_name,
ip_pool=ip,
subnet_prefix_length=pref) != 0:
self._log.info(
"# Step 1A: Create L3Policy with mix of valid and "
"invalid did NOT fail")
self._log.info('# Step 1A: Verify L3Policy did NOT get created')
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'l3p', self.l3p_name) != 0:
self._log.info(
"# Step 1A: L3Policy did NOT fail to create even with "
"mix of Valid and Invalid attrs %s")
_pass += 1
if _pass > 0:
return 0
else:
self._log.info("\nTESTCASE_GBP_L3P_NEG_3: PASSED")
return 1
def test_gbp_l3p_neg_4(self):
self._log.info(
"\n#################################################\n"
"TESTCASE_GBP_L3P_NEG_4: TO UPDATE/VERIFY L3POLICY with "
"INVALID ATTRs\n"
"TEST_STEPS::\n"
"Create a L3Policy with default attr values\n"
"Update the L3Policy with Invalid Subnet-Prefix-Length\n"
"Update the L3Policy with Valid IP-Pool, should fail as "
"ip-pool is Immutable attr\n"
"Verify the update fails and config roll backs to original "
"values of the L3Policy\n"
"###############################################\n")
# Testcase work-flow starts
self._log.info('\n## Step 1: Create a L3P with default attribute ##\n')
l3p_uuid = self.gbpcfg.gbp_policy_cfg_all(1, 'l3p', self.l3p_name)
if l3p_uuid == 0:
self._log.info("\n## Step 1: Create L3Policy == Failed")
return 0
if self.gbpcfg.gbp_policy_cfg_all(
2, 'l3p', l3p_uuid, subnet_prefix_length='32') != 0:
self._log.info(
"\n## Step 2: Updating L3Policy's Subnet-Prefix-Length "
"with Invalid Value=32 did NOT fail")
return 0
if self.gbpcfg.gbp_policy_cfg_all(
2, 'l3p', l3p_uuid, ip_pool='20.20.0.0/24') != 0:
self._log.info(
"\n## Step 3: Updating L3Policy's Immutable attr IP-Pool "
"did NOT fail")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'l3p',
l3p_uuid,
id=l3p_uuid,
name=self.l3p_name,
ip_pool='10.0.0.0/8',
subnet_prefix_length='24') == 0:
self._log.info(
"\n## Step 4: L3Policy config did NOT roll back to original "
"default values")
return 0
self.gbpcfg.gbp_policy_cfg_all(
0, 'l3p', l3p_uuid) # clean-up before next testcase
self._log.info("\nTESTCASE_GBP_L3P_NEG_4: PASSED")
return 1
def test_gbp_l3p_neg_5(self):
self._log.info(
"\n#################################################\n"
"TESTCASE_GBP_L3P_NEG_5: TO CREATE/UPDATE L3POLICY with "
"SUBNET-PREF-LENGTH GREATER than IP-POOL's MASK-LENGTH\n"
"TEST_STEPS::\n"
"Create a L3Policy with non-default attr, "
"subnet-pref-length > mask-length of pool\n"
"Verify the above L3Policy creation fails\n"
"Create a L3Policy with default attrs\n"
"Update the L3Policy's subnet-pref-length such that "
"subnet-pref-length > mask-length of pool\n"
"Verify the update fails and L3Policy attrs persists with "
"default values\n"
"##################################################\n")
# Testcase work-flow starts
self._log.info('\n## Step 1: Create a L3P with default attribute ##\n')
l3p_uuid = self.gbpcfg.gbp_policy_cfg_all(1, 'l3p', self.l3p_name)
if l3p_uuid == 0:
self._log.info("\n## Step 1: Create L3Policy == Failed")
return 0
if self.gbpcfg.gbp_policy_cfg_all(
2, 'l3p', l3p_uuid, subnet_prefix_length='4') != 0:
self._log.info(
"\n## Step 2: Updating L3Policy's "
"Subnet-Prefix-Length > default Mask-length(8) did NOT fail")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'l3p',
l3p_uuid,
id=l3p_uuid,
name=self.l3p_name,
ip_pool='10.0.0.0/8',
subnet_prefix_length='24') == 0:
self._log.info(
"\n## Step 3: L3Policy config did NOT roll back "
"to original default values")
return 0
if self.gbpcfg.gbp_policy_cfg_all(
1,
'l3p',
'new_l3p',
ip_pool='20.20.20.0/24',
subnet_prefix_length='16') != 0:
self._log.info(
"\n## Step 4: Creating L3Policy with "
"Subnet-Prefix-Length > Mask-Length(24) did NOT fail")
return 0
self._log.info("\nTESTCASE_GBP_L3P_NEG_5: PASSED")
return 1
if __name__ == '__main__':
main()
|
import functools
import json
import logging
from urllib import quote
_logger = logging.getLogger(__name__)
class JsonObject(dict):
def __init__(self, *args, **kwargs):
super(JsonObject, self).__init__(*args, **kwargs)
json = json.dumps
class Entity(JsonObject):
def __init__(self, target_endpoint, client, *args, **kwargs):
super(Entity, self).__init__(*args, **kwargs)
self.target_endpoint = target_endpoint
self.client = client
try:
for attribute, value in self['entity'].items():
domain_name, suffix = attribute.rpartition('_')[::2]
if suffix == 'url':
manager_name = domain_name if domain_name.endswith('s') else '%ss' % domain_name
try:
other_manager = getattr(client, manager_name)
except AttributeError:
# generic manager
other_manager = EntityManager(
target_endpoint,
client,
'')
if domain_name.endswith('s'):
new_method = functools.partial(other_manager._list, value)
else:
new_method = functools.partial(other_manager._get, value)
new_method.__name__ = domain_name
setattr(self, domain_name, new_method)
except KeyError:
print self.keys()
raise
class InvalidStatusCode(Exception):
def __init__(self, status_code, body):
self.status_code = status_code
self.body = body
def __str__(self):
if self.body is None:
return '%d' % self.status_code
elif type(self.body) == str:
return '%d : %s' % (self.status_code, self.body)
else:
return '%d : %s' % (self.status_code, json.dumps(self.body))
class EntityManager(object):
def __init__(self, target_endpoint, client, entity_uri, entity_builder=None):
self.target_endpoint = target_endpoint
self.entity_uri = entity_uri
self.client = client
self.entity_builder = entity_builder if entity_builder is not None else lambda pairs: Entity(target_endpoint,
client, pairs)
def _get(self, requested_path, entity_builder=None):
url = '%s%s' % (self.target_endpoint, requested_path)
response = EntityManager._check_response(self.client.get(url))
_logger.debug('GET - %s - %s', requested_path, response.text)
return self._read_response(response, entity_builder)
def _list(self, requested_path, entity_builder=None, **kwargs):
url_requested = EntityManager._get_url_filtered('%s%s' % (self.target_endpoint, requested_path), **kwargs)
        response = EntityManager._check_response(self.client.get(url_requested))
entity_builder = self._get_entity_builder(entity_builder)
while True:
_logger.debug('GET - %s - %s', url_requested, response.text)
response_json = self._read_response(response, JsonObject)
for resource in response_json['resources']:
yield entity_builder(resource.items())
if response_json['next_url'] is None:
break
else:
url_requested = '%s%s' % (self.target_endpoint, response_json['next_url'])
response = EntityManager._check_response(self.client.get(url_requested))
def _create(self, data):
url = '%s%s' % (self.target_endpoint, self.entity_uri)
response = EntityManager._check_response(self.client.post(url, json=data))
_logger.debug('POST - %s - %s', url, response.text)
return self._read_response(response)
def _update(self, resource_id, data):
url = '%s%s/%s' % (self.target_endpoint, self.entity_uri, resource_id)
response = EntityManager._check_response(self.client.put(url, json=data))
_logger.debug('PUT - %s - %s', url, response.text)
return self._read_response(response)
def _remove(self, resource_id):
url = '%s%s/%s' % (self.target_endpoint, self.entity_uri, resource_id)
response = EntityManager._check_response(self.client.delete(url))
_logger.debug('DELETE - %s - %s', url, response.text)
def __iter__(self):
return self.list()
def __getitem__(self, entity_guid):
return self.get(entity_guid)
def list(self, **kwargs):
return self._list(self.entity_uri, **kwargs)
def get_first(self, **kwargs):
kwargs.setdefault('results-per-page', 1)
for entity in self._list(self.entity_uri, **kwargs):
return entity
return None
def get(self, entity_id, *extra_paths):
if len(extra_paths) == 0:
requested_path = '%s/%s' % (self.entity_uri, entity_id)
else:
requested_path = '%s/%s/%s' % (self.entity_uri, entity_id, '/'.join(extra_paths))
return self._get(requested_path)
def _read_response(self, response, other_entity_builder=None):
entity_builder = self._get_entity_builder(other_entity_builder)
result = response.json(object_pairs_hook=JsonObject)
return entity_builder(result.items())
def _get_entity_builder(self, entity_builder):
if entity_builder is None:
return self.entity_builder
else:
return entity_builder
@staticmethod
def _get_url_filtered(url, **kwargs):
        list_query_parameters = ['page', 'results-per-page', 'order-direction']
def _append_encoded_parameter(parameters, args):
parameter_name, parameter_value = args[0], args[1]
            if parameter_name in list_query_parameters:
parameters.append('%s=%s' % (parameter_name, str(parameter_value)))
else:
parameters.append('q=%s' % quote('%s IN %s' % (parameter_name, str(parameter_value))))
return parameters
if len(kwargs) > 0:
return '%s?%s' % (url,
"&".join(reduce(_append_encoded_parameter, kwargs.items(), [])))
else:
return url
@staticmethod
def _check_response(response):
        # integer division so every 2xx status matches (also under Python 3)
        if response.status_code // 100 == 2:
return response
else:
try:
body = response.json()
except Exception:
body = response.text
raise InvalidStatusCode(response.status_code, body)
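# --- Usage sketch (an assumption, not part of the original client module) ---
# Drives EntityManager against a tiny fake, requests-style client so the
# list/get plumbing above can be seen end to end. The endpoint and URI below
# are placeholders, not real values.
if __name__ == '__main__':
    class _FakeResponse(object):
        status_code = 200
        text = '{"resources": [], "next_url": null}'

        def json(self, **kwargs):
            return json.loads(self.text, **kwargs)

    class _FakeClient(object):
        def get(self, url, **kwargs):
            return _FakeResponse()

    apps = EntityManager('https://api.example.com', _FakeClient(), '/v2/apps')
    for app in apps.list():
        print app  # yields nothing here because the fake payload has no resources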
|
"""
Command-line tools for scitokens
"""
|
from App.Proxys import *
data = IKVMCController(
name = '',
controlParamsList = [
ControlParams( joint = 'root', kp = 1000.0, kd = 200.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'pelvis_lowerback', kp = 75.0, kd = 17.0, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'lowerback_torso', kp = 75.0, kd = 17.0, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'torso_head', kp = 50.0, kd = 15.0, tauMax = 200.0, scale = ( 1.0, 0.2, 1.0 ) ),
ControlParams( joint = 'lShoulder', kp = 50.0, kd = 15.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'rShoulder', kp = 50.0, kd = 15.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'lElbow', kp = 50.0, kd = 15.0, tauMax = 200.0, scale = ( 0.2, 1.0, 1.0 ) ),
ControlParams( joint = 'rElbow', kp = 50.0, kd = 15.0, tauMax = 200.0, scale = ( 0.2, 1.0, 1.0 ) ),
ControlParams( joint = 'lHip', kp = 300.0, kd = 35.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'rHip', kp = 300.0, kd = 35.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'lKnee', kp = 300.0, kd = 35.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'rKnee', kp = 300.0, kd = 35.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'lAnkle', kp = 50.0, kd = 15.0, tauMax = 100.0, scale = ( 1.0, 0.2, 0.2 ) ),
ControlParams( joint = 'rAnkle', kp = 50.0, kd = 15.0, tauMax = 100.0, scale = ( 1.0, 0.2, 0.2 ) ),
ControlParams( joint = 'lToeJoint', kp = 2.0, kd = 0.2, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'rToeJoint', kp = 2.0, kd = 0.2, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ) ],
states = [
SimBiConState(
name = 'State 0',
nextStateIndex = 0,
duration = 0.62,
externalForces = [ ],
trajectories = [
Trajectory(
joint = 'root',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 1.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 1.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory( joint = 'SWING_Hip', strength = [ ], components = [ ] ),
Trajectory(
joint = 'SWING_Knee',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'STANCE_Knee',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.1 ), ( 1.0, 0.1 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'SWING_Ankle',
strength = [ ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 1.19 ), ( 1.0, -0.56 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'STANCE_Ankle',
strength = [ ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, -0.56 ), ( 1.0, 1.19 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'LEFT',
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'SWING_Shoulder',
strength = [ ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.4 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'LEFT',
baseTrajectory = [ ( 0.0, -1.09190687384 ), ( 1.0, 2.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, -2.63817581809 ), ( 1.0, 0.25 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'STANCE_Shoulder',
strength = [ ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.4 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'LEFT',
baseTrajectory = [ ( 0.0, -2.0 ), ( 1.0, 1.09190687384 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.25 ), ( 1.0, -2.63817581809 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'STANCE_Elbow',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'LEFT',
baseTrajectory = [ ( 0.0, 0.00402694999565 ), ( 1.0, 1.42770774061 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'SWING_Elbow',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'LEFT',
baseTrajectory = [ ( 0.0, -1.42770774061 ), ( 1.0, -0.00402694999565 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'pelvis_lowerback',
strength = [ ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, -0.375744188714 ), ( 1.0, 0.375744188714 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 1.2 ), ( 1.0, 1.2 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'lowerback_torso',
strength = [ ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, 0.64363902446 ), ( 1.0, -0.64363902446 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 1.16725218285 ), ( 1.0, 1.16725218285 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'torso_head',
strength = [ ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, -1.2 ), ( 1.0, 1.2 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.464482553944 ), ( 1.0, 0.464482553944 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'SWING_ToeJoint',
strength = [ ( 0.3, 0.1 ), ( 0.5, 0.1 ), ( 0.6, 1.0 ) ],
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'STANCE_ToeJoint',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] )
]
)
],
sagittalTrajectory = [ ( 0.0, 0.0 ), ( 1.0, 0.0 ) ],
coronalTrajectory = [ ( 0.0, 0.0 ), ( 1.0, 0.0 ) ],
heightTrajectory = [ ( 0.0, 0.0 ), ( 1.0, 0.0 ) ]
)
|
"""restful_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^cancer_predict/', include('cancer_predict.urls')),
url(r'^admin/', admin.site.urls),
]
|
"""
v2 Neutron Plug-in API specification.
:class:`NeutronPluginBaseV2` provides the definition of minimum set of
methods that needs to be implemented by a v2 Neutron Plug-in.
"""
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class NeutronPluginBaseV2(object):
@abstractmethod
def create_subnet(self, context, subnet):
"""Create a subnet.
Create a subnet, which represents a range of IP addresses
that can be allocated to devices
:param context: neutron api request context
:param subnet: dictionary describing the subnet, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abstractmethod
def update_subnet(self, context, id, subnet):
"""Update values of a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to update.
:param subnet: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abstractmethod
def get_subnet(self, context, id, fields=None):
"""Retrieve a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to fetch.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abstractmethod
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of subnets.
        The contents of the list depend on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
                        Values in this dictionary are an iterable containing
values that will be used for an exact match comparison
for that value. Each result returned by this
function will have matched one of the values for each
key in filters.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_subnets_count(self, context, filters=None):
"""Return the number of subnets.
The result depends on the identity of
the user making the request (as indicated by the context) as well as
any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will
have matched one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abstractmethod
def delete_subnet(self, context, id):
"""Delete a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to delete.
"""
pass
@abstractmethod
def create_network(self, context, network):
"""Create a network.
Create a network, which represents an L2 network segment which
can have a set of subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abstractmethod
def update_network(self, context, id, network):
"""Update values of a network.
:param context: neutron api request context
:param id: UUID representing the network to update.
:param network: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abstractmethod
def get_network(self, context, id, fields=None):
"""Retrieve a network.
:param context: neutron api request context
:param id: UUID representing the network to fetch.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abstractmethod
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of networks.
        The contents of the list depend on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_networks_count(self, context, filters=None):
"""Return the number of networks.
The result depends on the identity
of the user making the request (as indicated by the context) as well
as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. Values in
                        this dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will have
matched one of the values for each key in filters.
        .. note:: this method is optional, as it was not part of the originally
            defined plugin API.
"""
raise NotImplementedError
@abstractmethod
def delete_network(self, context, id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
"""
pass
@abstractmethod
def create_port(self, context, port):
"""Create a port.
Create a port, which is a connection point of a device (e.g., a VM
NIC) to attach to a L2 neutron network.
:param context: neutron api request context
:param port: dictionary describing the port, with keys as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. All keys will be
populated.
"""
pass
@abstractmethod
def update_port(self, context, id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
"""
pass
@abstractmethod
def get_port(self, context, id, fields=None):
"""Retrieve a port.
:param context: neutron api request context
:param id: UUID representing the port to fetch.
:param fields: a list of strings that are valid keys in a port
dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abstractmethod
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of ports.
        The contents of the list depend on the identity of the user making
the request (as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`. Values
                        in this dictionary are an iterable containing values
that will be used for an exact match comparison for
that value. Each result returned by this function will
have matched one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
port dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_ports_count(self, context, filters=None):
"""Return the number of ports.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abstractmethod
def delete_port(self, context, id):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
"""
pass
def start_rpc_listener(self):
"""Start the rpc listener.
Most plugins start an RPC listener implicitly on initialization. In
order to support multiple process RPC, the plugin needs to expose
control over when this is started.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
def rpc_workers_supported(self):
"""Return whether the plugin supports multiple RPC workers.
A plugin that supports multiple RPC workers should override the
start_rpc_listener method to ensure that this method returns True and
that start_rpc_listener is called at the appropriate time.
Alternately, a plugin can override this method to customize detection
of support for multiple rpc workers
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
return (self.__class__.start_rpc_listener !=
NeutronPluginBaseV2.start_rpc_listener)
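# --- Illustrative sketch (an assumption, not part of the Neutron source) ---
# A concrete plugin must override every @abstractmethod above before it can be
# instantiated. The fragment below only hints at the shape of two overrides
# backed by a plain in-memory dict; real plugins persist to a database and
# honour the documented filters/fields semantics.
class _InMemoryPluginSketch(NeutronPluginBaseV2):
    def __init__(self):
        self._subnets = {}

    def create_subnet(self, context, subnet):
        # 'subnet' is the request dictionary described above; a real plugin
        # would validate it and allocate addresses from the CIDR.
        data = dict(subnet)
        self._subnets[data['id']] = data
        return data

    def get_subnet(self, context, id, fields=None):
        data = self._subnets[id]
        if fields:
            return dict((k, v) for k, v in data.items() if k in fields)
        return data

    # update_subnet, get_subnets, delete_subnet and the network/port methods
    # would be implemented along the same lines.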
|
class IndicatorTypes():
"""
Vocabulary for Indicator Types.
"""
ADJUST_TOKEN = "Adjust Token"
API_KEY = "API Key"
AS_NUMBER = "AS Number"
AS_NAME = "AS Name"
BANK_ACCOUNT = "Bank account"
BITCOIN_ACCOUNT = "Bitcoin account"
CERTIFICATE_FINGERPRINT = "Certificate Fingerprint"
CERTIFICATE_NAME = "Certificate Name"
CHECKSUM_CRC16 = "Checksum CRC16"
CMD_LINE = "Command Line"
COMPANY_NAME = "Company name"
COOKIE_NAME = "Cookie Name"
COUNTRY = "Country"
CRX = "CRX"
DEBUG_PATH = "Debug Path"
DEBUG_STRING = "Debug String"
DEST_PORT = "Destination Port"
DEVICE_IO = "Device IO"
DOC_FROM_URL = "Document from URL"
DOMAIN = "Domain"
EMAIL_BOUNDARY = "Email Boundary"
EMAIL_ADDRESS = "Email Address"
EMAIL_FROM = "Email Address From"
EMAIL_HEADER_FIELD = "Email Header Field"
EMAIL_HELO = "Email HELO"
EMAIL_MESSAGE_ID = "Email Message ID"
EMAIL_ORIGINATING_IP = "Email Originating IP"
EMAIL_REPLY_TO = "Email Reply-To"
EMAIL_SENDER = "Email Address Sender"
EMAIL_SUBJECT = "Email Subject"
EMAIL_X_MAILER = "Email X-Mailer"
EMAIL_X_ORIGINATING_IP = "Email X-Originating IP"
FILE_CREATED = "File Created"
FILE_DELETED = "File Deleted"
FILE_MOVED = "File Moved"
FILE_NAME = "File Name"
FILE_OPENED = "File Opened"
FILE_PATH = "File Path"
FILE_READ = "File Read"
FILE_WRITTEN = "File Written"
GET_PARAM = "GET Parameter"
HEX_STRING = "HEX String"
HTML_ID = "HTML ID"
HTTP_REQUEST = "HTTP Request"
HTTP_RESP_CODE = "HTTP Response Code"
IMPHASH = "IMPHASH"
IPV4_ADDRESS = "IPv4 Address"
IPV4_SUBNET = "IPv4 Subnet"
IPV6_ADDRESS = "IPv6 Address"
IPV6_SUBNET = "IPv6 Subnet"
LATITUDE = "Latitude"
LAUNCH_AGENT = "Launch Agent"
LOCATION = "Location"
LONGITUDE = "Longitude"
MAC_ADDRESS = "MAC Address"
MALWARE_NAME = "Malware Name"
MD5 = "MD5"
MEMORY_ALLOC = "Memory Alloc"
MEMORY_PROTECT = "Memory Protect"
MEMORY_READ = "Memory Read"
MEMORY_WRITTEN = "Memory Written"
MUTANT_CREATED = "Mutant Created"
MUTEX = "Mutex"
NAME_SERVER = "Name Server"
OTHER_FILE_OP = "Other File Operation"
PASSWORD = "Password"
PASSWORD_SALT = "Password Salt"
PAYLOAD_DATA = "Payload Data"
PAYLOAD_TYPE = "Payload Type"
PIPE = "Pipe"
POST_DATA = "POST Data"
PROCESS_NAME = "Process Name"
PROTOCOL = "Protocol"
REFERER = "Referer"
REFERER_OF_REFERER = "Referer of Referer"
REGISTRAR = "Registrar"
REGISTRY_KEY = "Registry Key"
REG_KEY_CREATED = "Registry Key Created"
REG_KEY_DELETED = "Registry Key Deleted"
REG_KEY_ENUMERATED = "Registry Key Enumerated"
REG_KEY_MONITORED = "Registry Key Monitored"
REG_KEY_OPENED = "Registry Key Opened"
REG_KEY_VALUE_CREATED = "Registry Key Value Created"
REG_KEY_VALUE_DELETED = "Registry Key Value Deleted"
REG_KEY_VALUE_MODIFIED = "Registry Key Value Modified"
REG_KEY_VALUE_QUERIED = "Registry Key Value Queried"
SERVICE_NAME = "Service Name"
SHA1 = "SHA1"
SHA256 = "SHA256"
SMS_ORIGIN = "SMS Origin"
SOURCE_PORT = "Source Port"
SSDEEP = "SSDEEP"
TELEPHONE = "Telephone"
TIME_CREATED = "Time Created"
TIME_UPDATED = "Time Updated"
TRACKING_ID = "Tracking ID"
TS_END = "TS End"
TS_START = "TS Start"
URI = "URI"
USER_AGENT = "User Agent"
USER_ID = "User ID"
VICTIM_IP = "Victim IP"
VOLUME_QUERIED = "Volume Queried"
WEBSTORAGE_KEY = "Webstorage Key"
WEB_PAYLOAD = "Web Payload"
WHOIS_NAME = "WHOIS Name"
WHOIS_ADDR1 = "WHOIS Address 1"
WHOIS_ADDR2 = "WHOIS Address 2"
WHOIS_REGISTRANT_EMAIL_ADDRESS = "WHOIS Registrant Email Address"
WHOIS_TELEPHONE = "WHOIS Telephone"
XPI = "XPI"
class IndicatorThreatTypes():
"""
Vocabulary for Indicator Threat Types.
"""
BAD_ACTOR = "Bad Actor"
COMPROMISED_CREDENTIAL = "Compromised Credential"
COMMAND_EXEC = "Command Exec"
MALICIOUS_AD = "Malicious Ad"
MALICIOUS_CONTENT = "Malicious Content"
MALICIOUS_DOMAIN = "Malicious Domain"
MALICIOUS_INJECT = "Malicious Inject"
MALICIOUS_IP = "Malicious IP"
MALICIOUS_URL = "Malicious URL"
MALICIOUS_URLCHUNK = "Malicious URL Chunk"
MALWARE_ARTIFACTS = "Malware Artifacts"
MALWARE_SAMPLE = "Malware Sample"
MALWARE_VICTIM = "Malware Victim"
PROXY_IP = "Proxy IP"
SINKHOLE_EVENT = "Sinkhole Event"
SMS_SPAM = "SMS Spam"
UNKNOWN = "Unknown"
VICTIM_IP_USAGE = "Victim IP Usage"
WEB_REQUEST = "Web Request"
WHITELIST_DOMAIN = "Whitelist Domain"
WHITELIST_IP = "Whitelist IP"
WHITELIST_URL = "Whitelist URL"
class IndicatorAttackTypes():
"""
Vocabulary for Indicator Attack Types.
"""
ACCESS_TOKEN_THEFT = "Access Token Theft"
BRUTE_FORCE = "Brute Force"
CLICKJACKING = "Clickjacking"
EMAIL_SPAM = "Email Spam"
FAKE_ACCOUNTS = "Fake Accounts"
IP_INFRINGEMENT = "IP Infringement"
MALICIOUS_APP = "Malicious App"
MALWARE = "Malware"
PHISHING = "Phishing"
SELF_XSS = "Self XSS"
SHARE_BAITING = "Share Baiting"
TARGETED = "Targeted"
UNKNOWN = "Unknown"
class IndicatorCI():
"""
Vocabulary for Indicator CI.
"""
UNKNOWN = "unknown"
BENIGN = "benign"
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
|
from pyparsing import (
Forward, Combine, Optional, Word, Literal, CaselessKeyword,
CaselessLiteral, Group, FollowedBy, LineEnd, OneOrMore, ZeroOrMore,
alphas, alphanums, printables, delimitedList, quotedString, Regex,
__version__, Suppress, Empty
)
grammar = Forward()
expression = Forward()
intNumber = Regex(r'-?\d+')('integer')
floatNumber = Regex(r'-?\d+\.\d+')('float')
sciNumber = Combine(
(floatNumber | intNumber) + CaselessLiteral('e') + intNumber
)('scientific')
aString = quotedString('string')
afterNumber = FollowedBy(",") ^ FollowedBy(")") ^ FollowedBy(LineEnd())
number = Group(
(sciNumber + afterNumber) |
(floatNumber + afterNumber) |
(intNumber + afterNumber)
)('number')
boolean = Group(
CaselessKeyword("true") |
CaselessKeyword("false")
)('boolean')
none = Group(
CaselessKeyword('none')
)('none')
infinity = Group(
CaselessKeyword('inf')
)('infinity')
argname = Word(alphas + '_', alphanums + '_')('argname')
funcname = Word(alphas + '_', alphanums + '_')('funcname')
leftParen = Literal('(').suppress()
rightParen = Literal(')').suppress()
comma = Literal(',').suppress()
equal = Literal('=').suppress()
leftBrace = Literal('{')
rightBrace = Literal('}')
backslash = Literal('\\').suppress()
symbols = '''(){},.'"\\|'''
arg = Group(
boolean |
number |
none |
aString |
infinity |
expression
)('args*')
kwarg = Group(argname + equal + arg)('kwargs*')
args = delimitedList(~kwarg + arg) # lookahead to prevent failing on equals
kwargs = delimitedList(kwarg)
def setRaw(s, loc, toks):
toks[0].raw = s[toks[0].start:toks[0].end]
call = Group(
Empty().setParseAction(lambda s, l, t: l)('start') +
funcname + leftParen +
Optional(
args + Optional(
comma + kwargs
)
) + rightParen +
Empty().leaveWhitespace().setParseAction(lambda s, l, t: l)('end')
).setParseAction(setRaw)('call')
validMetricChars = ''.join((set(printables) - set(symbols)))
escapedChar = backslash + Word(symbols + '=', exact=1)
partialPathElem = Combine(
OneOrMore(
escapedChar | Word(validMetricChars)
)
)
matchEnum = Combine(
leftBrace +
delimitedList(partialPathElem, combine=True) +
rightBrace
)
pathElement = Combine(
Group(partialPathElem | matchEnum) +
ZeroOrMore(matchEnum | partialPathElem)
)
pathExpression = delimitedList(pathElement, delim='.', combine=True)('pathExpression')
litarg = Group(
number | aString
)('args*')
litkwarg = Group(argname + equal + litarg)('kwargs*')
litargs = delimitedList(~litkwarg + litarg) # lookahead to prevent failing on equals
litkwargs = delimitedList(litkwarg)
template = Group(
Literal('template') + leftParen +
(call | pathExpression) +
Optional(comma + (litargs | litkwargs)) +
rightParen
)('template')
pipeSep = ZeroOrMore(Literal(' ')) + Literal('|') + ZeroOrMore(Literal(' '))
pipedExpression = Group(
(template | call | pathExpression) +
Group(ZeroOrMore(Suppress(pipeSep) + Group(call)('pipedCall')))('pipedCalls')
)('expression')
if __version__.startswith('1.'):
expression << pipedExpression
grammar << expression
else:
expression <<= pipedExpression
grammar <<= expression
def enableDebug():
for name, obj in globals().items():
try:
obj.setName(name)
obj.setDebug(True)
except Exception:
pass
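# --- Example (an assumption, not part of the original grammar module) ---
# A quick illustration of what the grammar accepts: a function call over a
# dotted metric path with a quoted string argument. dump() prints the named
# result groups (funcname, args, pathExpression, ...).
if __name__ == '__main__':
    results = grammar.parseString("alias(servers.web01.load, 'web01 load')")
    print(results.dump())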
|
"""
This is dummy code to demonstrate the usefulness of classes
"""
import sys
import numpy as np
import calc_density
import opacity_interpolation
import utilities as utils
class star(object):
name = "Attributes out of the init function are shared by all objects!"
def __init__(self, core_pressure, core_temp, total_lum, total_radius, total_mass, h_mass, he_mass):
"""
Things we know a priori about the star.
Units:
Core Pressure: g cm^-2
Core Temperature: K
Total Luminosity: erg s^-1
Total Radius: cm
Total Mass: g
"""
self.core_pressure = core_pressure
self.core_temp = core_temp
self.total_lum = total_lum
self.total_radius = total_radius
self.total_mass = total_mass
self.h_mass = h_mass
self.he_mass = he_mass
self.teff = None
def calc_teff(self):
return (self.total_lum/(4*np.pi*utils.sb_const*self.total_radius**2))**(1./4.)
# Need a more descriptive name for this function
def calc_other_pressure(self, opacity):
return (2*utils.g_const*self.total_mass)/(3*opacity*self.total_radius**2)
def pressure_from_ideal(self, density):
mu = calc_density.mu_is(self.h_mass, self.he_mass)
return (density*utils.b_const*self.teff)/(mu*utils.mass_of_hydrogen)
def calc_luminosity(self, radius, temperature):
return 4.*np.pi*(radius**2)*utils.sb_const*temperature**4.
def calc_del_rad(self, density, pressure, temperature, opacity, luminosity, mass):
term1 = 3/(16*np.pi*utils.radiation_density_constant*utils.light_speed*utils.g_const)
term2 = (opacity*luminosity*pressure)/(mass*temperature**4)
return term1*term2
def inward_start(star):
"""
Computing the initial guesses at the surface.
Outer radius and total luminosity
"""
surface_density = 10e-7
# Make an array of density values
    densities = np.linspace(1e-9, surface_density, int(1e5))  # num must be an integer
# Calculate corresponding pressure arrays
pressures1 = np.asarray([star.pressure_from_ideal(density) for density in densities])
# Calculate an array of opacity values for computing pressure2
opacities = [10**opacity_interpolation.opacity_is(np.log10(star.teff), np.log10(density)) for density in densities]
pressures2 = np.asarray([star.calc_other_pressure(opacity) for opacity in opacities])
intersection_index = np.abs(pressures1 - pressures2).argmin(0)
surface_pressure = (pressures1[intersection_index] + pressures2[intersection_index]) / 2
surface_density = densities[intersection_index]
return [surface_pressure, star.teff, star.total_radius, star.total_lum]
def derivatives(layer, enclosed_mass, star):
"""
    The enclosed_mass argument should already be the enclosed mass; compute it
    outside this function.
"""
density = calc_density.density_is(np.log10(layer[1]), np.log10(layer[0]), star.h_mass, star.he_mass)
opacity = 10**opacity_interpolation.opacity_is(np.log10(layer[1]), np.log10(density))
dpressure = -((utils.g_const)/(4*np.pi))*((enclosed_mass)/(layer[2]**4))
dradius = (1./(4.*np.pi))*(1./(density*layer[2]**2))
dluminosity = utils.calc_e_n(star, density, layer[1])
del_rad = star.calc_del_rad(density, layer[0], layer[1], opacity, layer[3], enclosed_mass)
if del_rad >= utils.del_ad:
dtemperature = -((utils.g_const*enclosed_mass*layer[1])/(4*np.pi*layer[0]*layer[2]**4))*utils.del_ad
else:
dtemperature = -((utils.g_const*enclosed_mass*layer[1])/(4*np.pi*layer[0]*layer[2]**4))*del_rad
return [dpressure, dtemperature, dradius, dluminosity]
solar_2x = star(1.6032636e17, 20.47409576e6, 15.51844053*(3.846e33), 1.66086519*(7e10), 2*(1.98e33), 0.70, 0.28)
print solar_2x.name
solar_2x.teff = solar_2x.calc_teff()
print solar_2x.teff # equals 8872.57
weird_star = star(1, 2, 3, 4, 5, 6, 7)
print weird_star.name # Prints the same thing even though it's a super weird star
weird_star.teff = weird_star.calc_teff()
print weird_star.teff # equals 4.026 burr!
surface_initial = inward_start(solar_2x)
surface_values = derivatives(surface_initial, solar_2x.total_mass, solar_2x,)
|
from uaitrain.operation.base_op import BaseUAITrainOp
from uaitrain.api.get_train_job_running_info import GetUAITrainRunningInfoApiOp
class BaseUAITrainRunningInfoOp(BaseUAITrainOp):
def __init__(self, parser):
super(BaseUAITrainRunningInfoOp, self).__init__(parser)
def _add_job_info_args(self, info_parser):
job_info_parser = info_parser.add_argument_group(
'Job Info Params', 'Job Infos')
job_info_parser.add_argument(
'--job_id',
type=str,
required=True,
help='The <job_id> to show Job Info')
def _add_args(self):
parser = self.parser.add_parser('info', help='Show UAI Train Job Info')
self.info_parser = parser
self._add_account_args(parser)
self._add_job_info_args(parser)
def _parse_args(self, args):
super(BaseUAITrainRunningInfoOp, self)._parse_args(args)
self.job_id = args['job_id']
return True
def _format_info(self, job_id, resp):
exec_time = resp['ExecTime']
cost = resp['TotalPrice']
print('JOB_ID: {0}; ExecTime: {1} secs; Total Cost: {2} yuan;'.format(
job_id,
exec_time,
float(cost) / 100))
def cmd_run(self, args):
        if not self._parse_args(args):
return False
info_op = GetUAITrainRunningInfoApiOp(
pub_key=self.pub_key,
priv_key=self.pri_key,
job_id=self.job_id,
project_id=self.project_id,
region=self.region,
zone=self.zone)
succ, resp = info_op.call_api()
if succ is False:
print("Error get job info of {0}, check your job_id".format(self.job_id))
return False
self._format_info(self.job_id, resp)
return True
|
import time
import uuid
import unittest2
from networking_fortinet.tasks import tasks
class TestTasks(unittest2.TestCase):
def setUp(self, tasks_id=None):
super(TestTasks, self).setUp()
self.tasks_id = tasks_id if tasks_id else str(uuid.uuid1())
self.tasks = tasks.Tasks(self.tasks_id)
def tearDown(self, id=None):
pass
def test_register(self, **subtask):
"""
        subtask is a dictionary with two parts, func and params; it will
        be executed like subtask['func'](*subtask['params']). The following
        is an example format of subtask:
'subtask':
{'params': (
<api_client.client.FortiosApiClient object at 0x2a14a90>,
{'id': 2, 'vdom': 'root'}
),
'func': <function wrapper at 0x2b62ed8>
}
"""
if not subtask:
subtask = {
'params': 1,
'func': time.sleep
}
self.tasks.register(**subtask)
self.assertIn(subtask, self.tasks._tasks)
def test_register_existing_task(self, **subtask):
"""
        subtask is a dictionary with two parts, func and params; it will
        be executed like subtask['func'](*subtask['params']). The following
        is an example format of subtask:
'subtask':
{'params': (
<api_client.client.FortiosApiClient object at 0x2a14a90>,
{'id': 2, 'vdom': 'root'}
),
'func': <function wrapper at 0x2b62ed8>
}
"""
if not subtask:
subtask = {
'params': 1,
'func': time.sleep
}
self.tasks.register(**subtask)
self.tasks.register(**subtask)
count = self.tasks._tasks.count(subtask)
self.assertEqual(1, count)
if __name__ == '__main__':
unittest2.main()
|
"""@package src.wi.tests.images_test
@author Piotr Wójcik
@author Krzysztof Danielowski
@date 24.11.2012
"""
from wi.tests import WiTestCase
import groups_test
import vm_test
import unittest
import random
class ImagesTests(WiTestCase, unittest.TestCase):
@staticmethod
def _test_upload_image_private(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_USER)
driver.get(self.base_url + "/images/images_private/")
self.wait_for_text("//table[@id='item-list']/tfoot/tr/td/ul/li/a", ["Upload image"])
driver.find_element_by_link_text("Upload image").click()
self.wait_for_text("//div[@id='dialog-div']/p", ["Please specify image parameters:"])
name = "witest_image" + str(random.randint(1, 100000))
driver.find_element_by_id("id_name").clear()
driver.find_element_by_id("id_name").send_keys(name)
driver.find_element_by_id("id_description").clear()
driver.find_element_by_id("id_description").send_keys(self.iso)
driver.find_element_by_id("id_path").clear()
driver.find_element_by_id("id_path").send_keys(self.iso)
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["Image upload started."])
driver.find_element_by_link_text("Logout").click()
return name
def _test_edit_image_private(self, name):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_USER)
driver.get(self.base_url + "/images/images_private/")
self.wait_for_text("//table[@id='item-list']/tbody", [name])
self.menu_click("Name", name, "Edit", {"dict": {"Size": "B"}, "path": "//table[@id='item-list']/tbody"})
self.wait_for_text("//div[@id='dialog-div']/p", ["Edit image data"])
newname = "new_witest_image" + str(random.randint(1, 100000))
driver.find_element_by_id("id_name").clear()
driver.find_element_by_id("id_name").send_keys(newname)
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully edited this image."])
driver.find_element_by_link_text("Logout").click()
return newname
@staticmethod
def _test_remove_image_private(self, name):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_USER)
driver.get(self.base_url + "/images/images_private/")
self.wait_for_text("//table[@id='item-list']/tbody", [name])
self.menu_click("Name", name, "Remove")
self.wait_for_text("//div[@id='dialog-div']/p", ["Do you really want to delete image"])
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully removed image"])
driver.find_element_by_link_text("Logout").click()
def _test_create_vm_from_image(self, name):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_USER)
driver.get(self.base_url + "/images/images_private/")
self.wait_for_text("//table[@id='item-list']/tbody", [name])
self.menu_click("Name", name, "Create virtual machine", {"dict": {"Size": "B"}, "path": "//table[@id='item-list']/tbody"})
self.wait_for_text("//div[@id='item-list']/div[2]/table/tbody", ["small"])
self.cell_click("Name", "small", None, element="a",
path_head_tds="//div[@id='item-list']/div[1]/table/tbody/tr/td",
path_body_trs="//div[@id='item-list']/div[2]/table/tbody/tr")
self.wait_for_text("//form[@id='wizard-form']/div[2]/fieldset/div/span/label", ["Assign IP address"])
driver.find_element_by_xpath("//div[@id='submit-div']/input").click()
self.wait_for_text("//form[@id='wizard-form']/div[2]/fieldset/div/span/label", ["Name"])
name = "witest_vm" + str(random.randint(1, 100000))
driver.find_element_by_id("id_3-name").clear()
driver.find_element_by_id("id_3-name").send_keys(name)
driver.find_element_by_css_selector("input.big_button").click()
self.wait_for_message(["Virtual machine is being created."])
driver.find_element_by_link_text("Logout").click()
return name
def _test_group_image_private(self, name, group_name):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_USER)
driver.get(self.base_url + "/images/images_private/")
self.wait_for_text("//table[@id='item-list']/tbody", [name])
self.menu_click("Name", name, "Assign to group", {"dict": {"Size": "B"}, "path": "//table[@id='item-list']/tbody"})
self.wait_for_text("//div[@id='dialog-div']/p", ["Enter a name of group for image"])
driver.find_element_by_xpath("//div[@id='dialog-div']/form/div/fieldset/div/span[2]/a/span").click()
driver.find_element_by_xpath("//a[contains(text(),'" + group_name + "')]").click()
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully assigned image "])
driver.find_element_by_link_text("Logout").click()
def _test_ungroup_image_private(self, name):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_USER)
driver.get(self.base_url + "/images/images_private/")
self.wait_for_text("//table[@id='item-list']/tbody", [name])
self.menu_click("Name", name, "Move to my images")
self.wait_for_text("//div[@id='dialog-div']/p", ["Do you want to make image"])
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully revoked group's assigment."])
driver.find_element_by_link_text("Logout").click()
def test_1_simple(self):
name = self._test_upload_image_private(self)
newname = self._test_edit_image_private(name)
self._test_remove_image_private(self, newname)
def test_2_create_vm(self):
name = self._test_upload_image_private(self)
vmname = self._test_create_vm_from_image(name)
vm_test.VMTests._test_destroy_vm(self, vmname)
self._test_remove_image_private(self, name)
def test_3_group(self):
name = self._test_upload_image_private(self)
group_name = groups_test.GroupsTests._test_create_group(self)
self._test_group_image_private(name, group_name)
self._test_ungroup_image_private(name)
groups_test.GroupsTests._test_remove_group(self, group_name, who=self.TEST_USER)
self._test_remove_image_private(self, name)
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloud_computing.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
import os
import logging
import multiprocessing as mp
from typing import Sequence, Dict, Optional, Tuple, List, Union, Any
import numpy as np
from .metrics.base import Metric, Score, Signature
IS_WINDOWS = os.name == 'nt'
sacrelogger = logging.getLogger('sacrebleu')
class Result:
"""A container to represent results from a particular statistical
significance test.
:param score: The floating point score for the system at hand.
    :param p_value: If given, the p-value obtained when the system at
    hand is compared to a baseline using a paired test.
    :param mean: When the paired bootstrap test is applied, the true mean
    score estimated from bootstrap resamples of the system.
    :param ci: When the paired bootstrap test is applied, the 95% confidence
    interval around the true mean score `mean`.
"""
def __init__(self, score: float, p_value: Optional[float] = None,
mean: Optional[float] = None, ci: Optional[float] = None):
self.score = score
self.p_value = p_value
self.mean = mean
self.ci = ci
def __repr__(self):
return ','.join([f'{k}={str(v)}' for k, v in self.__dict__.items()])
def estimate_ci(scores: np.ndarray) -> Tuple[float, float]:
"""Takes a list of scores and returns mean and 95% confidence
interval around the mean.
:param scores: A list of floating point scores.
:return: A tuple of mean and the 95% CI.
"""
# Sort the scores
scores = np.sort(scores)
n = len(scores)
# Get CI bounds (95%, i.e. 1/40 from left)
lower_idx = n // 40
upper_idx = n - lower_idx - 1
lower, upper = scores[lower_idx], scores[upper_idx]
ci = 0.5 * (upper - lower)
return (scores.mean(), ci)
def _bootstrap_resample(stats: List[List[Union[int, float]]],
metric: Metric, n_samples: int = 1000) -> Tuple[str, List[Score]]:
"""Performs bootstrap resampling for a single system to estimate
a confidence interval around the true mean.
:param stats: A list of statistics extracted from the system's hypotheses.
:param metric: The `Metric` instance to be used for score computation.
    :param n_samples: Number of bootstrap resamples to use.
:return: A tuple of the seed choice as string and the list of `Score`
instances for all bootstrap resamples.
"""
# Set numpy RNG's seed
# If given -> Fix to the given value
# If given but =='[Nn]one', don't fix the seed i.e. pull entropy from OS
seed = os.environ.get('SACREBLEU_SEED', '12345')
_seed = None if seed.lower() == 'none' else int(seed)
rng = np.random.default_rng(_seed)
# The indices that'll produce all bootstrap resamples at once
idxs = rng.choice(len(stats), size=(n_samples, len(stats)), replace=True)
# convert to numpy array. float32 is more efficient
stats = np.array(stats, dtype='float32')
# recompute scores for all resamples
scores = [
metric._compute_score_from_stats(_s.sum(0)) for _s in stats[idxs]]
return str(seed).lower(), scores
def _compute_p_value(stats: np.ndarray, real_difference: float) -> float:
"""Computes the p-value given the sample statistics and the real statistic.
:param stats: A numpy array with the sample statistics.
    :param real_difference: The real (observed) test statistic.
:return: The p-value.
"""
# Taken from: significance/StratifiedApproximateRandomizationTest.java
# https://github.com/jhclark/multeval.git
# "the != is important. if we want to score the same system against itself
# having a zero difference should not be attributed to chance."
c = np.sum(stats > real_difference)
# "+1 applies here, though it only matters for small numbers of shufflings,
# which we typically never do. it's necessary to ensure the probability of
# falsely rejecting the null hypothesis is no greater than the rejection
# level of the test (see william and morgan on significance tests)
p = (c + 1) / (len(stats) + 1)
return p
def _paired_ar_test(baseline_info: Dict[str, Tuple[np.ndarray, Result]],
sys_name: str,
hypotheses: Sequence[str],
references: Optional[Sequence[Sequence[str]]],
metrics: Dict[str, Metric],
n_samples: int = 10000,
n_ar_confidence: int = -1,
seed: Optional[int] = None) -> Tuple[str, Dict[str, Result]]:
"""Paired two-sided approximate randomization (AR) test for MT evaluation.
:param baseline_info: A dictionary with `Metric` instances as the keys,
that contains sufficient statistics and a `Result` instance for the baseline system.
:param sys_name: The name of the system to be evaluated.
:param hypotheses: A sequence of string hypotheses for the system.
:param references: A sequence of reference documents with document being
defined as a sequence of reference strings. If `None`, references
will be used through each metric's internal cache.
:param metrics: A dictionary of `Metric` instances that will be computed
for each system.
:param n_samples: The number of AR trials.
:param n_ar_confidence: The number of bootstrap resamples to use for
confidence estimation. A value of -1 disables confidence estimation.
:param seed: The seed value for the RNG. If `None`, the RNG will not be
fixed to a particular seed.
:return: A tuple with first element being the system name and the second
being a `Result` namedtuple.
"""
# Seed the RNG
rng = np.random.default_rng(seed)
# Generate indices that'll select stats
pos_sel = rng.integers(2, size=(n_samples, len(hypotheses)), dtype=bool)
# Flip mask to obtain selectors for system hypotheses
neg_sel = ~pos_sel
if n_ar_confidence > 0:
# Perform confidence estimation as well
bs_idxs = rng.choice(
len(hypotheses), size=(n_ar_confidence, len(hypotheses)), replace=True)
results = {}
for name, metric in metrics.items():
# Use pre-computed match stats for the baseline
bl_stats, bl_result = baseline_info[name]
# Compute system's stats and score
sacrelogger.info(f'Computing {name} for {sys_name!r} and extracting sufficient statistics')
sys_stats = metric._extract_corpus_statistics(hypotheses, references)
sys_score = metric._aggregate_and_compute(sys_stats)
# original test statistic: absolute difference between baseline and the system
diff = abs(bl_result.score - sys_score.score)
sacrelogger.info(f' > Performing approximate randomization test (# trials: {n_samples})')
# get shuffled pseudo systems
shuf_a = pos_sel @ bl_stats + neg_sel @ sys_stats
shuf_b = neg_sel @ bl_stats + pos_sel @ sys_stats
# Aggregate trial stats and compute scores for each
scores_a = np.array(
[metric._aggregate_and_compute(x).score for x in shuf_a[:, None]])
scores_b = np.array(
[metric._aggregate_and_compute(x).score for x in shuf_b[:, None]])
# Count the statistical difference and compute the p-value
p = _compute_p_value(
np.abs(np.array(scores_a) - np.array(scores_b)), diff)
res = Result(sys_score.score, p)
if n_ar_confidence > 0:
sacrelogger.info(f' > Performing bootstrap resampling for confidence interval (# resamples: {n_ar_confidence})')
sys_stats = np.array(sys_stats, dtype='float32')
# recompute scores for all resamples
sys_scores = [
metric._compute_score_from_stats(_s.sum(0)).score for _s in sys_stats[bs_idxs]]
res.mean, res.ci = estimate_ci(sys_scores)
# Store the result
results[name] = res
return sys_name, results
def _paired_bs_test(baseline_info: Dict[str, Tuple[np.ndarray, Result]],
sys_name: str,
hypotheses: Sequence[str],
references: Optional[Sequence[Sequence[str]]],
metrics: Dict[str, Metric],
n_samples: int = 1000,
n_ar_confidence: int = -1,
seed: Optional[int] = None) -> Tuple[str, Dict[str, Result]]:
"""Paired bootstrap resampling test for MT evaluation. This function
replicates the behavior of the Moses script called
`bootstrap-hypothesis-difference-significance.pl`.
:param baseline_info: A dictionary with `Metric` instances as the keys,
that contains sufficient statistics and a `Result` instance for the baseline system.
:param sys_name: The name of the system to be evaluated.
:param hypotheses: A sequence of string hypotheses for the system.
:param references: A sequence of reference documents with document being
defined as a sequence of reference strings. If `None`, references
will be used through each metric's internal cache.
:param metrics: A dictionary of `Metric` instances that will be computed
for each system.
:param n_samples: The number of bootstrap resamples.
:param n_ar_confidence: This parameter is not used for this function but
is there for signature compatibility in the API.
:param seed: The seed value for the RNG. If `None`, the RNG will not be
fixed to a particular seed.
:return: A tuple with first element being the system name and the second
being a `Result` namedtuple.
"""
# Seed the RNG
rng = np.random.default_rng(seed)
results = {}
    # It takes ~10ms to generate the indices
idxs = rng.choice(
len(hypotheses), size=(n_samples, len(hypotheses)), replace=True)
for name, metric in metrics.items():
# Use pre-computed match stats for the baseline
bl_stats, bl_result = baseline_info[name]
# Compute system's stats and score
sacrelogger.info(f'Computing {name} for {sys_name!r} and extracting sufficient statistics')
sys_stats = metric._extract_corpus_statistics(hypotheses, references)
sys_score = metric._aggregate_and_compute(sys_stats)
# Convert to numpy arrays for efficient indexing
sys_stats = np.array(sys_stats, dtype='float32')
bl_stats = np.array(bl_stats, dtype='float32')
# original test statistic: absolute difference between baseline and the system
diff = abs(bl_result.score - sys_score.score)
sacrelogger.info(f' > Performing paired bootstrap resampling test (# resamples: {n_samples})')
scores_bl = np.array(
[metric._compute_score_from_stats(_s.sum(0)).score for _s in bl_stats[idxs]])
scores_sys = np.array(
[metric._compute_score_from_stats(_s.sum(0)).score for _s in sys_stats[idxs]])
# Compute CI as well
sys_mean, sys_ci = estimate_ci(scores_sys)
# Compute the statistics
sample_diffs = np.abs(scores_sys - scores_bl)
stats = sample_diffs - sample_diffs.mean()
# Count the statistical difference and compute the p-value
p = _compute_p_value(stats, diff)
results[name] = Result(sys_score.score, p, sys_mean, sys_ci)
return sys_name, results
class PairedTest:
"""This is the manager class that will call the actual standalone implementation
for approximate randomization or paired bootstrap resampling, based on the
`test_type` argument.
    :param named_systems: A list of (system_name, system_hypotheses) tuples on
which the test will be applied.
:param metrics: A dictionary of `Metric` instances that will be computed
for each system.
:param references: A sequence of reference documents with document being
defined as a sequence of reference strings. If `None`, already cached references
will be used through each metric's internal cache.
:param test_type: `ar` for approximate randomization, `bs` for paired bootstrap.
:param n_samples: The number of AR trials (for `ar`) or bootstrap resamples (for `bs`).
The defaults (10000 or 1000 respectively) will be used if 0 is passed.
:param n_ar_confidence: If `approximate randomization` is selected, the number
of bootstrap resamples to use for confidence estimation. A value of -1 disables
confidence estimation. 0 will use the default of 1000.
:param n_jobs: If 0, a worker process will be spawned for each system variant.
If > 0, the number of workers will be set accordingly. The default of 1
does not use multi-processing.
"""
_DEFAULT_SAMPLES = {
'ar': 10000,
'bs': 1000,
}
def __init__(self, named_systems: List[Tuple[str, Sequence[str]]],
metrics: Dict[str, Metric],
references: Optional[Sequence[Sequence[str]]],
test_type: str = 'ar',
n_samples: int = 0,
n_ar_confidence: int = -1,
n_jobs: int = 1):
assert test_type in ('ar', 'bs'), f"Unknown test type {test_type!r}"
self.test_type = test_type
# Set method
if self.test_type == 'ar':
self._fn = _paired_ar_test
elif self.test_type == 'bs':
self._fn = _paired_bs_test
# Set numpy RNG's seed
# If given -> Fix to the given value
# If given but =='[Nn]one', don't fix the seed i.e. pull entropy from OS
        seed = os.environ.get('SACREBLEU_SEED', '12345')
        self._seed = None if seed.lower() == 'none' else int(seed)
        self.n_jobs = n_jobs
        self.references = references
        self.named_systems = named_systems
        # Set the defaults if requested
        self.n_ar_confidence = n_ar_confidence if n_ar_confidence != 0 else \
            self._DEFAULT_SAMPLES['bs']
        self.n_samples = n_samples if n_samples > 0 else \
            self._DEFAULT_SAMPLES[self.test_type]
        # Number of systems (excluding the baseline)
        self.n_systems = len(named_systems) - 1
        # Decide on number of workers
        if IS_WINDOWS:
            sacrelogger.warning('Parallel tests are not supported on Windows.')
            self.n_jobs = 1
        elif self.n_jobs == 0:
            # Decide automatically
            # Divide by two to ignore hyper-threading
            n_max_jobs = mp.cpu_count() // 2
            if n_max_jobs == 0:
                self.n_jobs = 1
            else:
                # Don't use more workers than the number of CPUs
                self.n_jobs = min(n_max_jobs, self.n_systems)
        self._signatures: Dict[str, Signature] = {}
        self._baseline_info: Dict[str, Tuple[Any, Result]] = {}
        ##################################################
        # Pre-compute and cache baseline system statistics
        ##################################################
        self.metrics = {}
        bl_name, bl_hyps = self.named_systems[0]
        for name, metric in metrics.items():
            sacrelogger.info(f'Pre-computing {name} statistics for {bl_name!r}')
            bl_stats = metric._extract_corpus_statistics(bl_hyps, self.references)
            bl_score = metric._aggregate_and_compute(bl_stats)
            # Compute CI for the baseline here once
            confidence_n = self.n_samples if self.test_type == 'bs' \
                else self.n_ar_confidence
            bl_mean, bl_ci = None, None
            if confidence_n > 0:
                _, bl_scores = _bootstrap_resample(bl_stats, metric, confidence_n)
                bl_mean, bl_ci = estimate_ci(np.array([x.score for x in bl_scores]))
            result = Result(bl_score.score, mean=bl_mean, ci=bl_ci)
            # Use updated name for the metric
            self._baseline_info[bl_score.name] = (bl_stats, result)
            self.metrics[bl_score.name] = metric
            # Update metric signature as well
            sig = metric.get_signature()
            sig.update('seed', str(self._seed).lower())
            # Num samples for bs, num trials for AR
            sig.update(self.test_type, self.n_samples)
            if self.n_ar_confidence > 0:
                # Bootstrap is used for AR CI as well
                sig.update('bs', self.n_ar_confidence)
            self._signatures[bl_score.name] = sig
    def __call__(self) -> Tuple[Dict[str, Signature], Dict[str, List[Union[str, Result]]]]:
        """Runs the paired test either on single or multiple worker processes."""
        tasks = []
        scores: Dict[str, List[Union[str, Result]]] = {}
        # Add the name column
        scores['System'] = [ns[0] for ns in self.named_systems]
        # Store baseline results as the first position
        for metric, (_, result) in self._baseline_info.items():
            scores[metric] = [result]
        # Prepare list of arguments for each comparison
        # Skip the baseline (pos: 0)
        for idx, (name, hyps) in enumerate(self.named_systems[1:]):
            seed = self._seed if self._seed else None
            tasks.append(
                (self._baseline_info, name, hyps, self.references,
                 self.metrics, self.n_samples, self.n_ar_confidence, seed))
        # Run the test(s)
        if self.n_jobs == 1:
            results = [self._fn(*args) for args in tasks]
        else:
            # NOTE: The overhead of worker creation is not negligible
            # but if you have many systems and TER enabled, this significantly
            # speeds up the test.
            # NOTE: This only works on Linux/Mac OS X but not Windows. Windows only
            # supports `spawn` backend which requires things to be called
            # from within __main__.
            sacrelogger.info(f'Launching {self.n_jobs} parallel workers.')
            with mp.get_context('fork').Pool(self.n_jobs) as pool:
                jobs = [pool.apply_async(self._fn, args) for args in tasks]
                # wait for completion
                results = [j.get() for j in jobs]
        # Keep the order deterministic
        for sys_name, sys_results in results:
            for metric, _result in sys_results.items():
                scores[metric].append(_result)
        return self._signatures, scores
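# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the
# sacrebleu package is installed so that `BLEU` can be imported; the
# hypothesis/reference sentences below are made-up placeholders.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    from sacrebleu.metrics import BLEU
    # One reference stream with two segments
    refs = [['the cat sat on the mat', 'a dog barked at night']]
    named_systems = [
        ('baseline', ['the cat sat on a mat', 'a dog barked at night']),
        ('system-1', ['the cat is on the mat', 'the dog barked at night']),
    ]
    # Paired bootstrap resampling ('bs') with the default number of resamples
    paired_test = PairedTest(named_systems, {'BLEU': BLEU()},
                             references=refs, test_type='bs')
    signatures, scores = paired_test()
    for metric, column in scores.items():
        print(metric, column)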
|
"""Dual Moving Average Crossover algorithm.
This algorithm buys apple once its short moving average crosses
its long moving average (indicating upwards momentum) and sells
its shares once the averages cross again (indicating downwards
momentum).
"""
from zipline.api import order_target, record, symbol
from zipline.finance import commission, slippage
def initialize(context):
    context.sym = symbol('AAPL')
    context.i = 0
    # Explicitly set the commission/slippage to the "old" value until we can
    # rebuild example data.
    # github.com/quantopian/zipline/blob/master/tests/resources/
    # rebuild_example_data#L105
    context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0))
    context.set_slippage(slippage.VolumeShareSlippage())
def handle_data(context, data):
    # Skip first 300 days to get full windows
    context.i += 1
    if context.i < 300:
        return
    # Compute averages
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    short_mavg = data.history(context.sym, 'price', 100, '1d').mean()
    long_mavg = data.history(context.sym, 'price', 300, '1d').mean()
    # Trading logic
    if short_mavg > long_mavg:
        # order_target orders as many shares as needed to
        # achieve the desired number of shares.
        order_target(context.sym, 100)
    elif short_mavg < long_mavg:
        order_target(context.sym, 0)
    # Save values for later inspection
    record(AAPL=data.current(context.sym, "price"),
           short_mavg=short_mavg,
           long_mavg=long_mavg)
def analyze(context=None, results=None):
    import matplotlib.pyplot as plt
    import logbook
    logbook.StderrHandler().push_application()
    log = logbook.Logger('Algorithm')
    fig = plt.figure()
    ax1 = fig.add_subplot(211)
    results.portfolio_value.plot(ax=ax1)
    ax1.set_ylabel('Portfolio value (USD)')
    ax2 = fig.add_subplot(212)
    ax2.set_ylabel('Price (USD)')
    # If data has been record()ed, then plot it.
    # Otherwise, log the fact that no data has been recorded.
    if ('AAPL' in results and 'short_mavg' in results and
            'long_mavg' in results):
        results['AAPL'].plot(ax=ax2)
        results[['short_mavg', 'long_mavg']].plot(ax=ax2)
        trans = results.ix[[t != [] for t in results.transactions]]
        buys = trans.ix[[t[0]['amount'] > 0 for t in
                         trans.transactions]]
        sells = trans.ix[
            [t[0]['amount'] < 0 for t in trans.transactions]]
        ax2.plot(buys.index, results.short_mavg.ix[buys.index],
                 '^', markersize=10, color='m')
        ax2.plot(sells.index, results.short_mavg.ix[sells.index],
                 'v', markersize=10, color='k')
        plt.legend(loc=0)
    else:
        msg = 'AAPL, short_mavg & long_mavg data not captured using record().'
        ax2.annotate(msg, xy=(0.1, 0.5))
        log.info(msg)
    plt.show()
def _test_args():
    """Extra arguments to use when zipline's automated tests run this example.
    """
    import pandas as pd
    return {
        'start': pd.Timestamp('2011', tz='utc'),
        'end': pd.Timestamp('2013', tz='utc'),
    }
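# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original example): zipline also
# exposes a programmatic entry point, `zipline.run_algorithm`, that can
# drive this module directly. The dates, capital base and bundle name
# below are illustrative and assume the bundle has already been ingested.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    import pandas as pd
    from zipline import run_algorithm
    run_algorithm(
        start=pd.Timestamp('2011', tz='utc'),
        end=pd.Timestamp('2013', tz='utc'),
        initialize=initialize,
        handle_data=handle_data,
        analyze=analyze,
        capital_base=10000,
        bundle='quantopian-quandl',  # assumed to be ingested beforehand
    )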
|
from google.cloud import aiplatform_v1beta1
async def sample_create_artifact():
    # Create a client
    client = aiplatform_v1beta1.MetadataServiceAsyncClient()
    # Initialize request argument(s)
    request = aiplatform_v1beta1.CreateArtifactRequest(
        parent="parent_value",
    )
    # Make the request
    response = await client.create_artifact(request=request)
    # Handle the response
    print(response)
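# ----------------------------------------------------------------------
# Hedged driver (not part of the generated sample): the coroutine above
# needs an event loop to run, and "parent_value" must be replaced with a
# real metadata store resource name before the request can succeed.
# ----------------------------------------------------------------------
if __name__ == "__main__":
    import asyncio
    asyncio.run(sample_create_artifact())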
|
import sys
import json
import time
import anchore.anchore_utils
gate_name = "ANCHORESEC"
triggers = {
    'VULNLOW':
    {
        'description':'triggers if a vulnerability of LOW severity is found',
        'params':'none'
    },
    'VULNMEDIUM':
    {
        'description':'triggers if a vulnerability of MED severity is found',
        'params':'none'
    },
    'VULNHIGH':
    {
        'description':'triggers if a vulnerability of HIGH severity is found',
        'params':'none'
    },
    'VULNCRITICAL':
    {
        'description':'triggers if a vulnerability of CRITICAL severity is found',
        'params':'none'
    },
    'VULNUNKNOWN':
    {
        'description':'triggers if a vulnerability of UNKNOWN severity is found',
        'params':'none'
    },
    'FEEDOUTOFDATE':
    {
        'description':'triggers if the CVE data is older than the window specified by the parameter MAXAGE (unit is number of days)',
        'params':'MAXAGE'
    },
    'UNSUPPORTEDDISTRO':
    {
        'description':'triggers if a vulnerability scan cannot be run against the image due to lack of vulnerability feed data for the images distro',
        'params':'none'
    },
}
try:
    config = anchore.anchore_utils.init_gate_cmdline(sys.argv, gate_name, gate_help=triggers)
except Exception as err:
    print str(err)
    sys.exit(1)
if not config:
    print "ERROR: could not set up environment for gate"
    sys.exit(1)
imgid = config['imgid']
try:
    params = config['params']
except:
    params = None
parsed_params = {}
if params:
    for paramstr in params:
        try:
            key, val = paramstr.split("=")
            parsed_params[key] = list()
            for p in val.split(","):
                parsed_params[key].append(p)
        except:
            pass
try:
    last_update, distro, cve_data = anchore.anchore_utils.cve_load_data(imgid)
    report = anchore.anchore_utils.cve_scanimage(cve_data, imgid)
except Exception as err:
    import traceback
    traceback.print_exc()
    print "ERROR: could not scan image for CVEs: " + str(err)
    outlist = list()
    outlist.append("UNSUPPORTEDDISTRO cannot perform CVE scan: "+str(err))
    anchore.anchore_utils.save_gate_output(imgid, gate_name, outlist)
    sys.exit(0)
outlist = list()
if 'MAXAGE' in parsed_params:
    try:
        for minage in parsed_params['MAXAGE']:
            mintime = time.time() - int(int(minage) * 86400)
            if last_update < mintime:
                outlist.append("FEEDOUTOFDATE The vulnerability feed for this image distro is older than MAXAGE ("+str(minage)+") days")
    except Exception as err:
        outlist.append("FEEDOUTOFDATE Cannot perform data feed up-to-date check - message from server: " + str(err))
for k in report.keys():
    for cvepkg in report[k]:
        vuln = cvepkg
        cve = k
        pkg = vuln['pkgName']
        sev = vuln['severity']
        url = vuln['url']
        if sev == 'Low':
            t = "VULNLOW"
            #tt = "PKGVULNLOW"
        elif sev == 'Medium':
            t = "VULNMEDIUM"
            #tt = "PKGVULNMEDIUM"
        elif sev == "High":
            t = "VULNHIGH"
            #tt = "PKGVULNHIGH"
        elif sev == "Critical":
            t = "VULNCRITICAL"
            #tt = "PKGVULNCRITICAL"
        else:
            t = "VULNUNKNOWN"
            #tt = "PKGVULNUNKNOWN"
        d = {'id':cve+"+"+pkg, 'desc':sev + " Vulnerability found in package - " + pkg + " (" + cve + " - " + url + ")"}
        outlist.append(t + " " + json.dumps(d))
        #d = {'id':cve+"+"+pkg, 'desc':sev + " Vulnerability found in package - " + pkg + " (" + cve + " - " + url + ")"}
        #outlist.append(tt + " " + json.dumps(d))
anchore.anchore_utils.save_gate_output(imgid, gate_name, outlist)
sys.exit(0)
|
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class HypervisorViewTest(test.BaseAdminViewTests):
    @test.create_stubs({api.nova: ('extension_supported',
                                   'hypervisor_list',
                                   'hypervisor_stats',
                                   'service_list')})
    def test_index(self):
        hypervisors = self.hypervisors.list()
        services = self.services.list()
        stats = self.hypervisors.stats
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.hypervisor_list(IsA(http.HttpRequest)).AndReturn(hypervisors)
        api.nova.hypervisor_stats(IsA(http.HttpRequest)).AndReturn(stats)
        api.nova.service_list(IsA(http.HttpRequest)).AndReturn(services)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:admin:hypervisors:index'))
        self.assertTemplateUsed(res, 'admin/hypervisors/index.html')
        hypervisors_tab = res.context['tab_group'].get_tab('hypervisor')
        self.assertItemsEqual(hypervisors_tab._tables['hypervisors'].data,
                              hypervisors)
        host_tab = res.context['tab_group'].get_tab('compute_host')
        host_table = host_tab._tables['compute_host']
        compute_services = [service for service in services
                            if service.binary == 'nova-compute']
        self.assertItemsEqual(host_table.data, compute_services)
        actions_host_up = host_table.get_row_actions(host_table.data[0])
        self.assertEqual(0, len(actions_host_up))
        actions_host_down = host_table.get_row_actions(host_table.data[1])
        self.assertEqual(1, len(actions_host_down))
        self.assertEqual('evacuate', actions_host_down[0].name)
    @test.create_stubs({api.nova: ('hypervisor_list',
                                   'hypervisor_stats',
                                   'service_list')})
    def test_service_list_unavailable(self):
"""test that error message should be returned when
nova.service_list isn't available
"""
        hypervisors = self.hypervisors.list()
        stats = self.hypervisors.stats
        api.nova.hypervisor_list(IsA(http.HttpRequest)).AndReturn(hypervisors)
        api.nova.hypervisor_stats(IsA(http.HttpRequest)).AndReturn(stats)
        api.nova.service_list(IsA(http.HttpRequest)).AndRaise(
            self.exceptions.nova)
        self.mox.ReplayAll()
        resp = self.client.get(reverse('horizon:admin:hypervisors:index'))
        self.assertMessageCount(resp, error=1, warning=0)
class HypervisorDetailViewTest(test.BaseAdminViewTests):
    @test.create_stubs({api.nova: ('hypervisor_search',)})
    def test_index(self):
        hypervisor = self.hypervisors.list().pop().hypervisor_hostname
        api.nova.hypervisor_search(
            IsA(http.HttpRequest), hypervisor).AndReturn([])
        self.mox.ReplayAll()
        url = reverse('horizon:admin:hypervisors:detail', args=[hypervisor])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'admin/hypervisors/detail.html')
        self.assertItemsEqual(res.context['table'].data, [])
|
import urllib3
import socket
import struct
import logging
from urllib3.packages.six.moves.queue import Empty
urllib3.disable_warnings()
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.CRITICAL)
class HTTPConnPool(urllib3.HTTPConnectionPool):
    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        # Disable access to the pool
        old_pool, self.pool = self.pool, None
        try:
            while True:
                conn = old_pool.get(block=False)
                if conn:
                    conn.sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
                    conn.close()
        except Empty:
            pass
class HTTPSConnPool(urllib3.HTTPSConnectionPool):
    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        # Disable access to the pool
        old_pool, self.pool = self.pool, None
        try:
            while True:
                conn = old_pool.get(block=False)
                if conn:
                    conn.sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
                    conn.close()
        except Empty:
            pass
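# ----------------------------------------------------------------------
# Minimal usage sketch (assumption, not part of the original module): the
# subclasses are drop-in replacements for urllib3's pools; the only change
# is that close() sets SO_LINGER so the sockets are reset immediately
# instead of being shut down gracefully. The host below is illustrative.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    pool = HTTPConnPool('example.com', port=80, maxsize=2)
    resp = pool.request('GET', '/')
    print(resp.status)
    pool.close()  # pooled sockets are reset via SO_LINGER rather than closed gracefully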
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('db', '0004_auto_20160719_0254'),
    ]
    operations = [
        migrations.AlterField(
            model_name='doctor',
            name='email',
            field=models.CharField(default=b'', max_length=255, unique=True),
        ),
    ]
|
from fabric.api import task, local
@task
def stat():
    """create statistics for pep8"""
    local(
        "pep8 --statistics --filename *.py */*.py */*/*.py */*/*/*.py */*/*/*/*.py */*/*/*/*/*.py")
@task
def auto():
    """run autopep8 on all python files"""
    local("autopep8 -i */*.py")
    local("autopep8 -i */*/*.py")
    local("autopep8 -i */*/*/*.py")
    local("autopep8 -i */*/*/*/*.py")
    local("autopep8 -i */*/*/*/*/*.py")
    local("autopep8 -i */*/*/*/*/*/*.py")
@task
def install():
    """install pep8, autopep8, pylint"""
    local("pip install autopep8 --upgrade")
    local("pip install pep8 --upgrade")
    local("pip install pylint --upgrade")
    local("pip install pyflakes --upgrade")
|
mapper_key_value = "likedeal"
|