| body (string, 26–98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (1 class: python) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def weight_variable(shape):
'weight_variable generates a weight variable of a given shape.'
initial = tf.truncated_normal(shape, stddev=(0.1 / math.sqrt(float(hiddenlayer_units))))
return tf.Variable(initial)
| -722,179,264,155,899,100
|
weight_variable generates a weight variable of a given shape.
|
FSL - Entire Project + Report/Final Project/Code/Exp1.py
|
weight_variable
|
AdityaPrasadMishra/TensorflowPractice
|
python
|
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=(0.1 / math.sqrt(float(hiddenlayer_units))))
return tf.Variable(initial)
|
def bias_variable(shape):
'bias_variable generates a bias variable of a given shape.'
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
| 4,559,501,419,305,478,000
|
bias_variable generates a bias variable of a given shape.
|
FSL - Entire Project + Report/Final Project/Code/Exp1.py
|
bias_variable
|
AdityaPrasadMishra/TensorflowPractice
|
python
|
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
|
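A note on the two helpers above: `weight_variable` reads a module-level `hiddenlayer_units` instead of taking it as a parameter, so it only works where that name is defined. A minimal usage sketch, assuming the TF 1.x API the sample targets and a hypothetical layer width:

import math
import tensorflow.compat.v1 as tf

hiddenlayer_units = 128  # hypothetical value; weight_variable reads this module-level name

def dense_relu(x, in_units, out_units):
    # Truncated-normal weights scaled by the hidden width, constant 0.1 bias.
    W = weight_variable([in_units, out_units])
    b = bias_variable([out_units])
    return tf.nn.relu(tf.matmul(x, W) + b)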
def test_update_rules(self):
"Just make sure it doesn't crash"
self.map.update_rules(1, [])
| 3,621,848,459,840,140,300
|
Just make sure it doesn't crash
|
neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py
|
test_update_rules
|
mmidolesov2/neutron
|
python
|
def test_update_rules(self):
self.map.update_rules(1, [])
|
def test_update_members(self):
"Just make sure we doesn't crash"
self.map.update_members(1, [])
| -8,679,030,213,529,236,000
|
Just make sure it doesn't crash
|
neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py
|
test_update_members
|
mmidolesov2/neutron
|
python
|
def test_update_members(self):
self.map.update_members(1, [])
|
def test_update_port_filter_applies_added_flows(self):
'Check flows are applied right after _set_flows is called.'
port_dict = {'device': 'port-id', 'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
with self.firewall.defer_apply():
self.firewall.update_port_filter(port_dict)
self.assertEqual(2, self.mock_bridge.apply_flows.call_count)
| -1,399,839,882,994,460,200
|
Check flows are applied right after _set_flows is called.
|
neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py
|
test_update_port_filter_applies_added_flows
|
mmidolesov2/neutron
|
python
|
def test_update_port_filter_applies_added_flows(self):
port_dict = {'device': 'port-id', 'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
with self.firewall.defer_apply():
self.firewall.update_port_filter(port_dict)
self.assertEqual(2, self.mock_bridge.apply_flows.call_count)
|
def test_update_security_group_rules(self):
"Just make sure it doesn't crash"
new_rules = [{'ethertype': constants.IPv4, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': firewall.EGRESS_DIRECTION, 'remote_group_id': 2}]
self.firewall.update_security_group_rules(1, new_rules)
| 1,945,480,302,246,285,000
|
Just make sure it doesn't crash
|
neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py
|
test_update_security_group_rules
|
mmidolesov2/neutron
|
python
|
def test_update_security_group_rules(self):
new_rules = [{'ethertype': constants.IPv4, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': firewall.EGRESS_DIRECTION, 'remote_group_id': 2}]
self.firewall.update_security_group_rules(1, new_rules)
|
def test_update_security_group_members(self):
"Just make sure it doesn't crash"
new_members = {constants.IPv4: [1, 2, 3, 4]}
self.firewall.update_security_group_members(2, new_members)
| 4,622,323,072,390,496,000
|
Just make sure it doesn't crash
|
neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py
|
test_update_security_group_members
|
mmidolesov2/neutron
|
python
|
def test_update_security_group_members(self):
new_members = {constants.IPv4: [1, 2, 3, 4]}
self.firewall.update_security_group_members(2, new_members)
|
def test_process_trusted_ports_port_not_found(self):
'Check that exception is not propagated outside.'
self.mock_bridge.br.get_vif_port_by_id.return_value = None
self.firewall.process_trusted_ports(['port_id'])
self.assertNotIn('port_id', self.firewall.sg_port_map.unfiltered)
| -7,640,833,490,315,465,000
|
Check that exception is not propagated outside.
|
neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py
|
test_process_trusted_ports_port_not_found
|
mmidolesov2/neutron
|
python
|
def test_process_trusted_ports_port_not_found(self):
self.mock_bridge.br.get_vif_port_by_id.return_value = None
self.firewall.process_trusted_ports(['port_id'])
self.assertNotIn('port_id', self.firewall.sg_port_map.unfiltered)
|
def test_remove_trusted_ports_not_managed_port(self):
'Check that exception is not propagated outside.'
self.firewall.remove_trusted_ports(['port_id'])
| 1,033,683,824,962,227,200
|
Check that exception is not propagated outside.
|
neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py
|
test_remove_trusted_ports_not_managed_port
|
mmidolesov2/neutron
|
python
|
def test_remove_trusted_ports_not_managed_port(self):
self.firewall.remove_trusted_ports(['port_id'])
|
def element(self, uri, content, attributes={}):
'Utility method for adding a complete simple element'
self.push(uri)
for (k, v) in attributes.iteritems():
self.attribute(k, v)
self.text(content)
self.pop()
| -5,322,985,118,655,762,000
|
Utility method for adding a complete simple element
|
lib/rdflib/plugins/serializers/xmlwriter.py
|
element
|
27theworldinurhand/schemaorg
|
python
|
def element(self, uri, content, attributes={}):
self.push(uri)
for (k, v) in attributes.iteritems():
self.attribute(k, v)
self.text(content)
self.pop()
|
def qname(self, uri):
'Compute qname for a uri using our extra namespaces,\n or the given namespace manager'
for (pre, ns) in self.extra_ns.items():
if uri.startswith(ns):
if (pre != ''):
                return ':'.join((pre, uri[len(ns):]))
else:
return uri[len(ns):]
return self.nm.qname(uri)
| -4,990,880,594,916,725,000
|
Compute qname for a uri using our extra namespaces,
or the given namespace manager
|
lib/rdflib/plugins/serializers/xmlwriter.py
|
qname
|
27theworldinurhand/schemaorg
|
python
|
def qname(self, uri):
'Compute qname for a uri using our extra namespaces,\n or the given namespace manager'
for (pre, ns) in self.extra_ns.items():
if uri.startswith(ns):
            if (pre != ''):
                return ':'.join((pre, uri[len(ns):]))
else:
return uri[len(ns):]
return self.nm.qname(uri)
|
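The prefix lookup in `qname` is a first-match scan over `extra_ns` with a fallback to the namespace manager (note the tuple fix above: `str.join` takes a single iterable). The same logic re-expressed as a standalone function, with a hypothetical `fallback` callable standing in for `self.nm.qname`:

def qname(uri, extra_ns, fallback):
    # First matching namespace wins; an empty prefix yields a bare local name.
    for pre, ns in extra_ns.items():
        if uri.startswith(ns):
            local = uri[len(ns):]
            return f'{pre}:{local}' if pre else local
    return fallback(uri)

# qname('http://schema.org/name', {'s': 'http://schema.org/'}, str) -> 's:name'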
def test_logout_auth_user(test_client):
'\n GIVEN a flask app\n WHEN an authorized user logs out\n THEN check that the user was logged out successfully\n '
log_in(test_client)
response = test_client.get('auth/logout', follow_redirects=True)
assert (response.status_code == 200)
assert (b'You have been logged out.' in response.data)
| 1,633,005,840,936,243,200
|
GIVEN a flask app
WHEN an authorized user logs out
THEN check that the user was logged out successfully
|
tests/test_auth/test_logout.py
|
test_logout_auth_user
|
KGB33/Wedding-Website
|
python
|
def test_logout_auth_user(test_client):
'\n GIVEN a flask app\n WHEN an authorized user logs out\n THEN check that the user was logged out successfully\n '
log_in(test_client)
response = test_client.get('auth/logout', follow_redirects=True)
assert (response.status_code == 200)
assert (b'You have been logged out.' in response.data)
|
def test_logout_anon_user(test_client):
    '\n GIVEN a flask app\n WHEN an anon user attempts to log out\n THEN check that a message flashes informing them that they are already logged out.\n '
response = test_client.get('auth/logout', follow_redirects=True)
assert (response.status_code == 200)
assert (b'You were not, and still are not, logged in.' in response.data)
| 728,286,334,456,950,100
|
GIVEN a flask app
WHEN an anon user attempts to log out
THEN check that a message flashes informing them that they are already logged out.
|
tests/test_auth/test_logout.py
|
test_logout_anon_user
|
KGB33/Wedding-Website
|
python
|
def test_logout_anon_user(test_client):
    '\n GIVEN a flask app\n WHEN an anon user attempts to log out\n THEN check that a message flashes informing them that they are already logged out.\n '
response = test_client.get('auth/logout', follow_redirects=True)
assert (response.status_code == 200)
assert (b'You were not, and still are not, logged in.' in response.data)
|
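Both logout tests call a `log_in` helper that is not shown in this slice. A hypothetical sketch of such a helper using Flask's test client; the route and credentials are assumptions, not taken from the repository:

def log_in(test_client, email='user@example.com', password='password'):
    # Hypothetical helper: post credentials to the login route and follow the redirect.
    return test_client.post(
        'auth/login',
        data={'email': email, 'password': password},
        follow_redirects=True,
    )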
def demo():
'Output:\n ---------β\n ----------\n ----?????-\n ----------\n ----------\n --!!!-----\n --!!!-----\n ----------\n ----------\n β---------\n '
n = 10
grid = {}
grid[(0, 0)] = 'β'
grid[((n - 1), (n - 1))] = 'β'
fill(grid, '!', start=(2, 3), stop=(5, 5))
fill(grid, '?', start=(4, 7), stop=(9, 8))
print(stringify(grid, n))
| -2,668,569,732,923,006,500
|
Output:
---------β
----------
----?????-
----------
----------
--!!!-----
--!!!-----
----------
----------
β---------
|
examples/grids/python/grid.py
|
demo
|
ssangervasi/examples
|
python
|
def demo():
'Output:\n ---------β\n ----------\n ----?????-\n ----------\n ----------\n --!!!-----\n --!!!-----\n ----------\n ----------\n β---------\n '
n = 10
grid = {}
grid[(0, 0)] = 'β'
grid[((n - 1), (n - 1))] = 'β'
fill(grid, '!', start=(2, 3), stop=(5, 5))
fill(grid, '?', start=(4, 7), stop=(9, 8))
print(stringify(grid, n))
|
def fill(grid: dict, value: str, start=(0, 0), stop=(0, 0)):
'Using product allows for flatter loops.'
from itertools import product
for coord in product(range(start[0], stop[0]), range(start[1], stop[1])):
grid[coord] = value
| -679,394,345,175,105,200
|
Using product allows for flatter loops.
|
examples/grids/python/grid.py
|
fill
|
ssangervasi/examples
|
python
|
def fill(grid: dict, value: str, start=(0, 0), stop=(0, 0)):
from itertools import product
for coord in product(range(start[0], stop[0]), range(start[1], stop[1])):
grid[coord] = value
|
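The point of the `fill` docstring is that one loop over `product` replaces two nested range loops. For example:

from itertools import product

# Equivalent nested form:
#   for x in range(2, 5):
#       for y in range(3, 5):
#           grid[(x, y)] = value
list(product(range(2, 5), range(3, 5)))
# [(2, 3), (2, 4), (3, 3), (3, 4), (4, 3), (4, 4)]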
def stringify(grid: dict, n: int) -> str:
'Stringify with (0, 0) in the lower-left corner.'
rows = []
for y in reversed(range(n)):
row = []
for x in range(n):
value = grid.get((x, y), '-')
row.append(value)
rows.append(row)
return '\n'.join((''.join(row) for row in rows))
| 2,110,890,005,807,589,400
|
Stringify with (0, 0) in the lower-left corner.
|
examples/grids/python/grid.py
|
stringify
|
ssangervasi/examples
|
python
|
def stringify(grid: dict, n: int) -> str:
rows = []
for y in reversed(range(n)):
row = []
for x in range(n):
value = grid.get((x, y), '-')
row.append(value)
rows.append(row)
    return '\n'.join((''.join(row) for row in rows))
|
def __init__(self, ancestor_counts=None, record_set=None, rule=None, rfv=None, n_per=None, top_n=None, limits=None, table_name=None, name=None):
'Selection - a model defined in OpenAPI'
self._ancestor_counts = None
self._record_set = None
self._rule = None
self._rfv = None
self._n_per = None
self._top_n = None
self._limits = None
self._table_name = None
self._name = None
self.discriminator = None
if (ancestor_counts is not None):
self.ancestor_counts = ancestor_counts
if (record_set is not None):
self.record_set = record_set
if (rule is not None):
self.rule = rule
if (rfv is not None):
self.rfv = rfv
if (n_per is not None):
self.n_per = n_per
if (top_n is not None):
self.top_n = top_n
if (limits is not None):
self.limits = limits
self.table_name = table_name
if (name is not None):
self.name = name
| -6,999,714,526,438,198,000
|
Selection - a model defined in OpenAPI
|
apteco_api/models/selection.py
|
__init__
|
Apteco/apteco-api
|
python
|
def __init__(self, ancestor_counts=None, record_set=None, rule=None, rfv=None, n_per=None, top_n=None, limits=None, table_name=None, name=None):
self._ancestor_counts = None
self._record_set = None
self._rule = None
self._rfv = None
self._n_per = None
self._top_n = None
self._limits = None
self._table_name = None
self._name = None
self.discriminator = None
if (ancestor_counts is not None):
self.ancestor_counts = ancestor_counts
if (record_set is not None):
self.record_set = record_set
if (rule is not None):
self.rule = rule
if (rfv is not None):
self.rfv = rfv
if (n_per is not None):
self.n_per = n_per
if (top_n is not None):
self.top_n = top_n
if (limits is not None):
self.limits = limits
self.table_name = table_name
if (name is not None):
self.name = name
|
@property
def ancestor_counts(self):
'Gets the ancestor_counts of this Selection. # noqa: E501\n\n\n :return: The ancestor_counts of this Selection. # noqa: E501\n :rtype: bool\n '
return self._ancestor_counts
| -3,247,776,088,569,675,000
|
Gets the ancestor_counts of this Selection. # noqa: E501
:return: The ancestor_counts of this Selection. # noqa: E501
:rtype: bool
|
apteco_api/models/selection.py
|
ancestor_counts
|
Apteco/apteco-api
|
python
|
@property
def ancestor_counts(self):
'Gets the ancestor_counts of this Selection. # noqa: E501\n\n\n :return: The ancestor_counts of this Selection. # noqa: E501\n :rtype: bool\n '
return self._ancestor_counts
|
@ancestor_counts.setter
def ancestor_counts(self, ancestor_counts):
'Sets the ancestor_counts of this Selection.\n\n\n :param ancestor_counts: The ancestor_counts of this Selection. # noqa: E501\n :type: bool\n '
self._ancestor_counts = ancestor_counts
| 4,084,213,159,388,093,400
|
Sets the ancestor_counts of this Selection.
:param ancestor_counts: The ancestor_counts of this Selection. # noqa: E501
:type: bool
|
apteco_api/models/selection.py
|
ancestor_counts
|
Apteco/apteco-api
|
python
|
@ancestor_counts.setter
def ancestor_counts(self, ancestor_counts):
'Sets the ancestor_counts of this Selection.\n\n\n :param ancestor_counts: The ancestor_counts of this Selection. # noqa: E501\n :type: bool\n '
self._ancestor_counts = ancestor_counts
|
@property
def record_set(self):
'Gets the record_set of this Selection. # noqa: E501\n\n\n :return: The record_set of this Selection. # noqa: E501\n :rtype: RecordSet\n '
return self._record_set
| -1,940,476,933,900,348,200
|
Gets the record_set of this Selection. # noqa: E501
:return: The record_set of this Selection. # noqa: E501
:rtype: RecordSet
|
apteco_api/models/selection.py
|
record_set
|
Apteco/apteco-api
|
python
|
@property
def record_set(self):
'Gets the record_set of this Selection. # noqa: E501\n\n\n :return: The record_set of this Selection. # noqa: E501\n :rtype: RecordSet\n '
return self._record_set
|
@record_set.setter
def record_set(self, record_set):
'Sets the record_set of this Selection.\n\n\n :param record_set: The record_set of this Selection. # noqa: E501\n :type: RecordSet\n '
self._record_set = record_set
| 3,298,788,785,948,843,500
|
Sets the record_set of this Selection.
:param record_set: The record_set of this Selection. # noqa: E501
:type: RecordSet
|
apteco_api/models/selection.py
|
record_set
|
Apteco/apteco-api
|
python
|
@record_set.setter
def record_set(self, record_set):
'Sets the record_set of this Selection.\n\n\n :param record_set: The record_set of this Selection. # noqa: E501\n :type: RecordSet\n '
self._record_set = record_set
|
@property
def rule(self):
'Gets the rule of this Selection. # noqa: E501\n\n\n :return: The rule of this Selection. # noqa: E501\n :rtype: Rule\n '
return self._rule
| 7,931,853,896,142,618,000
|
Gets the rule of this Selection. # noqa: E501
:return: The rule of this Selection. # noqa: E501
:rtype: Rule
|
apteco_api/models/selection.py
|
rule
|
Apteco/apteco-api
|
python
|
@property
def rule(self):
'Gets the rule of this Selection. # noqa: E501\n\n\n :return: The rule of this Selection. # noqa: E501\n :rtype: Rule\n '
return self._rule
|
@rule.setter
def rule(self, rule):
'Sets the rule of this Selection.\n\n\n :param rule: The rule of this Selection. # noqa: E501\n :type: Rule\n '
self._rule = rule
| 6,730,253,385,272,637,000
|
Sets the rule of this Selection.
:param rule: The rule of this Selection. # noqa: E501
:type: Rule
|
apteco_api/models/selection.py
|
rule
|
Apteco/apteco-api
|
python
|
@rule.setter
def rule(self, rule):
'Sets the rule of this Selection.\n\n\n :param rule: The rule of this Selection. # noqa: E501\n :type: Rule\n '
self._rule = rule
|
@property
def rfv(self):
'Gets the rfv of this Selection. # noqa: E501\n\n\n :return: The rfv of this Selection. # noqa: E501\n :rtype: RFV\n '
return self._rfv
| -5,043,599,251,545,374,000
|
Gets the rfv of this Selection. # noqa: E501
:return: The rfv of this Selection. # noqa: E501
:rtype: RFV
|
apteco_api/models/selection.py
|
rfv
|
Apteco/apteco-api
|
python
|
@property
def rfv(self):
'Gets the rfv of this Selection. # noqa: E501\n\n\n :return: The rfv of this Selection. # noqa: E501\n :rtype: RFV\n '
return self._rfv
|
@rfv.setter
def rfv(self, rfv):
'Sets the rfv of this Selection.\n\n\n :param rfv: The rfv of this Selection. # noqa: E501\n :type: RFV\n '
self._rfv = rfv
| 5,855,284,156,993,970,000
|
Sets the rfv of this Selection.
:param rfv: The rfv of this Selection. # noqa: E501
:type: RFV
|
apteco_api/models/selection.py
|
rfv
|
Apteco/apteco-api
|
python
|
@rfv.setter
def rfv(self, rfv):
'Sets the rfv of this Selection.\n\n\n :param rfv: The rfv of this Selection. # noqa: E501\n :type: RFV\n '
self._rfv = rfv
|
@property
def n_per(self):
'Gets the n_per of this Selection. # noqa: E501\n\n\n :return: The n_per of this Selection. # noqa: E501\n :rtype: NPer\n '
return self._n_per
| 5,518,704,617,051,992,000
|
Gets the n_per of this Selection. # noqa: E501
:return: The n_per of this Selection. # noqa: E501
:rtype: NPer
|
apteco_api/models/selection.py
|
n_per
|
Apteco/apteco-api
|
python
|
@property
def n_per(self):
'Gets the n_per of this Selection. # noqa: E501\n\n\n :return: The n_per of this Selection. # noqa: E501\n :rtype: NPer\n '
return self._n_per
|
@n_per.setter
def n_per(self, n_per):
'Sets the n_per of this Selection.\n\n\n :param n_per: The n_per of this Selection. # noqa: E501\n :type: NPer\n '
self._n_per = n_per
| 3,153,032,923,048,521,700
|
Sets the n_per of this Selection.
:param n_per: The n_per of this Selection. # noqa: E501
:type: NPer
|
apteco_api/models/selection.py
|
n_per
|
Apteco/apteco-api
|
python
|
@n_per.setter
def n_per(self, n_per):
'Sets the n_per of this Selection.\n\n\n :param n_per: The n_per of this Selection. # noqa: E501\n :type: NPer\n '
self._n_per = n_per
|
@property
def top_n(self):
'Gets the top_n of this Selection. # noqa: E501\n\n\n :return: The top_n of this Selection. # noqa: E501\n :rtype: TopN\n '
return self._top_n
| 9,146,417,730,161,683,000
|
Gets the top_n of this Selection. # noqa: E501
:return: The top_n of this Selection. # noqa: E501
:rtype: TopN
|
apteco_api/models/selection.py
|
top_n
|
Apteco/apteco-api
|
python
|
@property
def top_n(self):
'Gets the top_n of this Selection. # noqa: E501\n\n\n :return: The top_n of this Selection. # noqa: E501\n :rtype: TopN\n '
return self._top_n
|
@top_n.setter
def top_n(self, top_n):
'Sets the top_n of this Selection.\n\n\n :param top_n: The top_n of this Selection. # noqa: E501\n :type: TopN\n '
self._top_n = top_n
| 5,989,069,227,293,145,000
|
Sets the top_n of this Selection.
:param top_n: The top_n of this Selection. # noqa: E501
:type: TopN
|
apteco_api/models/selection.py
|
top_n
|
Apteco/apteco-api
|
python
|
@top_n.setter
def top_n(self, top_n):
'Sets the top_n of this Selection.\n\n\n :param top_n: The top_n of this Selection. # noqa: E501\n :type: TopN\n '
self._top_n = top_n
|
@property
def limits(self):
'Gets the limits of this Selection. # noqa: E501\n\n\n :return: The limits of this Selection. # noqa: E501\n :rtype: Limits\n '
return self._limits
| -7,770,541,340,939,093,000
|
Gets the limits of this Selection. # noqa: E501
:return: The limits of this Selection. # noqa: E501
:rtype: Limits
|
apteco_api/models/selection.py
|
limits
|
Apteco/apteco-api
|
python
|
@property
def limits(self):
'Gets the limits of this Selection. # noqa: E501\n\n\n :return: The limits of this Selection. # noqa: E501\n :rtype: Limits\n '
return self._limits
|
@limits.setter
def limits(self, limits):
'Sets the limits of this Selection.\n\n\n :param limits: The limits of this Selection. # noqa: E501\n :type: Limits\n '
self._limits = limits
| 5,356,089,318,034,633,000
|
Sets the limits of this Selection.
:param limits: The limits of this Selection. # noqa: E501
:type: Limits
|
apteco_api/models/selection.py
|
limits
|
Apteco/apteco-api
|
python
|
@limits.setter
def limits(self, limits):
'Sets the limits of this Selection.\n\n\n :param limits: The limits of this Selection. # noqa: E501\n :type: Limits\n '
self._limits = limits
|
@property
def table_name(self):
'Gets the table_name of this Selection. # noqa: E501\n\n\n :return: The table_name of this Selection. # noqa: E501\n :rtype: str\n '
return self._table_name
| -4,334,643,837,896,846,300
|
Gets the table_name of this Selection. # noqa: E501
:return: The table_name of this Selection. # noqa: E501
:rtype: str
|
apteco_api/models/selection.py
|
table_name
|
Apteco/apteco-api
|
python
|
@property
def table_name(self):
'Gets the table_name of this Selection. # noqa: E501\n\n\n :return: The table_name of this Selection. # noqa: E501\n :rtype: str\n '
return self._table_name
|
@table_name.setter
def table_name(self, table_name):
'Sets the table_name of this Selection.\n\n\n :param table_name: The table_name of this Selection. # noqa: E501\n :type: str\n '
if (table_name is None):
raise ValueError('Invalid value for `table_name`, must not be `None`')
self._table_name = table_name
| -8,181,616,920,197,953,000
|
Sets the table_name of this Selection.
:param table_name: The table_name of this Selection. # noqa: E501
:type: str
|
apteco_api/models/selection.py
|
table_name
|
Apteco/apteco-api
|
python
|
@table_name.setter
def table_name(self, table_name):
'Sets the table_name of this Selection.\n\n\n :param table_name: The table_name of this Selection. # noqa: E501\n :type: str\n '
if (table_name is None):
raise ValueError('Invalid value for `table_name`, must not be `None`')
self._table_name = table_name
|
@property
def name(self):
'Gets the name of this Selection. # noqa: E501\n\n\n :return: The name of this Selection. # noqa: E501\n :rtype: str\n '
return self._name
| -2,109,806,360,794,677,500
|
Gets the name of this Selection. # noqa: E501
:return: The name of this Selection. # noqa: E501
:rtype: str
|
apteco_api/models/selection.py
|
name
|
Apteco/apteco-api
|
python
|
@property
def name(self):
'Gets the name of this Selection. # noqa: E501\n\n\n :return: The name of this Selection. # noqa: E501\n :rtype: str\n '
return self._name
|
@name.setter
def name(self, name):
'Sets the name of this Selection.\n\n\n :param name: The name of this Selection. # noqa: E501\n :type: str\n '
self._name = name
| 2,515,383,182,738,667,500
|
Sets the name of this Selection.
:param name: The name of this Selection. # noqa: E501
:type: str
|
apteco_api/models/selection.py
|
name
|
Apteco/apteco-api
|
python
|
@name.setter
def name(self, name):
'Sets the name of this Selection.\n\n\n :param name: The name of this Selection. # noqa: E501\n :type: str\n '
self._name = name
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
| 8,442,519,487,048,767,000
|
Returns the model properties as a dict
|
apteco_api/models/selection.py
|
to_dict
|
Apteco/apteco-api
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
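The generated `to_dict` above walks `self.openapi_types` and serializes one level of lists and dicts, delegating to nested models' own `to_dict`. A compact Python 3 sketch of the same idea without `six`, recursing fully rather than one level (an illustration, not the generator's exact output):

def serialize(value):
    # Models expose to_dict; containers recurse; scalars pass through.
    if isinstance(value, list):
        return [serialize(v) for v in value]
    if hasattr(value, 'to_dict'):
        return value.to_dict()
    if isinstance(value, dict):
        return {k: serialize(v) for k, v in value.items()}
    return value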
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
apteco_api/models/selection.py
|
to_str
|
Apteco/apteco-api
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
apteco_api/models/selection.py
|
__repr__
|
Apteco/apteco-api
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, Selection)):
return False
return (self.__dict__ == other.__dict__)
| 6,380,681,132,851,042,000
|
Returns true if both objects are equal
|
apteco_api/models/selection.py
|
__eq__
|
Apteco/apteco-api
|
python
|
def __eq__(self, other):
if (not isinstance(other, Selection)):
return False
return (self.__dict__ == other.__dict__)
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
| 7,764,124,047,908,058,000
|
Returns true if both objects are not equal
|
apteco_api/models/selection.py
|
__ne__
|
Apteco/apteco-api
|
python
|
def __ne__(self, other):
return (not (self == other))
|
def update(self, delta, wind):
'\n Integrate the differential equations defining dynamics, update sensors\n delta = (delta_a, delta_e, delta_r, delta_t) are the control inputs\n wind is the wind vector in inertial coordinates\n Ts is the time step between function calls.\n '
forces_moments = self._forces_moments(delta)
time_step = self._ts_simulation
k1 = self._derivatives(self._state, forces_moments)
k2 = self._derivatives((self._state + ((time_step / 2.0) * k1)), forces_moments)
k3 = self._derivatives((self._state + ((time_step / 2.0) * k2)), forces_moments)
k4 = self._derivatives((self._state + (time_step * k3)), forces_moments)
self._state += ((time_step / 6) * (((k1 + (2 * k2)) + (2 * k3)) + k4))
e0 = self._state.item(6)
e1 = self._state.item(7)
e2 = self._state.item(8)
e3 = self._state.item(9)
normE = np.sqrt(((((e0 ** 2) + (e1 ** 2)) + (e2 ** 2)) + (e3 ** 2)))
self._state[6][0] = (self._state.item(6) / normE)
self._state[7][0] = (self._state.item(7) / normE)
self._state[8][0] = (self._state.item(8) / normE)
self._state[9][0] = (self._state.item(9) / normE)
self._update_velocity_data(wind)
self._update_true_state()
| -2,032,833,507,957,320,700
|
Integrate the differential equations defining dynamics, update sensors
delta = (delta_a, delta_e, delta_r, delta_t) are the control inputs
wind is the wind vector in inertial coordinates
Ts is the time step between function calls.
|
Lectures/MAV_Dynamics/mav_dynamics.py
|
update
|
donnel2-cooper/drone_control
|
python
|
def update(self, delta, wind):
'\n Integrate the differential equations defining dynamics, update sensors\n delta = (delta_a, delta_e, delta_r, delta_t) are the control inputs\n wind is the wind vector in inertial coordinates\n Ts is the time step between function calls.\n '
forces_moments = self._forces_moments(delta)
time_step = self._ts_simulation
k1 = self._derivatives(self._state, forces_moments)
k2 = self._derivatives((self._state + ((time_step / 2.0) * k1)), forces_moments)
k3 = self._derivatives((self._state + ((time_step / 2.0) * k2)), forces_moments)
k4 = self._derivatives((self._state + (time_step * k3)), forces_moments)
self._state += ((time_step / 6) * (((k1 + (2 * k2)) + (2 * k3)) + k4))
e0 = self._state.item(6)
e1 = self._state.item(7)
e2 = self._state.item(8)
e3 = self._state.item(9)
normE = np.sqrt(((((e0 ** 2) + (e1 ** 2)) + (e2 ** 2)) + (e3 ** 2)))
self._state[6][0] = (self._state.item(6) / normE)
self._state[7][0] = (self._state.item(7) / normE)
self._state[8][0] = (self._state.item(8) / normE)
self._state[9][0] = (self._state.item(9) / normE)
self._update_velocity_data(wind)
self._update_true_state()
|
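`update` is a classical fourth-order Runge-Kutta step followed by renormalization of the quaternion (state elements 6 through 9), which keeps the attitude on the unit sphere despite integration drift. The integrator in isolation, assuming `f` returns the state derivative:

import numpy as np

def rk4_step(f, x, dt):
    # Four derivative evaluations, weighted 1-2-2-1.
    k1 = f(x)
    k2 = f(x + 0.5 * dt * k1)
    k3 = f(x + 0.5 * dt * k2)
    k4 = f(x + dt * k3)
    return x + (dt / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)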
def _derivatives(self, x, u):
'\n for the dynamics xdot = f(x, u), returns fdot(x, u)\n '
f_b = u[:3]
m_b = u[3:]
r_i = x[:3]
v_b = x[3:6]
q_ib = x[6:10]
w_b = x[10:]
q_ib = (q_ib / np.linalg.norm(q_ib))
R_ib = Quaternion2Rotation(q_ib)
rdot_i = (R_ib @ v_b)
vdot_b = (((1 / MAV.mass) * f_b) - (skew(w_b) @ v_b))
wq_ib = np.zeros((4, 1))
wq_ib[1:] = w_b
qdot_ib = (0.5 * quat_prod(wq_ib, q_ib))
wt_b = skew(w_b)
wdot_b = (np.linalg.inv(MAV.J) @ (m_b - (wt_b @ (MAV.J @ w_b))))
x_out = np.concatenate([rdot_i, vdot_b, qdot_ib, np.array(wdot_b)], axis=0)
return x_out
| 6,314,086,267,136,015,000
|
for the dynamics xdot = f(x, u), returns fdot(x, u)
|
Lectures/MAV_Dynamics/mav_dynamics.py
|
_derivatives
|
donnel2-cooper/drone_control
|
python
|
def _derivatives(self, x, u):
'\n \n '
f_b = u[:3]
m_b = u[3:]
r_i = x[:3]
v_b = x[3:6]
q_ib = x[6:10]
w_b = x[10:]
q_ib = (q_ib / np.linalg.norm(q_ib))
R_ib = Quaternion2Rotation(q_ib)
rdot_i = (R_ib @ v_b)
vdot_b = (((1 / MAV.mass) * f_b) - (skew(w_b) @ v_b))
wq_ib = np.zeros((4, 1))
wq_ib[1:] = w_b
qdot_ib = (0.5 * quat_prod(wq_ib, q_ib))
wt_b = skew(w_b)
wdot_b = (np.linalg.inv(MAV.J) @ (m_b - (wt_b @ (MAV.J @ w_b))))
x_out = np.concatenate([rdot_i, vdot_b, qdot_ib, np.array(wdot_b)], axis=0)
return x_out
|
def _forces_moments(self, delta):
'\n return the forces on the UAV based on the state, wind, and control surfaces\n :param delta: np.matrix(delta_e, delta_a, delta_r, delta_t)\n :return: Forces and Moments on the UAV np.matrix(Fx, Fy, Fz, Ml, Mn, Mm)\n '
(phi, theta, psi) = Quaternion2Euler(self._state[6:10])
p = self._state.item(10)
q = self._state.item(11)
r = self._state.item(12)
delta_e = delta.item(0)
delta_a = delta.item(1)
delta_r = delta.item(2)
delta_t = delta.item(3)
mg = (MAV.mass * MAV.gravity)
fx_grav = ((- mg) * np.sin(theta))
fy_grav = ((mg * np.cos(theta)) * np.sin(phi))
fz_grav = ((mg * np.cos(theta)) * np.cos(phi))
(fx_thrust, Mx_thrust) = self.thrust_from_prop(delta_t)
fy_thrust = 0
fz_thrust = 0
My_thrust = 0
Mz_thrust = 0
b = MAV.b
cyp = MAV.C_Y_p
cyr = MAV.C_Y_r
cydeltaa = MAV.C_Y_delta_a
cydeltar = MAV.C_Y_delta_r
aero_coef = (((0.5 * MAV.rho) * (self._Va ** 2)) * MAV.S_wing)
fx_aero = (aero_coef * ((self.Cx(self._alpha) + (((self.Cx_q(self._alpha) * MAV.c) / (2 * self._Va)) * q)) + (self.Cx_deltae(self._alpha) * delta_e)))
fy_aero = (aero_coef * (((((MAV.C_Y_0 + (MAV.C_Y_beta * self._beta)) + (((MAV.C_Y_p * b) / (2 * self._Va)) * p)) + (((cyr * b) / (2 * self._Va)) * r)) + (cydeltaa * delta_a)) + (cydeltar * delta_r)))
fz_aero = (aero_coef * ((self.Cz(self._alpha) + (((self.Cz_q(self._alpha) * MAV.c) / (2 * self._Va)) * q)) + (self.Cz_deltae(self._alpha) * delta_e)))
Mx_aero = ((aero_coef * MAV.b) * (((((MAV.C_ell_0 + (MAV.C_ell_beta * self._beta)) + (((MAV.C_ell_p * b) / (2 * self._Va)) * p)) + (((MAV.C_ell_r * b) / (2 * self._Va)) * r)) + (MAV.C_ell_delta_a * delta_a)) + (MAV.C_ell_delta_r * delta_r)))
My_aero = ((aero_coef * MAV.c) * (((MAV.C_m_0 + (MAV.C_m_alpha * self._alpha)) + (((MAV.C_m_q * MAV.c) / (2 * self._Va)) * q)) + (MAV.C_m_delta_e * delta_e)))
Mz_aero = ((aero_coef * MAV.b) * (((((MAV.C_n_0 + (MAV.C_n_beta * self._beta)) + (((MAV.C_n_p * MAV.b) / (2 * self._Va)) * p)) + (((MAV.C_n_r * MAV.b) / (2 * self._Va)) * r)) + (MAV.C_n_delta_a * delta_a)) + (MAV.C_n_delta_r * delta_r)))
fx = ((fx_grav + fx_aero) + fx_thrust)
fy = ((fy_grav + fy_aero) + fy_thrust)
fz = ((fz_grav + fz_aero) + fz_thrust)
Mx = (Mx_aero + Mx_thrust)
My = (My_aero + My_thrust)
Mz = (Mz_aero + Mz_thrust)
self._forces[0] = fx
self._forces[1] = fy
self._forces[2] = fz
fm = np.reshape(np.array([fx, fy, fz, Mx, My, Mz]), [6, 1])
return fm
| 8,441,166,201,938,777,000
|
return the forces on the UAV based on the state, wind, and control surfaces
:param delta: np.matrix(delta_e, delta_a, delta_r, delta_t)
:return: Forces and Moments on the UAV np.matrix(Fx, Fy, Fz, Ml, Mn, Mm)
|
Lectures/MAV_Dynamics/mav_dynamics.py
|
_forces_moments
|
donnel2-cooper/drone_control
|
python
|
def _forces_moments(self, delta):
'\n return the forces on the UAV based on the state, wind, and control surfaces\n :param delta: np.matrix(delta_e, delta_a, delta_r, delta_t)\n :return: Forces and Moments on the UAV np.matrix(Fx, Fy, Fz, Ml, Mn, Mm)\n '
(phi, theta, psi) = Quaternion2Euler(self._state[6:10])
p = self._state.item(10)
q = self._state.item(11)
r = self._state.item(12)
delta_e = delta.item(0)
delta_a = delta.item(1)
delta_r = delta.item(2)
delta_t = delta.item(3)
mg = (MAV.mass * MAV.gravity)
fx_grav = ((- mg) * np.sin(theta))
fy_grav = ((mg * np.cos(theta)) * np.sin(phi))
fz_grav = ((mg * np.cos(theta)) * np.cos(phi))
(fx_thrust, Mx_thrust) = self.thrust_from_prop(delta_t)
fy_thrust = 0
fz_thrust = 0
My_thrust = 0
Mz_thrust = 0
b = MAV.b
cyp = MAV.C_Y_p
cyr = MAV.C_Y_r
cydeltaa = MAV.C_Y_delta_a
cydeltar = MAV.C_Y_delta_r
aero_coef = (((0.5 * MAV.rho) * (self._Va ** 2)) * MAV.S_wing)
fx_aero = (aero_coef * ((self.Cx(self._alpha) + (((self.Cx_q(self._alpha) * MAV.c) / (2 * self._Va)) * q)) + (self.Cx_deltae(self._alpha) * delta_e)))
fy_aero = (aero_coef * (((((MAV.C_Y_0 + (MAV.C_Y_beta * self._beta)) + (((MAV.C_Y_p * b) / (2 * self._Va)) * p)) + (((cyr * b) / (2 * self._Va)) * r)) + (cydeltaa * delta_a)) + (cydeltar * delta_r)))
fz_aero = (aero_coef * ((self.Cz(self._alpha) + (((self.Cz_q(self._alpha) * MAV.c) / (2 * self._Va)) * q)) + (self.Cz_deltae(self._alpha) * delta_e)))
Mx_aero = ((aero_coef * MAV.b) * (((((MAV.C_ell_0 + (MAV.C_ell_beta * self._beta)) + (((MAV.C_ell_p * b) / (2 * self._Va)) * p)) + (((MAV.C_ell_r * b) / (2 * self._Va)) * r)) + (MAV.C_ell_delta_a * delta_a)) + (MAV.C_ell_delta_r * delta_r)))
My_aero = ((aero_coef * MAV.c) * (((MAV.C_m_0 + (MAV.C_m_alpha * self._alpha)) + (((MAV.C_m_q * MAV.c) / (2 * self._Va)) * q)) + (MAV.C_m_delta_e * delta_e)))
Mz_aero = ((aero_coef * MAV.b) * (((((MAV.C_n_0 + (MAV.C_n_beta * self._beta)) + (((MAV.C_n_p * MAV.b) / (2 * self._Va)) * p)) + (((MAV.C_n_r * MAV.b) / (2 * self._Va)) * r)) + (MAV.C_n_delta_a * delta_a)) + (MAV.C_n_delta_r * delta_r)))
fx = ((fx_grav + fx_aero) + fx_thrust)
fy = ((fy_grav + fy_aero) + fy_thrust)
fz = ((fz_grav + fz_aero) + fz_thrust)
Mx = (Mx_aero + Mx_thrust)
My = (My_aero + My_thrust)
Mz = (Mz_aero + Mz_thrust)
self._forces[0] = fx
self._forces[1] = fy
self._forces[2] = fz
fm = np.reshape(np.array([fx, fy, fz, Mx, My, Mz]), [6, 1])
return fm
|
def _compute_K(self, F, X, variance, X2=None):
'\n The internal interface for the actual covariance matrix computation.\n\n :param F: MXNet computation type <mx.sym, mx.nd>.\n :param X: the first set of inputs to the kernel.\n :type X: MXNet NDArray or MXNet Symbol\n :param X2: (optional) the second set of arguments to the kernel. If X2 is None,\n this computes a square covariance matrix of X. In other words, X2 is internally treated as X.\n :type X2: MXNet NDArray or MXNet Symbol\n :param variance: the variance parameter.\n :type variance: MXNet NDArray or MXNet Symbol\n :return: The covariance matrix.\n :rtype: MXNet NDArray or MXNet Symbol\n '
if (X2 is None):
X2 = X
return broadcast_to_w_samples(F, variance, (X.shape[:(- 1)] + (X2.shape[(- 2)],)))
| -8,129,138,380,491,726,000
|
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None,
this computes a square covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
|
mxfusion/components/distributions/gp/kernels/static.py
|
_compute_K
|
DerrickGXD/MXFusion
|
python
|
def _compute_K(self, F, X, variance, X2=None):
'\n The internal interface for the actual covariance matrix computation.\n\n :param F: MXNet computation type <mx.sym, mx.nd>.\n :param X: the first set of inputs to the kernel.\n :type X: MXNet NDArray or MXNet Symbol\n :param X2: (optional) the second set of arguments to the kernel. If X2 is None,\n this computes a square covariance matrix of X. In other words, X2 is internally treated as X.\n :type X2: MXNet NDArray or MXNet Symbol\n :param variance: the variance parameter.\n :type variance: MXNet NDArray or MXNet Symbol\n :return: The covariance matrix.\n :rtype: MXNet NDArray or MXNet Symbol\n '
if (X2 is None):
X2 = X
return broadcast_to_w_samples(F, variance, (X.shape[:(- 1)] + (X2.shape[(- 2)],)))
|
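Broadcasting the variance over the full block makes this the constant (bias) kernel: every pair of inputs shares the same covariance. A NumPy sketch of the square and cross cases for plain 2-D inputs, ignoring the sample dimension that `broadcast_to_w_samples` handles:

import numpy as np

def const_kernel(X, variance, X2=None):
    # Every pair of points has covariance `variance`: a constant matrix.
    m = X.shape[0] if X2 is None else X2.shape[0]
    return np.full((X.shape[0], m), variance)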
def _compute_Kdiag(self, F, X, variance):
'\n The internal interface for the actual computation for the diagonal.\n\n :param F: MXNet computation type <mx.sym, mx.nd>.\n :param X: the first set of inputs to the kernel.\n :type X: MXNet NDArray or MXNet Symbol\n :param variance: the variance parameter.\n :type variance: MXNet NDArray or MXNet Symbol\n :return: The covariance matrix.\n :rtype: MXNet NDArray or MXNet Symbol\n '
return broadcast_to_w_samples(F, variance, X.shape[:(- 1)])
| -168,833,235,967,969,820
|
The internal interface for the actual computation for the diagonal.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
|
mxfusion/components/distributions/gp/kernels/static.py
|
_compute_Kdiag
|
DerrickGXD/MXFusion
|
python
|
def _compute_Kdiag(self, F, X, variance):
'\n The internal interface for the actual computation for the diagonal.\n\n :param F: MXNet computation type <mx.sym, mx.nd>.\n :param X: the first set of inputs to the kernel.\n :type X: MXNet NDArray or MXNet Symbol\n :param variance: the variance parameter.\n :type variance: MXNet NDArray or MXNet Symbol\n :return: The covariance matrix.\n :rtype: MXNet NDArray or MXNet Symbol\n '
return broadcast_to_w_samples(F, variance, X.shape[:(- 1)])
|
def _compute_K(self, F, X, variance, X2=None):
'\n The internal interface for the actual covariance matrix computation.\n\n :param F: MXNet computation type <mx.sym, mx.nd>\n :param X: the first set of inputs to the kernel.\n :type X: MXNet NDArray or MXNet Symbol\n :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square\n covariance matrix of X. In other words, X2 is internally treated as X.\n :type X2: MXNet NDArray or MXNet Symbol\n :param variance: the variance parameter.\n :type variance: MXNet NDArray or MXNet Symbol\n :return: The covariance matrix.\n :rtype: MXNet NDArray or MXNet Symbol\n '
if (X2 is None):
Imat = F.eye(N=X.shape[(- 2):(- 1)][0], ctx=self.ctx, dtype=self.dtype)
Imat = broadcast_to_w_samples(F, Imat, (X.shape[:(- 1)] + X.shape[(- 2):(- 1)]), False)
return (Imat * broadcast_to_w_samples(F, variance, (X.shape[:(- 1)] + X.shape[(- 2):(- 1)])))
else:
return F.zeros(shape=(X.shape[:(- 1)] + X2.shape[(- 2):(- 1)]), ctx=self.ctx, dtype=self.dtype)
| 8,793,004,223,122,019,000
|
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square
covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
|
mxfusion/components/distributions/gp/kernels/static.py
|
_compute_K
|
DerrickGXD/MXFusion
|
python
|
def _compute_K(self, F, X, variance, X2=None):
'\n The internal interface for the actual covariance matrix computation.\n\n :param F: MXNet computation type <mx.sym, mx.nd>\n :param X: the first set of inputs to the kernel.\n :type X: MXNet NDArray or MXNet Symbol\n :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square\n covariance matrix of X. In other words, X2 is internally treated as X.\n :type X2: MXNet NDArray or MXNet Symbol\n :param variance: the variance parameter.\n :type variance: MXNet NDArray or MXNet Symbol\n :return: The covariance matrix.\n :rtype: MXNet NDArray or MXNet Symbol\n '
if (X2 is None):
Imat = F.eye(N=X.shape[(- 2):(- 1)][0], ctx=self.ctx, dtype=self.dtype)
Imat = broadcast_to_w_samples(F, Imat, (X.shape[:(- 1)] + X.shape[(- 2):(- 1)]), False)
return (Imat * broadcast_to_w_samples(F, variance, (X.shape[:(- 1)] + X.shape[(- 2):(- 1)])))
else:
return F.zeros(shape=(X.shape[:(- 1)] + X2.shape[(- 2):(- 1)]), ctx=self.ctx, dtype=self.dtype)
|
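This second `_compute_K` is the white-noise kernel: points are independent, so the square case is variance times the identity and the cross-covariance between distinct input sets is zero. The same behavior in plain NumPy, again ignoring the sample dimension:

import numpy as np

def white_kernel(X, variance, X2=None):
    # Independent noise: variance on the diagonal, no cross-covariance.
    if X2 is None:
        return variance * np.eye(X.shape[0])
    return np.zeros((X.shape[0], X2.shape[0]))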
def _compute_Kdiag(self, F, X, variance):
'\n The internal interface for the actual computation for the diagonal of the covariance matrix.\n\n :param F: MXNet computation type <mx.sym, mx.nd>.\n :param X: the first set of inputs to the kernel.\n :type X: MXNet NDArray or MXNet Symbol\n :param variance: the variance parameter.\n :type variance: MXNet NDArray or MXNet Symbol\n :return: The covariance matrix.\n :rtype: MXNet NDArray or MXNet Symbol\n '
return broadcast_to_w_samples(F, variance, X.shape[:(- 1)])
| 3,239,860,383,167,945,700
|
The internal interface for the actual computation for the diagonal of the covariance matrix.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
|
mxfusion/components/distributions/gp/kernels/static.py
|
_compute_Kdiag
|
DerrickGXD/MXFusion
|
python
|
def _compute_Kdiag(self, F, X, variance):
'\n The internal interface for the actual computation for the diagonal of the covariance matrix.\n\n :param F: MXNet computation type <mx.sym, mx.nd>.\n :param X: the first set of inputs to the kernel.\n :type X: MXNet NDArray or MXNet Symbol\n :param variance: the variance parameter.\n :type variance: MXNet NDArray or MXNet Symbol\n :return: The covariance matrix.\n :rtype: MXNet NDArray or MXNet Symbol\n '
return broadcast_to_w_samples(F, variance, X.shape[:(- 1)])
|
def absolute_scope_name(relative_scope_name):
'Appends parent scope name to `relative_scope_name`'
base = get_scope_name()
if (len(base) > 0):
base += '/'
return (base + relative_scope_name)
| 6,378,313,978,072,777,000
|
Appends parent scope name to `relative_scope_name`
|
sandblox/util/scope.py
|
absolute_scope_name
|
SandBlox/sandblox
|
python
|
def absolute_scope_name(relative_scope_name):
base = get_scope_name()
if (len(base) > 0):
base += '/'
return (base + relative_scope_name)
|
def __init__(self):
'\n TextBotFlowLaunchResponse - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n '
self.swagger_types = {'id': 'str'}
self.attribute_map = {'id': 'id'}
self._id = None
| -4,489,177,517,340,297,700
|
TextBotFlowLaunchResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
|
build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py
|
__init__
|
MyPureCloud/platform-client-sdk-python
|
python
|
def __init__(self):
'\n TextBotFlowLaunchResponse - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n '
self.swagger_types = {'id': 'str'}
self.attribute_map = {'id': 'id'}
self._id = None
|
@property
def id(self):
'\n Gets the id of this TextBotFlowLaunchResponse.\n The session ID of the bot flow, used to send to subsequent turn requests\n\n :return: The id of this TextBotFlowLaunchResponse.\n :rtype: str\n '
return self._id
| 95,141,098,635,902,770
|
Gets the id of this TextBotFlowLaunchResponse.
The session ID of the bot flow, used to send to subsequent turn requests
:return: The id of this TextBotFlowLaunchResponse.
:rtype: str
|
build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py
|
id
|
MyPureCloud/platform-client-sdk-python
|
python
|
@property
def id(self):
'\n Gets the id of this TextBotFlowLaunchResponse.\n The session ID of the bot flow, used to send to subsequent turn requests\n\n :return: The id of this TextBotFlowLaunchResponse.\n :rtype: str\n '
return self._id
|
@id.setter
def id(self, id):
'\n Sets the id of this TextBotFlowLaunchResponse.\n The session ID of the bot flow, used to send to subsequent turn requests\n\n :param id: The id of this TextBotFlowLaunchResponse.\n :type: str\n '
self._id = id
| -1,358,825,402,861,718,000
|
Sets the id of this TextBotFlowLaunchResponse.
The session ID of the bot flow, used to send to subsequent turn requests
:param id: The id of this TextBotFlowLaunchResponse.
:type: str
|
build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py
|
id
|
MyPureCloud/platform-client-sdk-python
|
python
|
@id.setter
def id(self, id):
'\n Sets the id of this TextBotFlowLaunchResponse.\n The session ID of the bot flow, used to send to subsequent turn requests\n\n :param id: The id of this TextBotFlowLaunchResponse.\n :type: str\n '
self._id = id
|
def to_dict(self):
'\n Returns the model properties as a dict\n '
result = {}
for (attr, _) in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
| 2,191,974,537,531,847,000
|
Returns the model properties as a dict
|
build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py
|
to_dict
|
MyPureCloud/platform-client-sdk-python
|
python
|
def to_dict(self):
'\n \n '
result = {}
for (attr, _) in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
def to_json(self):
'\n Returns the model as raw JSON\n '
return json.dumps(sanitize_for_serialization(self.to_dict()))
| 201,001,069,348,168,640
|
Returns the model as raw JSON
|
build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py
|
to_json
|
MyPureCloud/platform-client-sdk-python
|
python
|
def to_json(self):
'\n \n '
return json.dumps(sanitize_for_serialization(self.to_dict()))
|
def to_str(self):
'\n Returns the string representation of the model\n '
return pformat(self.to_dict())
| -3,531,024,894,346,511,000
|
Returns the string representation of the model
|
build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py
|
to_str
|
MyPureCloud/platform-client-sdk-python
|
python
|
def to_str(self):
'\n \n '
return pformat(self.to_dict())
|
def __repr__(self):
'\n For `print` and `pprint`\n '
return self.to_str()
| 5,853,962,500,611,353,000
|
For `print` and `pprint`
|
build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py
|
__repr__
|
MyPureCloud/platform-client-sdk-python
|
python
|
def __repr__(self):
'\n \n '
return self.to_str()
|
def __eq__(self, other):
'\n Returns true if both objects are equal\n '
return (self.__dict__ == other.__dict__)
| 3,599,733,221,149,238,300
|
Returns true if both objects are equal
|
build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py
|
__eq__
|
MyPureCloud/platform-client-sdk-python
|
python
|
def __eq__(self, other):
'\n \n '
return (self.__dict__ == other.__dict__)
|
def __ne__(self, other):
'\n Returns true if both objects are not equal\n '
return (not (self == other))
| 3,600,423,175,817,510,400
|
Returns true if both objects are not equal
|
build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py
|
__ne__
|
MyPureCloud/platform-client-sdk-python
|
python
|
def __ne__(self, other):
'\n \n '
return (not (self == other))
|
@property
def offline_status(self) -> MetadataManagerMessage:
'\n Status to publish when the manager goes offline.\n\n This status should ensure that any other components relying\n on this data go into a safe state.\n '
return MetadataManagerMessage(status=MetadataManagerMessage.Status.STOPPED, metadata=Metadata.init(self.config))
| -8,665,394,748,630,808,000
|
Status to publish when the manager goes offline.
This status should ensure that any other components relying
on this data go into a safe state.
|
astoria/astmetad/metadata_manager.py
|
offline_status
|
trickeydan/astoria
|
python
|
@property
def offline_status(self) -> MetadataManagerMessage:
'\n Status to publish when the manager goes offline.\n\n This status should ensure that any other components relying\n on this data go into a safe state.\n '
return MetadataManagerMessage(status=MetadataManagerMessage.Status.STOPPED, metadata=Metadata.init(self.config))
|
async def main(self) -> None:
'Main routine for astmetad.'
self.update_status()
(await self.wait_loop())
for (uuid, info) in self._cur_disks.items():
asyncio.ensure_future(self.handle_disk_removal(uuid, info))
| 4,665,077,025,833,202,000
|
Main routine for astmetad.
|
astoria/astmetad/metadata_manager.py
|
main
|
trickeydan/astoria
|
python
|
async def main(self) -> None:
self.update_status()
(await self.wait_loop())
for (uuid, info) in self._cur_disks.items():
asyncio.ensure_future(self.handle_disk_removal(uuid, info))
|
async def handle_disk_insertion(self, uuid: DiskUUID, disk_info: DiskInfo) -> None:
'Handle a disk insertion.'
LOGGER.debug(f'Disk inserted: {uuid} ({disk_info.disk_type})')
for (disk_type, lifecycle_class) in self.DISK_TYPE_LIFECYCLE_MAP.items():
if (disk_info.disk_type is disk_type):
LOGGER.info(f'{disk_type.name} disk {uuid} is mounted at {disk_info.mount_path}')
if (self._lifecycles[disk_type] is None):
LOGGER.debug(f'Starting lifecycle for {uuid}')
self._lifecycles[disk_type] = lifecycle_class(uuid, disk_info, self.config)
self.update_status()
else:
LOGGER.warn('Cannot use metadata, there is already a lifecycle present.')
| 758,388,486,186,581,200
|
Handle a disk insertion.
|
astoria/astmetad/metadata_manager.py
|
handle_disk_insertion
|
trickeydan/astoria
|
python
|
async def handle_disk_insertion(self, uuid: DiskUUID, disk_info: DiskInfo) -> None:
LOGGER.debug(f'Disk inserted: {uuid} ({disk_info.disk_type})')
for (disk_type, lifecycle_class) in self.DISK_TYPE_LIFECYCLE_MAP.items():
if (disk_info.disk_type is disk_type):
LOGGER.info(f'{disk_type.name} disk {uuid} is mounted at {disk_info.mount_path}')
if (self._lifecycles[disk_type] is None):
LOGGER.debug(f'Starting lifecycle for {uuid}')
self._lifecycles[disk_type] = lifecycle_class(uuid, disk_info, self.config)
self.update_status()
else:
LOGGER.warn('Cannot use metadata, there is already a lifecycle present.')
|
async def handle_disk_removal(self, uuid: DiskUUID, disk_info: DiskInfo) -> None:
'Handle a disk removal.'
LOGGER.debug(f'Disk removed: {uuid} ({disk_info.disk_type})')
for (disk_type, lifecycle_class) in self.DISK_TYPE_LIFECYCLE_MAP.items():
if (disk_info.disk_type is disk_type):
LOGGER.info(f'Metadata disk {uuid} removed ({disk_info.mount_path})')
lifecycle = self._lifecycles[disk_type]
if ((lifecycle is not None) and (lifecycle._uuid == disk_info.uuid)):
self._lifecycles[disk_type] = None
self.update_status()
| 3,454,666,556,045,228,000
|
Handle a disk removal.
|
astoria/astmetad/metadata_manager.py
|
handle_disk_removal
|
trickeydan/astoria
|
python
|
async def handle_disk_removal(self, uuid: DiskUUID, disk_info: DiskInfo) -> None:
LOGGER.debug(f'Disk removed: {uuid} ({disk_info.disk_type})')
for (disk_type, lifecycle_class) in self.DISK_TYPE_LIFECYCLE_MAP.items():
if (disk_info.disk_type is disk_type):
LOGGER.info(f'Metadata disk {uuid} removed ({disk_info.mount_path})')
lifecycle = self._lifecycles[disk_type]
if ((lifecycle is not None) and (lifecycle._uuid == disk_info.uuid)):
self._lifecycles[disk_type] = None
self.update_status()
|
async def handle_mutation_request(self, request: MetadataSetManagerRequest) -> RequestResponse:
'Handle a request to mutate metadata.'
if (request.attr not in self.MUTABLE_ATTRS_BY_REQUEST):
return RequestResponse(uuid=request.uuid, success=False, reason=f'{request.attr} is not a mutable attribute')
if (len(request.value) == 0):
try:
del self._requested_data[request.attr]
LOGGER.info(f'{request.attr} override has been removed by request')
self.update_status()
except KeyError:
pass
else:
if (request.attr in self._requested_data):
old_value = self._requested_data[request.attr]
else:
old_value = None
try:
self._requested_data[request.attr] = request.value
self.update_status()
LOGGER.info(f'{request.attr} has been overridden to {request.value} by request')
except ValidationError as e:
if (old_value is not None):
self._requested_data[request.attr] = old_value
LOGGER.warning(f'Unable to set {request.attr} to {request.value}.')
LOGGER.warning(str(e))
return RequestResponse(uuid=request.uuid, success=False, reason=f'{request.value} is not a valid value for {request.attr}')
return RequestResponse(uuid=request.uuid, success=True)
| 3,309,932,866,197,272,000
|
Handle a request to mutate metadata.
|
astoria/astmetad/metadata_manager.py
|
handle_mutation_request
|
trickeydan/astoria
|
python
|
async def handle_mutation_request(self, request: MetadataSetManagerRequest) -> RequestResponse:
if (request.attr not in self.MUTABLE_ATTRS_BY_REQUEST):
return RequestResponse(uuid=request.uuid, success=False, reason=f'{request.attr} is not a mutable attribute')
if (len(request.value) == 0):
try:
del self._requested_data[request.attr]
LOGGER.info(f'{request.attr} override has been removed by request')
self.update_status()
except KeyError:
pass
else:
if (request.attr in self._requested_data):
old_value = self._requested_data[request.attr]
else:
old_value = None
try:
self._requested_data[request.attr] = request.value
self.update_status()
LOGGER.info(f'{request.attr} has been overridden to {request.value} by request')
except ValidationError as e:
if (old_value is not None):
self._requested_data[request.attr] = old_value
LOGGER.warning(f'Unable to set {request.attr} to {request.value}.')
LOGGER.warning(str(e))
return RequestResponse(uuid=request.uuid, success=False, reason=f'{request.value} is not a valid value for {request.attr}')
return RequestResponse(uuid=request.uuid, success=True)
|
def get_current_metadata(self) -> Metadata:
'\n Calculate the current metadata.\n\n Takes the default, static metadata based on the config and system\n information. It then overlays data from other sources in a priority order,\n whereby each source has a set of permitted attributes in the metadata that\n can be overridden.\n '
metadata_sources: List[Tuple[(Set[str], Dict[(str, str)])]] = [(self.CACHED_ATTRS, self._cache.data), (self.MUTABLE_ATTRS_BY_REQUEST, self._requested_data)]
for (disk_type, val) in self._lifecycles.items():
if (val is not None):
metadata_sources.append((self.DISK_TYPE_OVERRIDE_MAP[disk_type], val.diff_data))
metadata = Metadata.init(self.config)
for (permitted_attrs, diff_data) in metadata_sources:
for (k, v) in diff_data.items():
if (k in permitted_attrs):
metadata.__setattr__(k, v)
else:
LOGGER.warning(f'There was an attempt to mutate {k}, but it was not permitted.')
for key in self.CACHED_ATTRS:
self._cache.update_cached_attr(key, metadata.__getattribute__(key))
return metadata
| -9,035,408,928,859,739,000
|
Calculate the current metadata.
Takes the default, static metadata based on the config and system
information. It then overlays data from other sources in a priority order,
whereby each source has a set of permitted attributes in the metadata that
can be overridden.
|
astoria/astmetad/metadata_manager.py
|
get_current_metadata
|
trickeydan/astoria
|
python
|
def get_current_metadata(self) -> Metadata:
metadata_sources: List[Tuple[(Set[str], Dict[(str, str)])]] = [(self.CACHED_ATTRS, self._cache.data), (self.MUTABLE_ATTRS_BY_REQUEST, self._requested_data)]
for (disk_type, val) in self._lifecycles.items():
if (val is not None):
metadata_sources.append((self.DISK_TYPE_OVERRIDE_MAP[disk_type], val.diff_data))
metadata = Metadata.init(self.config)
for (permitted_attrs, diff_data) in metadata_sources:
for (k, v) in diff_data.items():
if (k in permitted_attrs):
metadata.__setattr__(k, v)
else:
LOGGER.warning(f'There was an attempt to mutate {k}, but it was not permitted.')
for key in self.CACHED_ATTRS:
self._cache.update_cached_attr(key, metadata.__getattribute__(key))
return metadata
|
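The priority-ordered overlay above can be illustrated in isolation. This is a simplified, self-contained sketch of the same merge idea with invented attribute names, not astoria's code:

defaults = {'arena': 'A', 'zone': 0, 'name': 'robot'}
sources = [
    ({'zone'}, {'zone': 2}),               # e.g. cached attributes
    ({'arena', 'zone'}, {'arena': 'B'}),   # e.g. requested overrides, applied later
]
metadata = dict(defaults)
for permitted, diff in sources:
    for key, value in diff.items():
        if key in permitted:
            metadata[key] = value          # later sources win for permitted keys
print(metadata)  # {'arena': 'B', 'zone': 2, 'name': 'robot'}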
def update_status(self) -> None:
'Update the status of the manager.'
self.status = MetadataManagerMessage(status=MetadataManagerMessage.Status.RUNNING, metadata=self.get_current_metadata())
| 5,634,726,933,070,275,000
|
Update the status of the manager.
|
astoria/astmetad/metadata_manager.py
|
update_status
|
trickeydan/astoria
|
python
|
def update_status(self) -> None:
self.status = MetadataManagerMessage(status=MetadataManagerMessage.Status.RUNNING, metadata=self.get_current_metadata())
|
def _set_nofile(nofile_atleast=4096):
'\n sets the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on\n parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan 256;\n a temporary setting that expires with the Python session.\n '
try:
import resource as res
except ImportError:
res = None
from .logging import default_logger
if (res is None):
return ((None,) * 2)
(soft, ohard) = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if (soft < nofile_atleast):
soft = nofile_atleast
if (hard < soft):
hard = soft
default_logger.debug(f'setting soft & hard ulimit -n {soft} {hard}')
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
default_logger.warning(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
default_logger.warning('failed to set ulimit, giving up')
(soft, hard) = res.getrlimit(res.RLIMIT_NOFILE)
default_logger.debug(f'ulimit -n soft,hard: {soft} {hard}')
return (soft, hard)
| -2,900,918,797,510,906,400
|
sets the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan 256;
a temporary setting that expires with the Python session.
|
jina/__init__.py
|
_set_nofile
|
bsherifi/jina
|
python
|
def _set_nofile(nofile_atleast=4096):
try:
import resource as res
except ImportError:
res = None
from .logging import default_logger
if (res is None):
return ((None,) * 2)
(soft, ohard) = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if (soft < nofile_atleast):
soft = nofile_atleast
if (hard < soft):
hard = soft
default_logger.debug(f'setting soft & hard ulimit -n {soft} {hard}')
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
default_logger.warning(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
default_logger.warning('failed to set ulimit, giving up')
(soft, hard) = res.getrlimit(res.RLIMIT_NOFILE)
default_logger.debug(f'ulimit -n soft,hard: {soft} {hard}')
return (soft, hard)
|
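A usage sketch for _set_nofile. The resource module exists only on POSIX systems, which is why the function returns (None, None) when it cannot be imported; the limit change lasts only for the current process:

soft, hard = _set_nofile(8192)
if soft is None:
    print('resource module unavailable (e.g. Windows); limits unchanged')
else:
    print(f'open-file limits for this process: soft={soft} hard={hard}')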
def __init__(self, **kwargs):
'Initialize RandomForestClassifier instance.\n '
warnings.filterwarnings(action='ignore', category=ChangedBehaviorWarning)
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings(action='ignore', category=DataDimensionalityWarning)
warnings.filterwarnings(action='ignore', category=EfficiencyWarning)
warnings.filterwarnings(action='ignore', category=FitFailedWarning)
warnings.filterwarnings(action='ignore', category=NonBLASDotWarning)
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
self._params = dict(n_estimators=ParameterDefinition(MinMax(min=10, max=111), np.uint))
self.__random_forest_classifier = RF()
| 8,789,940,093,005,560,000
|
Initialize RandomForestClassifier instance.
|
niaaml/classifiers/random_forest.py
|
__init__
|
adi3/NiaAML
|
python
|
def __init__(self, **kwargs):
warnings.filterwarnings(action='ignore', category=ChangedBehaviorWarning)
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings(action='ignore', category=DataDimensionalityWarning)
warnings.filterwarnings(action='ignore', category=EfficiencyWarning)
warnings.filterwarnings(action='ignore', category=FitFailedWarning)
warnings.filterwarnings(action='ignore', category=NonBLASDotWarning)
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
self._params = dict(n_estimators=ParameterDefinition(MinMax(min=10, max=111), np.uint))
self.__random_forest_classifier = RF()
|
def set_parameters(self, **kwargs):
'Set the parameters/arguments of the algorithm.\n '
self.__random_forest_classifier.set_params(**kwargs)
| -13,568,839,352,867,336
|
Set the parameters/arguments of the algorithm.
|
niaaml/classifiers/random_forest.py
|
set_parameters
|
adi3/NiaAML
|
python
|
def set_parameters(self, **kwargs):
self.__random_forest_classifier.set_params(**kwargs)
|
def fit(self, x, y, **kwargs):
'Fit RandomForestClassifier.\n\n Arguments:\n x (pandas.core.frame.DataFrame): n samples to classify.\n y (pandas.core.series.Series): n classes of the samples in the x array.\n\n Returns:\n None\n '
self.__random_forest_classifier.fit(x, y)
| -778,738,233,557,275,900
|
Fit RandomForestClassifier.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
y (pandas.core.series.Series): n classes of the samples in the x array.
Returns:
None
|
niaaml/classifiers/random_forest.py
|
fit
|
adi3/NiaAML
|
python
|
def fit(self, x, y, **kwargs):
self.__random_forest_classifier.fit(x, y)
|
def predict(self, x, **kwargs):
'Predict class for each sample (row) in x.\n\n Arguments:\n x (pandas.core.frame.DataFrame): n samples to classify.\n\n Returns:\n pandas.core.series.Series: n predicted classes.\n '
return self.__random_forest_classifier.predict(x)
| 3,991,637,054,213,888,000
|
Predict class for each sample (row) in x.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
Returns:
pandas.core.series.Series: n predicted classes.
|
niaaml/classifiers/random_forest.py
|
predict
|
adi3/NiaAML
|
python
|
def predict(self, x, **kwargs):
return self.__random_forest_classifier.predict(x)
|
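A fit/predict sketch for this wrapper, assuming a pandas DataFrame/Series pair. The import path is inferred from the file path above and the class name is assumed to be RandomForest; check the package's exports before relying on either:

import pandas as pd
from niaaml.classifiers import RandomForest  # assumed export

clf = RandomForest()
clf.set_parameters(n_estimators=50)  # forwarded to the underlying sklearn model
X = pd.DataFrame({'f1': [0, 1, 0, 1], 'f2': [1, 1, 0, 0]})
y = pd.Series([0, 1, 0, 1])
clf.fit(X, y)
print(clf.predict(X))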
def to_string(self):
'User friendly representation of the object.\n\n Returns:\n str: User friendly representation of the object.\n '
return Classifier.to_string(self).format(name=self.Name, args=self._parameters_to_string(self.__random_forest_classifier.get_params()))
| -5,826,239,005,028,580,000
|
User friendly representation of the object.
Returns:
str: User friendly representation of the object.
|
niaaml/classifiers/random_forest.py
|
to_string
|
adi3/NiaAML
|
python
|
def to_string(self):
return Classifier.to_string(self).format(name=self.Name, args=self._parameters_to_string(self.__random_forest_classifier.get_params()))
|
def list(self, resource_group_name, resource_name, **kwargs):
'List private endpoint connections.\n\n List private endpoint connection properties.\n\n :param resource_group_name: The name of the resource group that contains the IoT hub.\n :type resource_group_name: str\n :param resource_name: The name of the IoT hub.\n :type resource_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: list of PrivateEndpointConnection, or the result of cls(response)\n :rtype: list[~azure.mgmt.iothub.models.PrivateEndpointConnection]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2020-03-01'
accept = 'application/json'
url = self.list.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'resourceName': self._serialize.url('resource_name', resource_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[PrivateEndpointConnection]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
| -7,302,230,787,975,572,000
|
List private endpoint connections.
List private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of PrivateEndpointConnection, or the result of cls(response)
:rtype: list[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/operations/_private_endpoint_connections_operations.py
|
list
|
4thel00z/microsoft-crap-that-doesnt-work
|
python
|
def list(self, resource_group_name, resource_name, **kwargs):
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2020-03-01'
accept = 'application/json'
url = self.list.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'resourceName': self._serialize.url('resource_name', resource_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[PrivateEndpointConnection]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
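A hedged usage sketch for the list operation through the generated client. Resource names are placeholders; IotHubClient and DefaultAzureCredential are the usual entry points for this SDK family, but verify against your installed versions:

from azure.identity import DefaultAzureCredential
from azure.mgmt.iothub import IotHubClient

client = IotHubClient(DefaultAzureCredential(), subscription_id='<subscription-id>')
connections = client.private_endpoint_connections.list(
    resource_group_name='my-rg', resource_name='my-iothub')
for connection in connections:
    print(connection.name)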
def get(self, resource_group_name, resource_name, private_endpoint_connection_name, **kwargs):
'Get private endpoint connection.\n\n Get private endpoint connection properties.\n\n :param resource_group_name: The name of the resource group that contains the IoT hub.\n :type resource_group_name: str\n :param resource_name: The name of the IoT hub.\n :type resource_name: str\n :param private_endpoint_connection_name: The name of the private endpoint connection.\n :type private_endpoint_connection_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: PrivateEndpointConnection, or the result of cls(response)\n :rtype: ~azure.mgmt.iothub.models.PrivateEndpointConnection\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2020-03-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'resourceName': self._serialize.url('resource_name', resource_name, 'str'), 'privateEndpointConnectionName': self._serialize.url('private_endpoint_connection_name', private_endpoint_connection_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
| -2,780,641,515,259,668,000
|
Get private endpoint connection.
Get private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/operations/_private_endpoint_connections_operations.py
|
get
|
4thel00z/microsoft-crap-that-doesnt-work
|
python
|
def get(self, resource_group_name, resource_name, private_endpoint_connection_name, **kwargs):
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2020-03-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'resourceName': self._serialize.url('resource_name', resource_name, 'str'), 'privateEndpointConnectionName': self._serialize.url('private_endpoint_connection_name', private_endpoint_connection_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
def begin_update(self, resource_group_name, resource_name, private_endpoint_connection_name, private_endpoint_connection, **kwargs):
'Update private endpoint connection.\n\n Update the status of a private endpoint connection with the specified name.\n\n :param resource_group_name: The name of the resource group that contains the IoT hub.\n :type resource_group_name: str\n :param resource_name: The name of the IoT hub.\n :type resource_name: str\n :param private_endpoint_connection_name: The name of the private endpoint connection.\n :type private_endpoint_connection_name: str\n :param private_endpoint_connection: The private endpoint connection with updated properties.\n :type private_endpoint_connection: ~azure.mgmt.iothub.models.PrivateEndpointConnection\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.models.PrivateEndpointConnection]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._update_initial(resource_group_name=resource_group_name, resource_name=resource_name, private_endpoint_connection_name=private_endpoint_connection_name, private_endpoint_connection=private_endpoint_connection, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'resourceName': self._serialize.url('resource_name', resource_name, 'str'), 'privateEndpointConnectionName': self._serialize.url('private_endpoint_connection_name', private_endpoint_connection_name, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
| 3,924,322,189,743,012,000
|
Update private endpoint connection.
Update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties.
:type private_endpoint_connection: ~azure.mgmt.iothub.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/operations/_private_endpoint_connections_operations.py
|
begin_update
|
4thel00z/microsoft-crap-that-doesnt-work
|
python
|
def begin_update(self, resource_group_name, resource_name, private_endpoint_connection_name, private_endpoint_connection, **kwargs):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._update_initial(resource_group_name=resource_group_name, resource_name=resource_name, private_endpoint_connection_name=private_endpoint_connection_name, private_endpoint_connection=private_endpoint_connection, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'resourceName': self._serialize.url('resource_name', resource_name, 'str'), 'privateEndpointConnectionName': self._serialize.url('private_endpoint_connection_name', private_endpoint_connection_name, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
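Because begin_update returns an LROPoller, callers usually block on .result() to obtain the final PrivateEndpointConnection. A sketch with placeholder names, assuming updated_connection was fetched with get() and its connection state mutated beforehand:

poller = client.private_endpoint_connections.begin_update(
    resource_group_name='my-rg',
    resource_name='my-iothub',
    private_endpoint_connection_name='my-pec',
    private_endpoint_connection=updated_connection,  # e.g. result of get() with new status
)
result = poller.result()  # waits for the ARM long-running operation to complete
print(result.name)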
def begin_delete(self, resource_group_name, resource_name, private_endpoint_connection_name, **kwargs):
'Delete private endpoint connection.\n\n Delete private endpoint connection with the specified name.\n\n :param resource_group_name: The name of the resource group that contains the IoT hub.\n :type resource_group_name: str\n :param resource_name: The name of the IoT hub.\n :type resource_name: str\n :param private_endpoint_connection_name: The name of the private endpoint connection.\n :type private_endpoint_connection_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.models.PrivateEndpointConnection]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._delete_initial(resource_group_name=resource_group_name, resource_name=resource_name, private_endpoint_connection_name=private_endpoint_connection_name, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'resourceName': self._serialize.url('resource_name', resource_name, 'str'), 'privateEndpointConnectionName': self._serialize.url('private_endpoint_connection_name', private_endpoint_connection_name, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
| -6,792,704,747,544,680,000
|
Delete private endpoint connection.
Delete private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/operations/_private_endpoint_connections_operations.py
|
begin_delete
|
4thel00z/microsoft-crap-that-doesnt-work
|
python
|
def begin_delete(self, resource_group_name, resource_name, private_endpoint_connection_name, **kwargs):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._delete_initial(resource_group_name=resource_group_name, resource_name=resource_name, private_endpoint_connection_name=private_endpoint_connection_name, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'resourceName': self._serialize.url('resource_name', resource_name, 'str'), 'privateEndpointConnectionName': self._serialize.url('private_endpoint_connection_name', private_endpoint_connection_name, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
def split_train_val_forwardChaining(sequence, numInputs, numOutputs, numJumps):
' Returns sets to train and cross-validate a model using forward chaining technique\n \n Parameters:\n sequence (array) : Full training dataset\n numInputs (int) : Number of inputs X and Xcv used at each training and validation\n numOutputs (int) : Number of outputs y and ycv used at each training and validation\n numJumps (int) : Number of sequence samples to be ignored between (X,y) sets\n\n Returns:\n X (2D array) : Array of numInputs arrays used for training\n y (2D array) : Array of numOutputs arrays used for training\n Xcv (2D array) : Array of numInputs arrays used for cross-validation\n ycv (2D array) : Array of numOutputs arrays used for cross-validation\n \n '
(X, y, Xcv, ycv) = (dict(), dict(), dict(), dict())
j = 2
while 1:
start_ix = 0
end_ix = 0
startCv_ix = 0
endCv_ix = 0
(X_it, y_it, Xcv_it, ycv_it) = (list(), list(), list(), list())
i = 0
while (i < j):
start_ix = (numJumps * i)
end_ix = (start_ix + numInputs)
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:(end_ix + numOutputs)]
y_it.append(seq_y)
i += 1
if (((end_ix + numInputs) + numOutputs) > len(sequence)):
break
startCv_ix = end_ix
endCv_ix = (end_ix + numInputs)
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:(endCv_ix + numOutputs)]
ycv_it.append(seq_ycv)
X[(j - 2)] = np.array(X_it)
y[(j - 2)] = np.array(y_it)
Xcv[(j - 2)] = np.array(Xcv_it)
ycv[(j - 2)] = np.array(ycv_it)
j += 1
if ((len(X) == 0) or (len(Xcv) == 0)):
print('The sequence provided does not have enough samples to populate the return arrays')
return (X, y, Xcv, ycv)
| 8,890,680,324,160,942,000
|
Returns sets to train and cross-validate a model using forward chaining technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training and validation
numOutputs (int) : Number of outputs y and ycv used at each training and validation
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
|
tsxv/splitTrainVal.py
|
split_train_val_forwardChaining
|
DidierRLopes/TimeSeriesCrossValidation
|
python
|
def split_train_val_forwardChaining(sequence, numInputs, numOutputs, numJumps):
(X, y, Xcv, ycv) = (dict(), dict(), dict(), dict())
j = 2
while 1:
start_ix = 0
end_ix = 0
startCv_ix = 0
endCv_ix = 0
(X_it, y_it, Xcv_it, ycv_it) = (list(), list(), list(), list())
i = 0
while (i < j):
start_ix = (numJumps * i)
end_ix = (start_ix + numInputs)
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:(end_ix + numOutputs)]
y_it.append(seq_y)
i += 1
if (((end_ix + numInputs) + numOutputs) > len(sequence)):
break
startCv_ix = end_ix
endCv_ix = (end_ix + numInputs)
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:(endCv_ix + numOutputs)]
ycv_it.append(seq_ycv)
X[(j - 2)] = np.array(X_it)
y[(j - 2)] = np.array(y_it)
Xcv[(j - 2)] = np.array(Xcv_it)
ycv[(j - 2)] = np.array(ycv_it)
j += 1
if ((len(X) == 0) or (len(Xcv) == 0)):
print('The sequence provided does not have enough samples to populate the return arrays')
return (X, y, Xcv, ycv)
|
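A small usage sketch for the splitter above. Each returned value is a dict mapping a fold index to the arrays for that training/validation round:

import numpy as np

sequence = np.arange(20)
X, y, Xcv, ycv = split_train_val_forwardChaining(
    sequence, numInputs=4, numOutputs=2, numJumps=2)
for fold in X:
    print(fold, X[fold].shape, y[fold].shape, Xcv[fold].shape, ycv[fold].shape)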
def split_train_val_kFold(sequence, numInputs, numOutputs, numJumps):
' Returns sets to train and cross-validate a model using K-Fold technique\n \n Parameters:\n sequence (array) : Full training dataset\n numInputs (int) : Number of inputs X and Xcv used at each training\n numOutputs (int) : Number of outputs y and ycv used at each training\n numJumps (int) : Number of sequence samples to be ignored between (X,y) sets\n\n Returns:\n X (2D array) : Array of numInputs arrays used for training\n y (2D array) : Array of numOutputs arrays used for training\n Xcv (2D array) : Array of numInputs arrays used for cross-validation\n ycv (2D array) : Array of numOutputs arrays used for cross-validation\n \n '
(X, y, Xcv, ycv) = (dict(), dict(), dict(), dict())
j = 2
theEnd = 0
while 1:
start_ix = 0
end_ix = 0
startCv_ix = 0
endCv_ix = 0
(X_it, y_it, Xcv_it, ycv_it) = (list(), list(), list(), list())
i = 0
n = 0
while 1:
if (i != j):
start_ix = (endCv_ix + (numJumps * n))
end_ix = (start_ix + numInputs)
n += 1
if ((end_ix + numOutputs) > len(sequence)):
break
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:(end_ix + numOutputs)]
y_it.append(seq_y)
else:
startCv_ix = end_ix
endCv_ix = (end_ix + numInputs)
n = 0
if ((endCv_ix + numOutputs) > len(sequence)):
theEnd = 1
break
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:(endCv_ix + numOutputs)]
ycv_it.append(seq_ycv)
i += 1
if (theEnd == 1):
break
X[(j - 2)] = np.array(X_it)
y[(j - 2)] = np.array(y_it)
Xcv[(j - 2)] = np.array(Xcv_it)
ycv[(j - 2)] = np.array(ycv_it)
j += 1
if ((len(X) == 0) or (len(Xcv) == 0)):
print('The sequence provided does not have enough samples to populate the return arrays')
return (X, y, Xcv, ycv)
| -4,543,209,539,997,643,000
|
Returns sets to train and cross-validate a model using K-Fold technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training
numOutputs (int) : Number of outputs y and ycv used at each training
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
|
tsxv/splitTrainVal.py
|
split_train_val_kFold
|
DidierRLopes/TimeSeriesCrossValidation
|
python
|
def split_train_val_kFold(sequence, numInputs, numOutputs, numJumps):
(X, y, Xcv, ycv) = (dict(), dict(), dict(), dict())
j = 2
theEnd = 0
while 1:
start_ix = 0
end_ix = 0
startCv_ix = 0
endCv_ix = 0
(X_it, y_it, Xcv_it, ycv_it) = (list(), list(), list(), list())
i = 0
n = 0
while 1:
if (i != j):
start_ix = (endCv_ix + (numJumps * n))
end_ix = (start_ix + numInputs)
n += 1
if ((end_ix + numOutputs) > len(sequence)):
break
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:(end_ix + numOutputs)]
y_it.append(seq_y)
else:
startCv_ix = end_ix
endCv_ix = (end_ix + numInputs)
n = 0
if ((endCv_ix + numOutputs) > len(sequence)):
theEnd = 1
break
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:(endCv_ix + numOutputs)]
ycv_it.append(seq_ycv)
i += 1
if (theEnd == 1):
break
X[(j - 2)] = np.array(X_it)
y[(j - 2)] = np.array(y_it)
Xcv[(j - 2)] = np.array(Xcv_it)
ycv[(j - 2)] = np.array(ycv_it)
j += 1
if ((len(X) == 0) or (len(Xcv) == 0)):
print('The sequence provided does not has size enough to populate the return arrays')
return (X, y, Xcv, ycv)
|
def split_train_val_groupKFold(sequence, numInputs, numOutputs, numJumps):
' Returns sets to train and cross-validate a model using group K-Fold technique\n \n Parameters:\n sequence (array) : Full training dataset\n numInputs (int) : Number of inputs X and Xcv used at each training\n numOutputs (int) : Number of outputs y and ycv used at each training\n numJumps (int) : Number of sequence samples to be ignored between (X,y) sets\n\n Returns:\n X (2D array) : Array of numInputs arrays used for training\n y (2D array) : Array of numOutputs arrays used for training\n Xcv (2D array) : Array of numInputs arrays used for cross-validation\n ycv (2D array) : Array of numOutputs arrays used for cross-validation\n \n '
(X, y, Xcv, ycv) = (dict(), dict(), dict(), dict())
for j in np.arange(5):
start_ix = 0
end_ix = 0
startCv_ix = 0
endCv_ix = 0
(X_it, y_it, Xcv_it, ycv_it) = (list(), list(), list(), list())
i = 0
n = 0
while 1:
if ((((i + 1) + j) % 5) != 0):
start_ix = (endCv_ix + (numJumps * n))
end_ix = (start_ix + numInputs)
n += 1
if ((end_ix + numOutputs) > (len(sequence) - 1)):
break
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:(end_ix + numOutputs)]
y_it.append(seq_y)
else:
startCv_ix = end_ix
endCv_ix = (end_ix + numInputs)
n = 0
if ((endCv_ix + numOutputs) > len(sequence)):
break
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:(endCv_ix + numOutputs)]
ycv_it.append(seq_ycv)
i += 1
X[j] = np.array(X_it)
y[j] = np.array(y_it)
Xcv[j] = np.array(Xcv_it)
ycv[j] = np.array(ycv_it)
if ((len(X) == 0) or (len(Xcv) == 0)):
print('The sequence provided does not have enough samples to populate the return arrays')
return (X, y, Xcv, ycv)
| -4,419,774,061,934,965,000
|
Returns sets to train and cross-validate a model using group K-Fold technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training
numOutputs (int) : Number of outputs y and ycv used at each training
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
|
tsxv/splitTrainVal.py
|
split_train_val_groupKFold
|
DidierRLopes/TimeSeriesCrossValidation
|
python
|
def split_train_val_groupKFold(sequence, numInputs, numOutputs, numJumps):
(X, y, Xcv, ycv) = (dict(), dict(), dict(), dict())
for j in np.arange(5):
start_ix = 0
end_ix = 0
startCv_ix = 0
endCv_ix = 0
(X_it, y_it, Xcv_it, ycv_it) = (list(), list(), list(), list())
i = 0
n = 0
while 1:
if ((((i + 1) + j) % 5) != 0):
start_ix = (endCv_ix + (numJumps * n))
end_ix = (start_ix + numInputs)
n += 1
if ((end_ix + numOutputs) > (len(sequence) - 1)):
break
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:(end_ix + numOutputs)]
y_it.append(seq_y)
else:
startCv_ix = end_ix
endCv_ix = (end_ix + numInputs)
n = 0
if ((endCv_ix + numOutputs) > len(sequence)):
break
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:(endCv_ix + numOutputs)]
ycv_it.append(seq_ycv)
i += 1
X[j] = np.array(X_it)
y[j] = np.array(y_it)
Xcv[j] = np.array(Xcv_it)
ycv[j] = np.array(ycv_it)
if ((len(X) == 0) or (len(Xcv) == 0)):
print('The sequence provided does not have enough samples to populate the return arrays')
return (X, y, Xcv, ycv)
|
def build_block_specs(block_specs=None):
'Builds the list of BlockSpec objects for SpineNet.'
if (not block_specs):
block_specs = SPINENET_BLOCK_SPECS
logging.info('Building SpineNet block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
| -7,891,216,531,504,436,000
|
Builds the list of BlockSpec objects for SpineNet.
|
official/vision/beta/modeling/backbones/spinenet.py
|
build_block_specs
|
GPhilo/models
|
python
|
def build_block_specs(block_specs=None):
if (not block_specs):
block_specs = SPINENET_BLOCK_SPECS
logging.info('Building SpineNet block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
|
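Entries in block_specs are unpacked positionally into BlockSpec, so a custom scale-permuted architecture can be supplied as plain tuples. The field order (level, block_fn, input_offsets, is_output) is inferred from how block_spec is consumed later in this file:

custom_specs = build_block_specs([
    (2, 'bottleneck', (0, 1), False),
    (4, 'residual', (0, 1), False),
    (3, 'bottleneck', (2, 3), True),  # marked as an output feature level
])
print(len(custom_specs), custom_specs[0].level)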
def __init__(self, input_specs=tf.keras.layers.InputSpec(shape=[None, 640, 640, 3]), min_level=3, max_level=7, block_specs=build_block_specs(), endpoints_num_filters=256, resample_alpha=0.5, block_repeats=1, filter_size_scale=1.0, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, activation='relu', use_sync_bn=False, norm_momentum=0.99, norm_epsilon=0.001, **kwargs):
'SpineNet model.'
self._input_specs = input_specs
self._min_level = min_level
self._max_level = max_level
self._block_specs = block_specs
self._endpoints_num_filters = endpoints_num_filters
self._resample_alpha = resample_alpha
self._block_repeats = block_repeats
self._filter_size_scale = filter_size_scale
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activation = activation
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if (activation == 'relu'):
self._activation_fn = tf.nn.relu
elif (activation == 'swish'):
self._activation_fn = tf.nn.swish
else:
raise ValueError('Activation {} not implemented.'.format(activation))
self._init_block_fn = 'bottleneck'
self._num_init_blocks = 2
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
if (tf.keras.backend.image_data_format() == 'channels_last'):
self._bn_axis = (- 1)
else:
self._bn_axis = 1
inputs = tf.keras.Input(shape=input_specs.shape[1:])
net = self._build_stem(inputs=inputs)
net = self._build_scale_permuted_network(net=net, input_width=input_specs.shape[1])
endpoints = self._build_endpoints(net=net)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(SpineNet, self).__init__(inputs=inputs, outputs=endpoints)
| -7,501,948,293,069,149,000
|
SpineNet model.
|
official/vision/beta/modeling/backbones/spinenet.py
|
__init__
|
GPhilo/models
|
python
|
def __init__(self, input_specs=tf.keras.layers.InputSpec(shape=[None, 640, 640, 3]), min_level=3, max_level=7, block_specs=build_block_specs(), endpoints_num_filters=256, resample_alpha=0.5, block_repeats=1, filter_size_scale=1.0, kernel_initializer='VarianceScaling', kernel_regularizer=None, bias_regularizer=None, activation='relu', use_sync_bn=False, norm_momentum=0.99, norm_epsilon=0.001, **kwargs):
self._input_specs = input_specs
self._min_level = min_level
self._max_level = max_level
self._block_specs = block_specs
self._endpoints_num_filters = endpoints_num_filters
self._resample_alpha = resample_alpha
self._block_repeats = block_repeats
self._filter_size_scale = filter_size_scale
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activation = activation
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if (activation == 'relu'):
self._activation_fn = tf.nn.relu
elif (activation == 'swish'):
self._activation_fn = tf.nn.swish
else:
raise ValueError('Activation {} not implemented.'.format(activation))
self._init_block_fn = 'bottleneck'
self._num_init_blocks = 2
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
if (tf.keras.backend.image_data_format() == 'channels_last'):
self._bn_axis = (- 1)
else:
self._bn_axis = 1
inputs = tf.keras.Input(shape=input_specs.shape[1:])
net = self._build_stem(inputs=inputs)
net = self._build_scale_permuted_network(net=net, input_width=input_specs.shape[1])
endpoints = self._build_endpoints(net=net)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(SpineNet, self).__init__(inputs=inputs, outputs=endpoints)
|
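A construction sketch for this backbone. Defaults come from the signature above, so only the input size is varied; this assumes the TF Model Garden environment implied by the file path, and that calling the model returns the endpoints dict keyed by stringified level:

import tensorflow as tf

backbone = SpineNet(
    input_specs=tf.keras.layers.InputSpec(shape=[None, 512, 512, 3]),
    min_level=3,
    max_level=7,
)
features = backbone(tf.random.uniform([1, 512, 512, 3]))
for level, feature in features.items():
    print(level, feature.shape)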
def _block_group(self, inputs, filters, strides, block_fn_cand, block_repeats=1, name='block_group'):
'Creates one group of blocks for the SpineNet model.'
block_fn_candidates = {'bottleneck': nn_blocks.BottleneckBlock, 'residual': nn_blocks.ResidualBlock}
block_fn = block_fn_candidates[block_fn_cand]
(_, _, _, num_filters) = inputs.get_shape().as_list()
if (block_fn_cand == 'bottleneck'):
use_projection = (not ((num_filters == (filters * 4)) and (strides == 1)))
else:
use_projection = (not ((num_filters == filters) and (strides == 1)))
x = block_fn(filters=filters, strides=strides, use_projection=use_projection, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)(inputs)
for _ in range(1, block_repeats):
x = block_fn(filters=filters, strides=1, use_projection=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)(x)
return tf.identity(x, name=name)
| 7,470,388,370,848,342,000
|
Creates one group of blocks for the SpineNet model.
|
official/vision/beta/modeling/backbones/spinenet.py
|
_block_group
|
GPhilo/models
|
python
|
def _block_group(self, inputs, filters, strides, block_fn_cand, block_repeats=1, name='block_group'):
block_fn_candidates = {'bottleneck': nn_blocks.BottleneckBlock, 'residual': nn_blocks.ResidualBlock}
block_fn = block_fn_candidates[block_fn_cand]
(_, _, _, num_filters) = inputs.get_shape().as_list()
if (block_fn_cand == 'bottleneck'):
use_projection = (not ((num_filters == (filters * 4)) and (strides == 1)))
else:
use_projection = (not ((num_filters == filters) and (strides == 1)))
x = block_fn(filters=filters, strides=strides, use_projection=use_projection, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)(inputs)
for _ in range(1, block_repeats):
x = block_fn(filters=filters, strides=1, use_projection=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activation=self._activation, use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon)(x)
return tf.identity(x, name=name)
|
def _build_stem(self, inputs):
'Build SpineNet stem.'
x = layers.Conv2D(filters=64, kernel_size=7, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)(inputs)
x = self._norm(axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon)(x)
x = tf_utils.get_activation(self._activation_fn)(x)
x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
net = []
for i in range(self._num_init_blocks):
x = self._block_group(inputs=x, filters=int((FILTER_SIZE_MAP[2] * self._filter_size_scale)), strides=1, block_fn_cand=self._init_block_fn, block_repeats=self._block_repeats, name='stem_block_{}'.format((i + 1)))
net.append(x)
return net
| -1,046,958,632,795,266,400
|
Build SpineNet stem.
|
official/vision/beta/modeling/backbones/spinenet.py
|
_build_stem
|
GPhilo/models
|
python
|
def _build_stem(self, inputs):
x = layers.Conv2D(filters=64, kernel_size=7, strides=2, use_bias=False, padding='same', kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)(inputs)
x = self._norm(axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon)(x)
x = tf_utils.get_activation(self._activation_fn)(x)
x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
net = []
for i in range(self._num_init_blocks):
x = self._block_group(inputs=x, filters=int((FILTER_SIZE_MAP[2] * self._filter_size_scale)), strides=1, block_fn_cand=self._init_block_fn, block_repeats=self._block_repeats, name='stem_block_{}'.format((i + 1)))
net.append(x)
return net
|
def _build_scale_permuted_network(self, net, input_width, weighted_fusion=False):
'Build scale-permuted network.'
net_sizes = ([int(math.ceil((input_width / (2 ** 2))))] * len(net))
net_block_fns = ([self._init_block_fn] * len(net))
num_outgoing_connections = ([0] * len(net))
endpoints = {}
for (i, block_spec) in enumerate(self._block_specs):
target_width = int(math.ceil((input_width / (2 ** block_spec.level))))
target_num_filters = int((FILTER_SIZE_MAP[block_spec.level] * self._filter_size_scale))
target_block_fn = block_spec.block_fn
parents = []
input0 = block_spec.input_offsets[0]
input1 = block_spec.input_offsets[1]
x0 = self._resample_with_alpha(inputs=net[input0], input_width=net_sizes[input0], input_block_fn=net_block_fns[input0], target_width=target_width, target_num_filters=target_num_filters, target_block_fn=target_block_fn, alpha=self._resample_alpha)
parents.append(x0)
num_outgoing_connections[input0] += 1
x1 = self._resample_with_alpha(inputs=net[input1], input_width=net_sizes[input1], input_block_fn=net_block_fns[input1], target_width=target_width, target_num_filters=target_num_filters, target_block_fn=target_block_fn, alpha=self._resample_alpha)
parents.append(x1)
num_outgoing_connections[input1] += 1
if block_spec.is_output:
for (j, (j_feat, j_connections)) in enumerate(zip(net, num_outgoing_connections)):
if ((j_connections == 0) and ((j_feat.shape[2] == target_width) and (j_feat.shape[3] == x0.shape[3]))):
parents.append(j_feat)
num_outgoing_connections[j] += 1
if weighted_fusion:
dtype = parents[0].dtype
parent_weights = [tf.nn.relu(tf.cast(tf.Variable(1.0, name='block{}_fusion{}'.format(i, j)), dtype=dtype)) for j in range(len(parents))]
weights_sum = tf.add_n(parent_weights)
parents = [((parents[i] * parent_weights[i]) / (weights_sum + 0.0001)) for i in range(len(parents))]
x = tf_utils.get_activation(self._activation_fn)(tf.add_n(parents))
x = self._block_group(inputs=x, filters=target_num_filters, strides=1, block_fn_cand=target_block_fn, block_repeats=self._block_repeats, name='scale_permuted_block_{}'.format((i + 1)))
net.append(x)
net_sizes.append(target_width)
net_block_fns.append(target_block_fn)
num_outgoing_connections.append(0)
if block_spec.is_output:
if (block_spec.level in endpoints):
raise ValueError('Duplicate feats found for output level {}.'.format(block_spec.level))
if ((block_spec.level < self._min_level) or (block_spec.level > self._max_level)):
raise ValueError('Output level is out of range [{}, {}]'.format(self._min_level, self._max_level))
endpoints[str(block_spec.level)] = x
return endpoints
| -8,515,593,795,021,783,000
|
Build scale-permuted network.
|
official/vision/beta/modeling/backbones/spinenet.py
|
_build_scale_permuted_network
|
GPhilo/models
|
python
|
def _build_scale_permuted_network(self, net, input_width, weighted_fusion=False):
net_sizes = ([int(math.ceil((input_width / (2 ** 2))))] * len(net))
net_block_fns = ([self._init_block_fn] * len(net))
num_outgoing_connections = ([0] * len(net))
endpoints = {}
for (i, block_spec) in enumerate(self._block_specs):
target_width = int(math.ceil((input_width / (2 ** block_spec.level))))
target_num_filters = int((FILTER_SIZE_MAP[block_spec.level] * self._filter_size_scale))
target_block_fn = block_spec.block_fn
parents = []
input0 = block_spec.input_offsets[0]
input1 = block_spec.input_offsets[1]
x0 = self._resample_with_alpha(inputs=net[input0], input_width=net_sizes[input0], input_block_fn=net_block_fns[input0], target_width=target_width, target_num_filters=target_num_filters, target_block_fn=target_block_fn, alpha=self._resample_alpha)
parents.append(x0)
num_outgoing_connections[input0] += 1
x1 = self._resample_with_alpha(inputs=net[input1], input_width=net_sizes[input1], input_block_fn=net_block_fns[input1], target_width=target_width, target_num_filters=target_num_filters, target_block_fn=target_block_fn, alpha=self._resample_alpha)
parents.append(x1)
num_outgoing_connections[input1] += 1
if block_spec.is_output:
for (j, (j_feat, j_connections)) in enumerate(zip(net, num_outgoing_connections)):
if ((j_connections == 0) and ((j_feat.shape[2] == target_width) and (j_feat.shape[3] == x0.shape[3]))):
parents.append(j_feat)
num_outgoing_connections[j] += 1
if weighted_fusion:
dtype = parents[0].dtype
parent_weights = [tf.nn.relu(tf.cast(tf.Variable(1.0, name='block{}_fusion{}'.format(i, j)), dtype=dtype)) for j in range(len(parents))]
weights_sum = tf.add_n(parent_weights)
parents = [((parents[i] * parent_weights[i]) / (weights_sum + 0.0001)) for i in range(len(parents))]
x = tf_utils.get_activation(self._activation_fn)(tf.add_n(parents))
x = self._block_group(inputs=x, filters=target_num_filters, strides=1, block_fn_cand=target_block_fn, block_repeats=self._block_repeats, name='scale_permuted_block_{}'.format((i + 1)))
net.append(x)
net_sizes.append(target_width)
net_block_fns.append(target_block_fn)
num_outgoing_connections.append(0)
if block_spec.is_output:
if (block_spec.level in endpoints):
raise ValueError('Duplicate feats found for output level {}.'.format(block_spec.level))
if ((block_spec.level < self._min_level) or (block_spec.level > self._max_level)):
raise ValueError('Output level is out of range [{}, {}]'.format(self._min_level, self._max_level))
endpoints[str(block_spec.level)] = x
return endpoints
|
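The loop above reads exactly four attributes from each entry of self._block_specs. A minimal sketch of that shape, assuming the usual SpineNet block-function names; the real BlockSpec class is defined elsewhere in spinenet.py:

from dataclasses import dataclass
from typing import Tuple

@dataclass
class BlockSpecSketch:
    level: int                      # target_width = ceil(input_width / 2 ** level)
    block_fn: str                   # e.g. 'bottleneck' (filters are quadrupled) or 'residual'
    input_offsets: Tuple[int, int]  # indices of the two parent features in `net`
    is_output: bool                 # output blocks feed endpoints and absorb orphan features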
def _build_endpoints(self, net):
'Match filter size for endpoints before sharing conv layers.'
endpoints = {}
for level in range(self._min_level, (self._max_level + 1)):
x = layers.Conv2D(filters=self._endpoints_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)(net[str(level)])
x = self._norm(axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon)(x)
x = tf_utils.get_activation(self._activation_fn)(x)
endpoints[str(level)] = x
return endpoints
| 659,790,285,644,945,500
|
Match filter size for endpoints before sharing conv layers.
|
official/vision/beta/modeling/backbones/spinenet.py
|
_build_endpoints
|
GPhilo/models
|
python
|
def _build_endpoints(self, net):
endpoints = {}
for level in range(self._min_level, (self._max_level + 1)):
x = layers.Conv2D(filters=self._endpoints_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)(net[str(level)])
x = self._norm(axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon)(x)
x = tf_utils.get_activation(self._activation_fn)(x)
endpoints[str(level)] = x
return endpoints
|
def _resample_with_alpha(self, inputs, input_width, input_block_fn, target_width, target_num_filters, target_block_fn, alpha=0.5):
'Match resolution and feature dimension.'
(_, _, _, input_num_filters) = inputs.get_shape().as_list()
if (input_block_fn == 'bottleneck'):
input_num_filters /= 4
new_num_filters = int((input_num_filters * alpha))
x = layers.Conv2D(filters=new_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)(inputs)
x = self._norm(axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon)(x)
x = tf_utils.get_activation(self._activation_fn)(x)
if (input_width > target_width):
x = layers.Conv2D(filters=new_num_filters, kernel_size=3, strides=2, padding='SAME', use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)(x)
x = self._norm(axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon)(x)
x = tf_utils.get_activation(self._activation_fn)(x)
input_width /= 2
while (input_width > target_width):
x = layers.MaxPool2D(pool_size=3, strides=2, padding='SAME')(x)
input_width /= 2
elif (input_width < target_width):
scale = (target_width // input_width)
x = spatial_transform_ops.nearest_upsampling(x, scale=scale)
if (target_block_fn == 'bottleneck'):
target_num_filters *= 4
x = layers.Conv2D(filters=target_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)(x)
x = self._norm(axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon)(x)
return x
| 144,570,954,614,252,960
|
Match resolution and feature dimension.
|
official/vision/beta/modeling/backbones/spinenet.py
|
_resample_with_alpha
|
GPhilo/models
|
python
|
def _resample_with_alpha(self, inputs, input_width, input_block_fn, target_width, target_num_filters, target_block_fn, alpha=0.5):
(_, _, _, input_num_filters) = inputs.get_shape().as_list()
if (input_block_fn == 'bottleneck'):
input_num_filters /= 4
new_num_filters = int((input_num_filters * alpha))
x = layers.Conv2D(filters=new_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)(inputs)
x = self._norm(axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon)(x)
x = tf_utils.get_activation(self._activation_fn)(x)
if (input_width > target_width):
x = layers.Conv2D(filters=new_num_filters, kernel_size=3, strides=2, padding='SAME', use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)(x)
x = self._norm(axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon)(x)
x = tf_utils.get_activation(self._activation_fn)(x)
input_width /= 2
while (input_width > target_width):
x = layers.MaxPool2D(pool_size=3, strides=2, padding='SAME')(x)
input_width /= 2
elif (input_width < target_width):
scale = (target_width // input_width)
x = spatial_transform_ops.nearest_upsampling(x, scale=scale)
if (target_block_fn == 'bottleneck'):
target_num_filters *= 4
x = layers.Conv2D(filters=target_num_filters, kernel_size=1, strides=1, use_bias=False, kernel_initializer=self._kernel_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer)(x)
x = self._norm(axis=self._bn_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon)(x)
return x
|
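When downsampling, the method above applies one stride-2 3x3 convolution and then stride-2 max pools until the target width is reached. A self-contained sketch of just that arithmetic, with hypothetical widths:

input_width, target_width = 128, 16  # hypothetical values
steps = []
if input_width > target_width:
    steps.append('conv3x3/2')   # the first halving uses the strided conv
    input_width //= 2
    while input_width > target_width:
        steps.append('maxpool3x3/2')  # remaining halvings use max pooling
        input_width //= 2
print(steps)  # ['conv3x3/2', 'maxpool3x3/2', 'maxpool3x3/2']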
@property
def output_specs(self):
'A dict of {level: TensorShape} pairs for the model output.'
return self._output_specs
| -6,976,459,066,222,763,000
|
A dict of {level: TensorShape} pairs for the model output.
|
official/vision/beta/modeling/backbones/spinenet.py
|
output_specs
|
GPhilo/models
|
python
|
@property
def output_specs(self):
return self._output_specs
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_surface(self, test_dict):
'Check that the computation of the surface is correct'
test_obj = test_dict['test_obj']
result = test_obj.slot.comp_surface()
a = result
b = test_dict['S_exp']
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
b = comp_surface(test_obj.slot)
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
| 6,061,114,738,152,995,000
|
Check that the computation of the surface is correct
|
Tests/Methods/Slot/test_SlotM14_meth.py
|
test_comp_surface
|
ajpina/pyleecan
|
python
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_surface(self, test_dict):
test_obj = test_dict['test_obj']
result = test_obj.slot.comp_surface()
a = result
b = test_dict['S_exp']
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
b = comp_surface(test_obj.slot)
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
|
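All the SlotM14 tests in this file are parametrized over the Mag14_test list. A hypothetical sketch of one entry, inferred from the keys the tests read; the numbers are placeholders, not real expected values:

Mag14_test_entry = {
    'test_obj': None,   # in practice a Lamination carrying a SlotM14
    'S_exp': 1.0e-4,    # expected slot surface (placeholder)
    'SA_exp': 9.0e-5,   # expected active surface (placeholder)
    'H_exp': 5.0e-3,    # expected slot height (placeholder)
    'HA_exp': 4.0e-3,   # expected active height (placeholder)
    'Ao': 0.1,          # expected opening angle in rad (placeholder)
    'Rmec': 0.06,       # expected mechanical radius (placeholder)
}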
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_surface_active(self, test_dict):
'Check that the computation of the active surface is correct'
test_obj = test_dict['test_obj']
result = test_obj.slot.comp_surface_active()
a = result
b = test_dict['SA_exp']
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
b = comp_surface_active(test_obj.slot, Ndisc=1000)
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
| -6,730,817,113,055,780,000
|
Check that the computation of the active surface is correct
|
Tests/Methods/Slot/test_SlotM14_meth.py
|
test_comp_surface_active
|
ajpina/pyleecan
|
python
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_surface_active(self, test_dict):
test_obj = test_dict['test_obj']
result = test_obj.slot.comp_surface_active()
a = result
b = test_dict['SA_exp']
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
b = comp_surface_active(test_obj.slot, Ndisc=1000)
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_height(self, test_dict):
'Check that the computation of the height is correct'
test_obj = test_dict['test_obj']
result = test_obj.slot.comp_height()
a = result
b = test_dict['H_exp']
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
b = comp_height(test_obj.slot)
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
| -4,106,914,196,046,679,000
|
Check that the computation of the height is correct
|
Tests/Methods/Slot/test_SlotM14_meth.py
|
test_comp_height
|
ajpina/pyleecan
|
python
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_height(self, test_dict):
test_obj = test_dict['test_obj']
result = test_obj.slot.comp_height()
a = result
b = test_dict['H_exp']
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
b = comp_height(test_obj.slot)
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_height_active(self, test_dict):
'Check that the computation of the active height is correct'
test_obj = test_dict['test_obj']
result = test_obj.slot.comp_height_active()
a = result
b = test_dict['HA_exp']
        msg = ((('Return ' + str(a)) + ' expected ') + str(b))
        assert (a == pytest.approx(b, rel=DELTA)), msg
        b = comp_height_active(test_obj.slot)
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
| -3,753,773,613,012,595,700
|
Check that the computation of the active height is correct
|
Tests/Methods/Slot/test_SlotM14_meth.py
|
test_comp_height_active
|
ajpina/pyleecan
|
python
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_height_active(self, test_dict):
test_obj = test_dict['test_obj']
result = test_obj.slot.comp_height_active()
a = result
b = test_dict['HA_exp']
        msg = ((('Return ' + str(a)) + ' expected ') + str(b))
        assert (a == pytest.approx(b, rel=DELTA)), msg
        b = comp_height_active(test_obj.slot)
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA)), msg
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_angle_opening(self, test_dict):
'Check that the computation of the average opening angle is correct'
test_obj = test_dict['test_obj']
a = test_obj.slot.comp_angle_opening()
assert (a == pytest.approx(test_dict['Ao'], rel=DELTA))
b = comp_angle_opening(test_obj.slot)
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA))
| 8,504,016,254,974,876,000
|
Check that the computation of the average opening angle is correct
|
Tests/Methods/Slot/test_SlotM14_meth.py
|
test_comp_angle_opening
|
ajpina/pyleecan
|
python
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_angle_opening(self, test_dict):
test_obj = test_dict['test_obj']
a = test_obj.slot.comp_angle_opening()
assert (a == pytest.approx(test_dict['Ao'], rel=DELTA))
b = comp_angle_opening(test_obj.slot)
msg = ((('Return ' + str(a)) + ' expected ') + str(b))
assert (a == pytest.approx(b, rel=DELTA))
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_width_opening(self, test_dict):
'Check that the computation of the average opening width is correct'
test_obj = test_dict['test_obj']
a = test_obj.slot.comp_width_opening()
point_dict = test_obj.slot._comp_point_coordinate()
assert (a == pytest.approx(abs((point_dict['Z1'] - point_dict['Z4'])), rel=DELTA))
| -266,564,087,929,575,070
|
Check that the computation of the average opening width is correct
|
Tests/Methods/Slot/test_SlotM14_meth.py
|
test_comp_width_opening
|
ajpina/pyleecan
|
python
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_width_opening(self, test_dict):
test_obj = test_dict['test_obj']
a = test_obj.slot.comp_width_opening()
point_dict = test_obj.slot._comp_point_coordinate()
assert (a == pytest.approx(abs((point_dict['Z1'] - point_dict['Z4'])), rel=DELTA))
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_mec_radius(self, test_dict):
'Check that the computation of the mechanical radius is correct'
test_obj = test_dict['test_obj']
a = test_obj.comp_radius_mec()
assert (a == pytest.approx(test_dict['Rmec'], rel=DELTA))
| 7,076,176,831,930,096,000
|
Check that the computation of the mechanical radius is correct
|
Tests/Methods/Slot/test_SlotM14_meth.py
|
test_comp_mec_radius
|
ajpina/pyleecan
|
python
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_mec_radius(self, test_dict):
test_obj = test_dict['test_obj']
a = test_obj.comp_radius_mec()
assert (a == pytest.approx(test_dict['Rmec'], rel=DELTA))
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_point_coordinate(self, test_dict):
'Check that the point coordinates are correct'
test_obj = test_dict['test_obj']
point_dict = test_obj.slot._comp_point_coordinate()
Z1 = point_dict['Z1']
Z2 = point_dict['Z2']
Z3 = point_dict['Z3']
Z4 = point_dict['Z4']
ZM0 = point_dict['ZM0']
ZM1 = point_dict['ZM1']
ZM2 = point_dict['ZM2']
ZM3 = point_dict['ZM3']
ZM4 = point_dict['ZM4']
W0 = test_obj.slot.W0
H0 = test_obj.slot.H0
Wmag = test_obj.slot.Wmag
Hmag = test_obj.slot.Hmag
Rbo = test_obj.get_Rbo()
assert (abs(Z1) == pytest.approx(Rbo, rel=DELTA))
assert (angle(Z1) == pytest.approx(((- W0) / 2), rel=DELTA))
assert (abs(Z4) == pytest.approx(Rbo, rel=DELTA))
assert (angle(Z4) == pytest.approx((W0 / 2), rel=DELTA))
if test_obj.is_internal:
assert (abs(Z2) == pytest.approx((Rbo - H0), rel=DELTA))
assert (abs(Z3) == pytest.approx((Rbo - H0), rel=DELTA))
else:
assert (abs(Z3) == pytest.approx((Rbo + H0), rel=DELTA))
assert (abs(Z2) == pytest.approx((Rbo + H0), rel=DELTA))
assert (angle(Z2) == pytest.approx(((- W0) / 2), rel=DELTA))
assert (angle(Z3) == pytest.approx((W0 / 2), rel=DELTA))
assert (angle(ZM1) == pytest.approx(angle(ZM2), rel=DELTA))
assert (angle(ZM1) == pytest.approx(((- Wmag) / 2), rel=DELTA))
assert (angle(ZM3) == pytest.approx(angle(ZM4), rel=DELTA))
assert (angle(ZM3) == pytest.approx((Wmag / 2), rel=DELTA))
if test_obj.is_internal:
assert (ZM0 == pytest.approx(((Rbo + Hmag) - H0), rel=DELTA))
else:
assert (ZM0 == pytest.approx(((Rbo - Hmag) + H0), rel=DELTA))
| -6,819,790,878,177,716,000
|
Check that the point coordinates are correct
|
Tests/Methods/Slot/test_SlotM14_meth.py
|
test_comp_point_coordinate
|
ajpina/pyleecan
|
python
|
@pytest.mark.parametrize('test_dict', Mag14_test)
def test_comp_point_coordinate(self, test_dict):
test_obj = test_dict['test_obj']
point_dict = test_obj.slot._comp_point_coordinate()
Z1 = point_dict['Z1']
Z2 = point_dict['Z2']
Z3 = point_dict['Z3']
Z4 = point_dict['Z4']
ZM0 = point_dict['ZM0']
ZM1 = point_dict['ZM1']
ZM2 = point_dict['ZM2']
ZM3 = point_dict['ZM3']
ZM4 = point_dict['ZM4']
W0 = test_obj.slot.W0
H0 = test_obj.slot.H0
Wmag = test_obj.slot.Wmag
Hmag = test_obj.slot.Hmag
Rbo = test_obj.get_Rbo()
assert (abs(Z1) == pytest.approx(Rbo, rel=DELTA))
assert (angle(Z1) == pytest.approx(((- W0) / 2), rel=DELTA))
assert (abs(Z4) == pytest.approx(Rbo, rel=DELTA))
assert (angle(Z4) == pytest.approx((W0 / 2), rel=DELTA))
if test_obj.is_internal:
assert (abs(Z2) == pytest.approx((Rbo - H0), rel=DELTA))
assert (abs(Z3) == pytest.approx((Rbo - H0), rel=DELTA))
else:
assert (abs(Z3) == pytest.approx((Rbo + H0), rel=DELTA))
assert (abs(Z2) == pytest.approx((Rbo + H0), rel=DELTA))
assert (angle(Z2) == pytest.approx(((- W0) / 2), rel=DELTA))
assert (angle(Z3) == pytest.approx((W0 / 2), rel=DELTA))
assert (angle(ZM1) == pytest.approx(angle(ZM2), rel=DELTA))
assert (angle(ZM1) == pytest.approx(((- Wmag) / 2), rel=DELTA))
assert (angle(ZM3) == pytest.approx(angle(ZM4), rel=DELTA))
assert (angle(ZM3) == pytest.approx((Wmag / 2), rel=DELTA))
if test_obj.is_internal:
assert (ZM0 == pytest.approx(((Rbo + Hmag) - H0), rel=DELTA))
else:
assert (ZM0 == pytest.approx(((Rbo - Hmag) + H0), rel=DELTA))
|
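The assertions above treat slot corner points as complex numbers: abs(Z) is the radius from the lamination center and angle(Z) the azimuth. A minimal illustration, assuming numpy's angle as used in the test module and hypothetical dimensions:

from numpy import angle, exp, pi

Rbo, W0 = 0.05, pi / 12       # hypothetical bore radius and opening angle
Z1 = Rbo * exp(-1j * W0 / 2)  # corner at radius Rbo, azimuth -W0/2
assert abs(abs(Z1) - Rbo) < 1e-12
assert abs(angle(Z1) - (-W0 / 2)) < 1e-12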
def masked_logit_cross_entropy(preds, labels, mask):
'Logit cross-entropy loss with masking.'
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
loss = tf.reduce_sum(input_tensor=loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(input_tensor=mask), tf.constant([1.0]))
loss *= mask
return tf.reduce_mean(input_tensor=loss)
| 7,783,878,588,039,748,000
|
Logit cross-entropy loss with masking.
|
graphsage/metrics.py
|
masked_logit_cross_entropy
|
gelareh1985/GraphSAGE
|
python
|
def masked_logit_cross_entropy(preds, labels, mask):
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
loss = tf.reduce_sum(input_tensor=loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(input_tensor=mask), tf.constant([1.0]))
loss *= mask
return tf.reduce_mean(input_tensor=loss)
|
def masked_softmax_cross_entropy(preds, labels, mask):
'Softmax cross-entropy loss with masking.'
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=tf.stop_gradient(labels))
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(input_tensor=mask), tf.constant([1.0]))
loss *= mask
return tf.reduce_mean(input_tensor=loss)
| 1,409,206,032,238,293,800
|
Softmax cross-entropy loss with masking.
|
graphsage/metrics.py
|
masked_softmax_cross_entropy
|
gelareh1985/GraphSAGE
|
python
|
def masked_softmax_cross_entropy(preds, labels, mask):
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=tf.stop_gradient(labels))
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(input_tensor=mask), tf.constant([1.0]))
loss *= mask
return tf.reduce_mean(input_tensor=loss)
|
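A short usage sketch for the masked losses above, with a hypothetical toy batch in which only the first two of four examples carry labels; the mask renormalization means the unlabeled rows contribute nothing:

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.2, 1.5, 0.3],
                      [0.0, 0.0, 0.0],
                      [0.0, 0.0, 0.0]])
labels = tf.one_hot([0, 1, 0, 0], depth=3)
mask = tf.constant([1.0, 1.0, 0.0, 0.0])

loss = masked_softmax_cross_entropy(logits, labels, mask)
print(float(loss))  # mean over the batch after mask renormalization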
def masked_l2(preds, actuals, mask):
'L2 loss with masking.'
    # tf.nn.l2_loss takes a single tensor (its second argument is the op
    # name), so compute a per-example squared error instead.
    loss = tf.reduce_sum(input_tensor=tf.square(preds - actuals), axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(input_tensor=mask)
loss *= mask
return tf.reduce_mean(input_tensor=loss)
| 2,564,832,346,358,642,000
|
L2 loss with masking.
|
graphsage/metrics.py
|
masked_l2
|
gelareh1985/GraphSAGE
|
python
|
def masked_l2(preds, actuals, mask):
    # tf.nn.l2_loss takes a single tensor (its second argument is the op
    # name), so compute a per-example squared error instead.
    loss = tf.reduce_sum(input_tensor=tf.square(preds - actuals), axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(input_tensor=mask)
loss *= mask
return tf.reduce_mean(input_tensor=loss)
|
def masked_accuracy(preds, labels, mask):
'Accuracy with masking.'
correct_prediction = tf.equal(tf.argmax(input=preds, axis=1), tf.argmax(input=labels, axis=1))
accuracy_all = tf.cast(correct_prediction, tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(input_tensor=mask)
accuracy_all *= mask
return tf.reduce_mean(input_tensor=accuracy_all)
| 39,099,147,810,143,750
|
Accuracy with masking.
|
graphsage/metrics.py
|
masked_accuracy
|
gelareh1985/GraphSAGE
|
python
|
def masked_accuracy(preds, labels, mask):
correct_prediction = tf.equal(tf.argmax(input=preds, axis=1), tf.argmax(input=labels, axis=1))
accuracy_all = tf.cast(correct_prediction, tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(input_tensor=mask)
accuracy_all *= mask
return tf.reduce_mean(input_tensor=accuracy_all)
|
def _def_loss(self, model_fn, env):
    '\n    returns a module for the loss\n    '
raise NotImplementedError
| 2,794,956,208,641,094,000
|
returns a module for the loss
|
rl/algorithms/core.py
|
_def_loss
|
cbschaff/nlimb
|
python
|
def _def_loss(self, model_fn, env):
'\n \n '
raise NotImplementedError
|
def _def_opt(self, loss):
    '\n    returns a module for the optimizer\n    '
raise NotImplementedError
| -5,681,829,422,691,011,000
|
returns a module for the optimizer
|
rl/algorithms/core.py
|
_def_opt
|
cbschaff/nlimb
|
python
|
def _def_opt(self, loss):
'\n \n '
raise NotImplementedError
|
def setup(bot: Bot) -> None:
' Load the Mute cog. '
bot.add_cog(MuteCog(bot))
log.info('Commands loaded: mutes')
| 6,675,582,806,347,262,000
|
Load the Mute cog.
|
cogs/commands/moderation/mutes.py
|
setup
|
y0usef-2E/chiya
|
python
|
def setup(bot: Bot) -> None:
' '
bot.add_cog(MuteCog(bot))
log.info('Commands loaded: mutes')
|
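For context, the setup hook above assumes a MuteCog class defined earlier in the same file. A minimal hypothetical skeleton following standard discord.py cog conventions; the real cog defines the actual mute/unmute commands:

from discord.ext import commands

class MuteCog(commands.Cog):
    'Hypothetical skeleton of the cog loaded by setup().'

    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot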