body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
956aea7af540e77cd313e31851c3d4d1fbec2b02dcf98f53d178824d85d37acf
|
def sign_inputs(self, key_generator):
'\n Sign inputs in a finalized bundle.\n '
if (not self.hash):
raise RuntimeError('Cannot sign inputs until bundle is finalized.')
i = 0
while (i < len(self)):
txn = self[i]
if (txn.value < 0):
if (txn.address.key_index is None):
raise with_context(exc=ValueError('Unable to sign input {input}; ``key_index`` is None (``exc.context`` has more info).'.format(input=txn.address)), context={'transaction': txn})
signature_fragment_generator = self._create_signature_fragment_generator(key_generator, txn)
for j in range(AddressGenerator.DIGEST_ITERATIONS):
self[(i + j)].signature_message_fragment = next(signature_fragment_generator)
i += AddressGenerator.DIGEST_ITERATIONS
else:
i += 1
|
Sign inputs in a finalized bundle.
|
src/cornode/transaction.py
|
sign_inputs
|
Cornode/cornode.lib.py
| 0
|
python
|
def sign_inputs(self, key_generator):
'\n \n '
if (not self.hash):
raise RuntimeError('Cannot sign inputs until bundle is finalized.')
i = 0
while (i < len(self)):
txn = self[i]
if (txn.value < 0):
if (txn.address.key_index is None):
raise with_context(exc=ValueError('Unable to sign input {input}; ``key_index`` is None (``exc.context`` has more info).'.format(input=txn.address)), context={'transaction': txn})
signature_fragment_generator = self._create_signature_fragment_generator(key_generator, txn)
for j in range(AddressGenerator.DIGEST_ITERATIONS):
self[(i + j)].signature_message_fragment = next(signature_fragment_generator)
i += AddressGenerator.DIGEST_ITERATIONS
else:
i += 1
|
def sign_inputs(self, key_generator):
'\n \n '
if (not self.hash):
raise RuntimeError('Cannot sign inputs until bundle is finalized.')
i = 0
while (i < len(self)):
txn = self[i]
if (txn.value < 0):
if (txn.address.key_index is None):
raise with_context(exc=ValueError('Unable to sign input {input}; ``key_index`` is None (``exc.context`` has more info).'.format(input=txn.address)), context={'transaction': txn})
signature_fragment_generator = self._create_signature_fragment_generator(key_generator, txn)
for j in range(AddressGenerator.DIGEST_ITERATIONS):
self[(i + j)].signature_message_fragment = next(signature_fragment_generator)
i += AddressGenerator.DIGEST_ITERATIONS
else:
i += 1<|docstring|>Sign inputs in a finalized bundle.<|endoftext|>
|
faed723fcd44919d62d0983f8341115c938f47815ad11eff2f3e6aecb778096f
|
@staticmethod
def _create_signature_fragment_generator(key_generator, txn):
'\n Creates the SignatureFragmentGenerator to sign inputs.\n\n Split into a separate method so that it can be mocked for unit\n tests.\n '
return SignatureFragmentGenerator(private_key=key_generator.get_keys(start=txn.address.key_index, iterations=AddressGenerator.DIGEST_ITERATIONS)[0], hash_=txn.bundle_hash)
|
Creates the SignatureFragmentGenerator to sign inputs.
Split into a separate method so that it can be mocked for unit
tests.
|
src/cornode/transaction.py
|
_create_signature_fragment_generator
|
Cornode/cornode.lib.py
| 0
|
python
|
@staticmethod
def _create_signature_fragment_generator(key_generator, txn):
'\n Creates the SignatureFragmentGenerator to sign inputs.\n\n Split into a separate method so that it can be mocked for unit\n tests.\n '
return SignatureFragmentGenerator(private_key=key_generator.get_keys(start=txn.address.key_index, iterations=AddressGenerator.DIGEST_ITERATIONS)[0], hash_=txn.bundle_hash)
|
@staticmethod
def _create_signature_fragment_generator(key_generator, txn):
'\n Creates the SignatureFragmentGenerator to sign inputs.\n\n Split into a separate method so that it can be mocked for unit\n tests.\n '
return SignatureFragmentGenerator(private_key=key_generator.get_keys(start=txn.address.key_index, iterations=AddressGenerator.DIGEST_ITERATIONS)[0], hash_=txn.bundle_hash)<|docstring|>Creates the SignatureFragmentGenerator to sign inputs.
Split into a separate method so that it can be mocked for unit
tests.<|endoftext|>
|
ec9158a6dd9b1b6916c45cad95e311f2fb2769925387c088ef8dbd6794e2a9be
|
def __init__(self, addrType=None, accessType=None, addr=None, port=None):
'\n :param addrType: (Optional) 连接信息的类型,目前支持以下类型:<br>- database 通常数据访问,读写等 <br>- pd 数据迁移时连接PD节点 <br>- monitor 查看监控数据 <br>- dms 使用DMS客户端,访问数据库 <br>-其他需要的类型等,各产品可视需要添加<br>参数大小敏感\n :param accessType: (Optional) 从K8S集群外部访问实例的方式,目前支持以下两种类型 - NodePort - LoadBalancer 参数大小敏感\n :param addr: (Optional) 从K8S集群外部访问实例的地址,如域名或IP\n :param port: (Optional) 端口\n '
self.addrType = addrType
self.accessType = accessType
self.addr = addr
self.port = port
|
:param addrType: (Optional) 连接信息的类型,目前支持以下类型:<br>- database 通常数据访问,读写等 <br>- pd 数据迁移时连接PD节点 <br>- monitor 查看监控数据 <br>- dms 使用DMS客户端,访问数据库 <br>-其他需要的类型等,各产品可视需要添加<br>参数大小敏感
:param accessType: (Optional) 从K8S集群外部访问实例的方式,目前支持以下两种类型 - NodePort - LoadBalancer 参数大小敏感
:param addr: (Optional) 从K8S集群外部访问实例的地址,如域名或IP
:param port: (Optional) 端口
|
jdcloud_sdk/services/rds/models/K8SServiceAddr.py
|
__init__
|
jdcloud-apigateway/jdcloud-sdk-python
| 14
|
python
|
def __init__(self, addrType=None, accessType=None, addr=None, port=None):
'\n :param addrType: (Optional) 连接信息的类型,目前支持以下类型:<br>- database 通常数据访问,读写等 <br>- pd 数据迁移时连接PD节点 <br>- monitor 查看监控数据 <br>- dms 使用DMS客户端,访问数据库 <br>-其他需要的类型等,各产品可视需要添加<br>参数大小敏感\n :param accessType: (Optional) 从K8S集群外部访问实例的方式,目前支持以下两种类型 - NodePort - LoadBalancer 参数大小敏感\n :param addr: (Optional) 从K8S集群外部访问实例的地址,如域名或IP\n :param port: (Optional) 端口\n '
self.addrType = addrType
self.accessType = accessType
self.addr = addr
self.port = port
|
def __init__(self, addrType=None, accessType=None, addr=None, port=None):
'\n :param addrType: (Optional) 连接信息的类型,目前支持以下类型:<br>- database 通常数据访问,读写等 <br>- pd 数据迁移时连接PD节点 <br>- monitor 查看监控数据 <br>- dms 使用DMS客户端,访问数据库 <br>-其他需要的类型等,各产品可视需要添加<br>参数大小敏感\n :param accessType: (Optional) 从K8S集群外部访问实例的方式,目前支持以下两种类型 - NodePort - LoadBalancer 参数大小敏感\n :param addr: (Optional) 从K8S集群外部访问实例的地址,如域名或IP\n :param port: (Optional) 端口\n '
self.addrType = addrType
self.accessType = accessType
self.addr = addr
self.port = port<|docstring|>:param addrType: (Optional) 连接信息的类型,目前支持以下类型:<br>- database 通常数据访问,读写等 <br>- pd 数据迁移时连接PD节点 <br>- monitor 查看监控数据 <br>- dms 使用DMS客户端,访问数据库 <br>-其他需要的类型等,各产品可视需要添加<br>参数大小敏感
:param accessType: (Optional) 从K8S集群外部访问实例的方式,目前支持以下两种类型 - NodePort - LoadBalancer 参数大小敏感
:param addr: (Optional) 从K8S集群外部访问实例的地址,如域名或IP
:param port: (Optional) 端口<|endoftext|>
|
b7f9f86e1830a7cbc9e9005164a2ddfa1b000c94823584927e208317dee17e36
|
def get_amount(value):
'\n Extracts decimal value from Money or Expression.\n '
if isinstance(value, MONEY_CLASSES):
return value.amount
elif (isinstance(value, BaseExpression) and (not isinstance(value, F))):
return get_amount(value.value)
return value
|
Extracts decimal value from Money or Expression.
|
djmoney/utils.py
|
get_amount
|
mikeiwi/django-money-fork
| 0
|
python
|
def get_amount(value):
'\n \n '
if isinstance(value, MONEY_CLASSES):
return value.amount
elif (isinstance(value, BaseExpression) and (not isinstance(value, F))):
return get_amount(value.value)
return value
|
def get_amount(value):
'\n \n '
if isinstance(value, MONEY_CLASSES):
return value.amount
elif (isinstance(value, BaseExpression) and (not isinstance(value, F))):
return get_amount(value.value)
return value<|docstring|>Extracts decimal value from Money or Expression.<|endoftext|>
|
920006774ba75cf17881d0790e6e96e4771381ffba0b39deb9bd1225b16aa7b2
|
def prepare_expression(expr):
'\n Prepares some complex money expression to be used in query.\n '
if isinstance(expr.rhs, F):
(target, return_value) = (expr.lhs, expr.rhs)
else:
(target, return_value) = (expr.rhs, expr.lhs)
amount = get_amount(target)
target.value = amount
return return_value
|
Prepares some complex money expression to be used in query.
|
djmoney/utils.py
|
prepare_expression
|
mikeiwi/django-money-fork
| 0
|
python
|
def prepare_expression(expr):
'\n \n '
if isinstance(expr.rhs, F):
(target, return_value) = (expr.lhs, expr.rhs)
else:
(target, return_value) = (expr.rhs, expr.lhs)
amount = get_amount(target)
target.value = amount
return return_value
|
def prepare_expression(expr):
'\n \n '
if isinstance(expr.rhs, F):
(target, return_value) = (expr.lhs, expr.rhs)
else:
(target, return_value) = (expr.rhs, expr.lhs)
amount = get_amount(target)
target.value = amount
return return_value<|docstring|>Prepares some complex money expression to be used in query.<|endoftext|>
|
ed8e991b500b0a3d590ab1c7db40eb9615ba12ecb34037b9f009d55c2eb1b888
|
def MycoplasmaBovis(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph:
'Return new instance of the Mycoplasma bovis graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Mycoplasma bovis graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 20:19:48.748050\n\t\n\tThe undirected graph Mycoplasma bovis has 746 nodes and 32435 weighted\n\tedges, of which none are self-loops. The graph is quite dense as it has\n\ta density of 0.11672 and has 5 connected components, where the component\n\twith most nodes has 737 nodes and the component with the least nodes has\n\t2 nodes. The graph median node degree is 60, the mean node degree is 86.96,\n\tand the node degree mode is 16. 
The top 5 most central nodes are 289397.MBOVPG45_0337\n\t(degree 292), 289397.MBOVPG45_0160 (degree 291), 289397.MBOVPG45_0403 (degree\n\t291), 289397.MBOVPG45_0258 (degree 278) and 289397.MBOVPG45_0595 (degree\n\t277).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import MycoplasmaBovis\n\t\n\t # Then load the graph\n\t graph = MycoplasmaBovis()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n '
return AutomaticallyRetrievedGraph(graph_name='MycoplasmaBovis', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
|
Return new instance of the Mycoplasma bovis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Wether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instace of Mycoplasma bovis graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:19:48.748050
The undirected graph Mycoplasma bovis has 746 nodes and 32435 weighted
edges, of which none are self-loops. The graph is quite dense as it has
a density of 0.11672 and has 5 connected components, where the component
with most nodes has 737 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 60, the mean node degree is 86.96,
and the node degree mode is 16. The top 5 most central nodes are 289397.MBOVPG45_0337
(degree 292), 289397.MBOVPG45_0160 (degree 291), 289397.MBOVPG45_0403 (degree
291), 289397.MBOVPG45_0258 (degree 278) and 289397.MBOVPG45_0595 (degree
277).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import MycoplasmaBovis
# Then load the graph
graph = MycoplasmaBovis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
|
bindings/python/ensmallen_graph/datasets/string/mycoplasmabovis.py
|
MycoplasmaBovis
|
caufieldjh/ensmallen_graph
| 0
|
python
|
def MycoplasmaBovis(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph:
'Return new instance of the Mycoplasma bovis graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Mycoplasma bovis graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 20:19:48.748050\n\t\n\tThe undirected graph Mycoplasma bovis has 746 nodes and 32435 weighted\n\tedges, of which none are self-loops. The graph is quite dense as it has\n\ta density of 0.11672 and has 5 connected components, where the component\n\twith most nodes has 737 nodes and the component with the least nodes has\n\t2 nodes. The graph median node degree is 60, the mean node degree is 86.96,\n\tand the node degree mode is 16. 
The top 5 most central nodes are 289397.MBOVPG45_0337\n\t(degree 292), 289397.MBOVPG45_0160 (degree 291), 289397.MBOVPG45_0403 (degree\n\t291), 289397.MBOVPG45_0258 (degree 278) and 289397.MBOVPG45_0595 (degree\n\t277).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import MycoplasmaBovis\n\t\n\t # Then load the graph\n\t graph = MycoplasmaBovis()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n '
return AutomaticallyRetrievedGraph(graph_name='MycoplasmaBovis', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
|
def MycoplasmaBovis(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph:
'Return new instance of the Mycoplasma bovis graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Mycoplasma bovis graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 20:19:48.748050\n\t\n\tThe undirected graph Mycoplasma bovis has 746 nodes and 32435 weighted\n\tedges, of which none are self-loops. The graph is quite dense as it has\n\ta density of 0.11672 and has 5 connected components, where the component\n\twith most nodes has 737 nodes and the component with the least nodes has\n\t2 nodes. The graph median node degree is 60, the mean node degree is 86.96,\n\tand the node degree mode is 16. 
The top 5 most central nodes are 289397.MBOVPG45_0337\n\t(degree 292), 289397.MBOVPG45_0160 (degree 291), 289397.MBOVPG45_0403 (degree\n\t291), 289397.MBOVPG45_0258 (degree 278) and 289397.MBOVPG45_0595 (degree\n\t277).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import MycoplasmaBovis\n\t\n\t # Then load the graph\n\t graph = MycoplasmaBovis()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n '
return AutomaticallyRetrievedGraph(graph_name='MycoplasmaBovis', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()<|docstring|>Return new instance of the Mycoplasma bovis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Wether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instace of Mycoplasma bovis graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:19:48.748050
The undirected graph Mycoplasma bovis has 746 nodes and 32435 weighted
edges, of which none are self-loops. The graph is quite dense as it has
a density of 0.11672 and has 5 connected components, where the component
with most nodes has 737 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 60, the mean node degree is 86.96,
and the node degree mode is 16. The top 5 most central nodes are 289397.MBOVPG45_0337
(degree 292), 289397.MBOVPG45_0160 (degree 291), 289397.MBOVPG45_0403 (degree
291), 289397.MBOVPG45_0258 (degree 278) and 289397.MBOVPG45_0595 (degree
277).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import MycoplasmaBovis
# Then load the graph
graph = MycoplasmaBovis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.<|endoftext|>
|
9f59c1b2171ea5ad5218805f0e16c34fde97d25340cff3e5853edf6be5e59750
|
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Operation) -> 'ManageSellOffer':
'Creates a :class:`ManageSellOffer` object from an XDR Operation object.'
source = Operation.get_source_from_xdr_obj(xdr_object)
assert (xdr_object.body.manage_sell_offer_op is not None)
selling = Asset.from_xdr_object(xdr_object.body.manage_sell_offer_op.selling)
buying = Asset.from_xdr_object(xdr_object.body.manage_sell_offer_op.buying)
amount = Operation.from_xdr_amount(xdr_object.body.manage_sell_offer_op.amount.int64)
price = Price.from_xdr_object(xdr_object.body.manage_sell_offer_op.price)
offer_id = xdr_object.body.manage_sell_offer_op.offer_id.int64
op = cls(source=source, selling=selling, buying=buying, amount=amount, price=price, offer_id=offer_id)
return op
|
Creates a :class:`ManageSellOffer` object from an XDR Operation object.
|
stellar_sdk/operation/manage_sell_offer.py
|
from_xdr_object
|
garantor/py-stellar-base
| 0
|
python
|
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Operation) -> 'ManageSellOffer':
source = Operation.get_source_from_xdr_obj(xdr_object)
assert (xdr_object.body.manage_sell_offer_op is not None)
selling = Asset.from_xdr_object(xdr_object.body.manage_sell_offer_op.selling)
buying = Asset.from_xdr_object(xdr_object.body.manage_sell_offer_op.buying)
amount = Operation.from_xdr_amount(xdr_object.body.manage_sell_offer_op.amount.int64)
price = Price.from_xdr_object(xdr_object.body.manage_sell_offer_op.price)
offer_id = xdr_object.body.manage_sell_offer_op.offer_id.int64
op = cls(source=source, selling=selling, buying=buying, amount=amount, price=price, offer_id=offer_id)
return op
|
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Operation) -> 'ManageSellOffer':
source = Operation.get_source_from_xdr_obj(xdr_object)
assert (xdr_object.body.manage_sell_offer_op is not None)
selling = Asset.from_xdr_object(xdr_object.body.manage_sell_offer_op.selling)
buying = Asset.from_xdr_object(xdr_object.body.manage_sell_offer_op.buying)
amount = Operation.from_xdr_amount(xdr_object.body.manage_sell_offer_op.amount.int64)
price = Price.from_xdr_object(xdr_object.body.manage_sell_offer_op.price)
offer_id = xdr_object.body.manage_sell_offer_op.offer_id.int64
op = cls(source=source, selling=selling, buying=buying, amount=amount, price=price, offer_id=offer_id)
return op<|docstring|>Creates a :class:`ManageSellOffer` object from an XDR Operation object.<|endoftext|>
|
43d628b1fcf2422850c918da57f6590d68551e9071573d332a7fc4069b971b6b
|
def __init__(self, config, content_type):
'\n Initializes a client object.\n\n :param root_uri: the base URL of Rexster.\n\n '
self.config = config
self.content_type = content_type
self.user_agent = ('bulbs/%s' % bulbs.__version__)
if (config.timeout is not None):
self.http = httplib2.Http(timeout=int(config.timeout))
else:
self.http = httplib2.Http()
self._add_credentials(config.username, config.password)
self._initialize()
|
Initializes a client object.
:param root_uri: the base URL of Rexster.
|
bulbs/rest.py
|
__init__
|
teleological/bulbs
| 234
|
python
|
def __init__(self, config, content_type):
'\n Initializes a client object.\n\n :param root_uri: the base URL of Rexster.\n\n '
self.config = config
self.content_type = content_type
self.user_agent = ('bulbs/%s' % bulbs.__version__)
if (config.timeout is not None):
self.http = httplib2.Http(timeout=int(config.timeout))
else:
self.http = httplib2.Http()
self._add_credentials(config.username, config.password)
self._initialize()
|
def __init__(self, config, content_type):
'\n Initializes a client object.\n\n :param root_uri: the base URL of Rexster.\n\n '
self.config = config
self.content_type = content_type
self.user_agent = ('bulbs/%s' % bulbs.__version__)
if (config.timeout is not None):
self.http = httplib2.Http(timeout=int(config.timeout))
else:
self.http = httplib2.Http()
self._add_credentials(config.username, config.password)
self._initialize()<|docstring|>Initializes a client object.
:param root_uri: the base URL of Rexster.<|endoftext|>
|
228da5a6ca33132e3ce0560bc575f466a17f1e357af539e12c141f06998fd709
|
def get(self, path, params=None):
'\n Convenience method that sends GET requests to the client.\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
return self.request(GET, path, params)
|
Convenience method that sends GET requests to the client.
:param path: Path to the server resource, relative to the root URI.
:type path: str
:param params: Optional URI params for the resource.
:type params: dict
:rtype: Response
|
bulbs/rest.py
|
get
|
teleological/bulbs
| 234
|
python
|
def get(self, path, params=None):
'\n Convenience method that sends GET requests to the client.\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
return self.request(GET, path, params)
|
def get(self, path, params=None):
'\n Convenience method that sends GET requests to the client.\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
return self.request(GET, path, params)<|docstring|>Convenience method that sends GET requests to the client.
:param path: Path to the server resource, relative to the root URI.
:type path: str
:param params: Optional URI params for the resource.
:type params: dict
:rtype: Response<|endoftext|>
|
3fb4cd3ed50c9cdf8cec48fe2f96bbeae58017450d99b767e69274d6ebbbaaf3
|
def put(self, path, params=None):
'\n Convenience method that sends PUT requests to the client.\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
return self.request(PUT, path, params)
|
Convenience method that sends PUT requests to the client.
:param path: Path to the server resource, relative to the root URI.
:type path: str
:param params: Optional URI params for the resource.
:type params: dict
:rtype: Response
|
bulbs/rest.py
|
put
|
teleological/bulbs
| 234
|
python
|
def put(self, path, params=None):
    """Send a PUT request to the server.

    :param path: Path to the server resource, relative to the root URI.
    :type path: str
    :param params: Optional URI params for the resource.
    :type params: dict
    :rtype: Response
    """
    http_method = PUT
    return self.request(http_method, path, params)
|
def put(self, path, params=None):
'\n Convenience method that sends PUT requests to the client.\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
return self.request(PUT, path, params)<|docstring|>Convenience method that sends PUT requests to the client.
:param path: Path to the server resource, relative to the root URI.
:type path: str
:param params: Optional URI params for the resource.
:type params: dict
:rtype: Response<|endoftext|>
|
fa6e5cb3e7cc5eb0badbc2bf384f209677a9e07340b8c5b6d27d97b8f62c2964
|
def post(self, path, params=None):
'\n Convenience method that sends POST requests to the client.\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
return self.request(POST, path, params)
|
Convenience method that sends POST requests to the client.
:param path: Path to the server resource, relative to the root URI.
:type path: str
:param params: Optional URI params for the resource.
:type params: dict
:rtype: Response
|
bulbs/rest.py
|
post
|
teleological/bulbs
| 234
|
python
|
def post(self, path, params=None):
    """Send a POST request to the server.

    :param path: Path to the server resource, relative to the root URI.
    :type path: str
    :param params: Optional URI params for the resource.
    :type params: dict
    :rtype: Response
    """
    http_method = POST
    return self.request(http_method, path, params)
|
def post(self, path, params=None):
'\n Convenience method that sends POST requests to the client.\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
return self.request(POST, path, params)<|docstring|>Convenience method that sends POST requests to the client.
:param path: Path to the server resource, relative to the root URI.
:type path: str
:param params: Optional URI params for the resource.
:type params: dict
:rtype: Response<|endoftext|>
|
26ddd460e2072737c9bb55c7c0c0cecf436693ac68a5b8dbbf8592f5d34456a7
|
def delete(self, path, params=None):
'\n Convenience method that sends DELETE requests to the client.\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
return self.request(DELETE, path, params)
|
Convenience method that sends DELETE requests to the client.
:param path: Path to the server resource, relative to the root URI.
:type path: str
:param params: Optional URI params for the resource.
:type params: dict
:rtype: Response
|
bulbs/rest.py
|
delete
|
teleological/bulbs
| 234
|
python
|
def delete(self, path, params=None):
    """Send a DELETE request to the server.

    :param path: Path to the server resource, relative to the root URI.
    :type path: str
    :param params: Optional URI params for the resource.
    :type params: dict
    :rtype: Response
    """
    http_method = DELETE
    return self.request(http_method, path, params)
|
def delete(self, path, params=None):
'\n Convenience method that sends DELETE requests to the client.\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
return self.request(DELETE, path, params)<|docstring|>Convenience method that sends DELETE requests to the client.
:param path: Path to the server resource, relative to the root URI.
:type path: str
:param params: Optional URI params for the resource.
:type params: dict
:rtype: Response<|endoftext|>
|
f9bbbbfb252563e13bd68a3e1712af4bd14feb487dcb592cd4bcc6f1583570a1
|
def send(self, message):
'\n Convenience method that sends request messages to the client.\n\n :param message: Tuple containing: (HTTP method, path, params)\n :type path: tuple\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
(method, path, params) = message
return self.request(method, path, params)
|
Convenience method that sends request messages to the client.
:param message: Tuple containing: (HTTP method, path, params)
:type path: tuple
:param params: Optional URI params for the resource.
:type params: dict
:rtype: Response
|
bulbs/rest.py
|
send
|
teleological/bulbs
| 234
|
python
|
def send(self, message):
    """Dispatch a pre-built request message.

    :param message: Tuple containing (HTTP method, path, params).
    :type message: tuple
    :rtype: Response
    """
    verb, target, query = message
    return self.request(verb, target, query)
|
def send(self, message):
'\n Convenience method that sends request messages to the client.\n\n :param message: Tuple containing: (HTTP method, path, params)\n :type path: tuple\n\n :param params: Optional URI params for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
(method, path, params) = message
return self.request(method, path, params)<|docstring|>Convenience method that sends request messages to the client.
:param message: Tuple containing: (HTTP method, path, params)
:type path: tuple
:param params: Optional URI params for the resource.
:type params: dict
:rtype: Response<|endoftext|>
|
28ea1514a02a00cba369681412d289300314881bea6d32c6cdf4e0e4112f2b63
|
def request(self, method, path, params):
'\n Sends a request to the client.\n\n :param method: HTTP method: GET, PUT, POST, or DELETE.\n :type method: str\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI parameters for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
(uri, method, body, headers) = self._build_request_args(path, method, params)
self._display_debug(uri, method, body)
http_resp = self.http.request(uri, method, body, headers)
return self.response_class(http_resp, self.config)
|
Sends a request to the client.
:param method: HTTP method: GET, PUT, POST, or DELETE.
:type method: str
:param path: Path to the server resource, relative to the root URI.
:type path: str
:param params: Optional URI parameters for the resource.
:type params: dict
:rtype: Response
|
bulbs/rest.py
|
request
|
teleological/bulbs
| 234
|
python
|
def request(self, method, path, params):
    """Send a request to the server and wrap the raw reply.

    :param method: HTTP method: GET, PUT, POST, or DELETE.
    :type method: str
    :param path: Path to the server resource, relative to the root URI.
    :type path: str
    :param params: Optional URI parameters for the resource.
    :type params: dict
    :rtype: Response
    """
    # Note: the builder takes (path, method, ...) but returns them in
    # (uri, method, ...) order.
    request_args = self._build_request_args(path, method, params)
    uri, method, body, headers = request_args
    self._display_debug(uri, method, body)
    raw_resp = self.http.request(uri, method, body, headers)
    return self.response_class(raw_resp, self.config)
|
def request(self, method, path, params):
'\n Sends a request to the client.\n\n :param method: HTTP method: GET, PUT, POST, or DELETE.\n :type method: str\n\n :param path: Path to the server resource, relative to the root URI.\n :type path: str\n\n :param params: Optional URI parameters for the resource.\n :type params: dict\n\n :rtype: Response\n\n '
(uri, method, body, headers) = self._build_request_args(path, method, params)
self._display_debug(uri, method, body)
http_resp = self.http.request(uri, method, body, headers)
return self.response_class(http_resp, self.config)<|docstring|>Sends a request to the client.
:param method: HTTP method: GET, PUT, POST, or DELETE.
:type method: str
:param path: Path to the server resource, relative to the root URI.
:type path: str
:param params: Optional URI parameters for the resource.
:type params: dict
:rtype: Response<|endoftext|>
|
ce88c15ba6c716180c551f23395242370efa774e502896b6044f336e13500345
|
def main():
'\n main func\n :return:\n '
heuristic_type = 'euclidean'
env = Map(21, 21, heuristic_type=heuristic_type)
(obs, free) = obstacles.get_rug_obs(env.x_range, env.y_range)
env.update_obs(obs, free)
start = (1, 1)
goal = (20, 20)
demo = AnytimeDStar(env, start, goal, 2.0, heuristic_type)
demo.run()
|
main func
:return:
|
algorithm/A_star/AnytimeDStar.py
|
main
|
G-H-Li/PathNavigation
| 1
|
python
|
def main():
    """Demo entry point: run Anytime D* on a 21x21 'rug' obstacle map.

    :return: None
    """
    htype = 'euclidean'
    grid = Map(21, 21, heuristic_type=htype)
    blocked, cleared = obstacles.get_rug_obs(grid.x_range, grid.y_range)
    grid.update_obs(blocked, cleared)
    origin = (1, 1)
    target = (20, 20)
    planner = AnytimeDStar(grid, origin, target, 2.0, htype)
    planner.run()
|
def main():
'\n main func\n :return:\n '
heuristic_type = 'euclidean'
env = Map(21, 21, heuristic_type=heuristic_type)
(obs, free) = obstacles.get_rug_obs(env.x_range, env.y_range)
env.update_obs(obs, free)
start = (1, 1)
goal = (20, 20)
demo = AnytimeDStar(env, start, goal, 2.0, heuristic_type)
demo.run()<|docstring|>main func
:return:<|endoftext|>
|
ab0328a5abc501245cb5014db69a6d6e453f943e480c133c8c7aae35d4c08607
|
def __init__(self, env, start, goal, eps, heuristic_type):
'\n 构造函数\n :param env: 算法运行的地图 Map\n :param eps: 膨胀因子\n :param heuristic_type: 启发类型,计算距离的方法\n '
self.heuristic_type = heuristic_type
self.eps = eps
self.goal = goal
self.start = start
self.env = env
(self.g, self.rhs, self.OPEN, self.INCONS) = ({}, {}, {}, {})
self.CLOSED = set()
self.init_data()
self.visited = set()
self.FLAG = 'Anytime D Star:'
self.path = []
self.error = 0
|
构造函数
:param env: 算法运行的地图 Map
:param eps: 膨胀因子
:param heuristic_type: 启发类型,计算距离的方法
|
algorithm/A_star/AnytimeDStar.py
|
__init__
|
G-H-Li/PathNavigation
| 1
|
python
|
def __init__(self, env, start, goal, eps, heuristic_type):
    """Set up an Anytime D* planner.

    :param env: map the algorithm runs on (Map)
    :param start: start cell
    :param goal: goal cell
    :param eps: heuristic inflation factor
    :param heuristic_type: name of the distance metric to use
    """
    self.env = env
    self.start = start
    self.goal = goal
    self.eps = eps
    self.heuristic_type = heuristic_type
    # Cost-to-come, one-step lookahead, priority queue and the
    # inconsistent-node buffer used between eps rounds.
    self.g = {}
    self.rhs = {}
    self.OPEN = {}
    self.INCONS = {}
    self.CLOSED = set()
    self.init_data()
    self.visited = set()
    self.FLAG = 'Anytime D Star:'
    self.path = []
    self.error = 0
|
def __init__(self, env, start, goal, eps, heuristic_type):
'\n 构造函数\n :param env: 算法运行的地图 Map\n :param eps: 膨胀因子\n :param heuristic_type: 启发类型,计算距离的方法\n '
self.heuristic_type = heuristic_type
self.eps = eps
self.goal = goal
self.start = start
self.env = env
(self.g, self.rhs, self.OPEN, self.INCONS) = ({}, {}, {}, {})
self.CLOSED = set()
self.init_data()
self.visited = set()
self.FLAG = 'Anytime D Star:'
self.path = []
self.error = 0<|docstring|>构造函数
:param env: 算法运行的地图 Map
:param eps: 膨胀因子
:param heuristic_type: 启发类型,计算距离的方法<|endoftext|>
|
9d184f699f28c235c2776efe7fea9c936b3fc7456921f02667a0769d73713dd5
|
def get_h(self, start, goal):
'\n 根据起始点、结束点、启发方法来计算两点之间距离\n :param goal: 结束点\n :param start: 起始点\n :return:\n '
heuristic_type = self.heuristic_type
if (heuristic_type == 'manhattan'):
return (abs((goal[0] - start[0])) + abs((goal[1] - start[1])))
elif (heuristic_type == 'euclidean'):
return math.hypot((goal[0] - start[0]), (goal[1] - start[1]))
|
根据起始点、结束点、启发方法来计算两点之间距离
:param goal: 结束点
:param start: 起始点
:return:
|
algorithm/A_star/AnytimeDStar.py
|
get_h
|
G-H-Li/PathNavigation
| 1
|
python
|
def get_h(self, start, goal):
    """Heuristic distance between two cells.

    :param start: source cell (x, y)
    :param goal: target cell (x, y)
    :return: Manhattan or Euclidean distance, per ``self.heuristic_type``
    """
    dx = goal[0] - start[0]
    dy = goal[1] - start[1]
    if self.heuristic_type == 'manhattan':
        return abs(dx) + abs(dy)
    if self.heuristic_type == 'euclidean':
        return math.hypot(dx, dy)
|
def get_h(self, start, goal):
'\n 根据起始点、结束点、启发方法来计算两点之间距离\n :param goal: 结束点\n :param start: 起始点\n :return:\n '
heuristic_type = self.heuristic_type
if (heuristic_type == 'manhattan'):
return (abs((goal[0] - start[0])) + abs((goal[1] - start[1])))
elif (heuristic_type == 'euclidean'):
return math.hypot((goal[0] - start[0]), (goal[1] - start[1]))<|docstring|>根据起始点、结束点、启发方法来计算两点之间距离
:param goal: 结束点
:param start: 起始点
:return:<|endoftext|>
|
83646346b5c7fbe779c9776510e174e1e04e89ca52c50bf25cde7e5c1ad4a27d
|
def get_key(self, s):
'\n 获取输入点的key值\n :param s: 数组 点\n :return: [start至当前点的代价, 当前点到goal的代价]\n '
if (self.g[s] > self.rhs[s]):
return [(self.rhs[s] + (self.eps * self.get_h(self.start, s))), self.rhs[s]]
else:
return [(self.g[s] + self.get_h(self.start, s)), self.g[s]]
|
获取输入点的key值
:param s: 数组 点
:return: [start至当前点的代价, 当前点到goal的代价]
|
algorithm/A_star/AnytimeDStar.py
|
get_key
|
G-H-Li/PathNavigation
| 1
|
python
|
def get_key(self, s):
    """Priority key of node *s* for the OPEN queue.

    :param s: cell (x, y)
    :return: [f-value, tie-breaker]; the heuristic is inflated by eps
             only on the overconsistent (g > rhs) branch, per Anytime D*.
    """
    h = self.get_h(self.start, s)
    if self.g[s] > self.rhs[s]:
        return [self.rhs[s] + self.eps * h, self.rhs[s]]
    return [self.g[s] + h, self.g[s]]
|
def get_key(self, s):
'\n 获取输入点的key值\n :param s: 数组 点\n :return: [start至当前点的代价, 当前点到goal的代价]\n '
if (self.g[s] > self.rhs[s]):
return [(self.rhs[s] + (self.eps * self.get_h(self.start, s))), self.rhs[s]]
else:
return [(self.g[s] + self.get_h(self.start, s)), self.g[s]]<|docstring|>获取输入点的key值
:param s: 数组 点
:return: [start至当前点的代价, 当前点到goal的代价]<|endoftext|>
|
0b0fa478ad4d1a5d8913a87f8d28cbd32b5a198eb305d2d76668e7519aeb5e09
|
def is_reachable(self, start, goal):
'\n 判断此点motion可达的点是否是障碍物\n :param start: 起始点\n :param goal: 结束点\n :return: boolean\n '
if ((start[0] != goal[0]) and (start[1] != goal[1])):
return True
heuristic_type = self.heuristic_type
if ((start in self.env.obs) or (goal in self.env.obs)):
return False
elif (heuristic_type == 'manhattan'):
return True
elif (heuristic_type == 'euclidean'):
if ((start[0] - goal[0]) == (goal[1] - start[1])):
s1 = (min(start[0], goal[0]), min(start[1], goal[1]))
s2 = (max(start[0], goal[0]), max(start[1], goal[1]))
else:
s1 = (min(start[0], goal[0]), max(start[1], goal[1]))
s2 = (max(start[0], goal[0]), min(start[1], goal[1]))
if ((s1 in self.env.obs) and (s2 in self.env.obs)):
return False
return True
|
判断此点motion可达的点是否是障碍物
:param start: 起始点
:param goal: 结束点
:return: boolean
|
algorithm/A_star/AnytimeDStar.py
|
is_reachable
|
G-H-Li/PathNavigation
| 1
|
python
|
def is_reachable(self, start, goal):
    """Return False when the step from *start* to *goal* is blocked.

    :param start: source cell
    :param goal: adjacent target cell
    :return: boolean
    """
    # NOTE(review): this early return fires for every diagonal step
    # (both coordinates differ), so the euclidean corner-cutting check
    # below is unreachable for diagonals — confirm this is intended.
    if ((start[0] != goal[0]) and (start[1] != goal[1])):
        return True
    heuristic_type = self.heuristic_type
    # A step into or out of an obstacle cell is never reachable.
    if ((start in self.env.obs) or (goal in self.env.obs)):
        return False
    elif (heuristic_type == 'manhattan'):
        # 4-connected moves need no corner check.
        return True
    elif (heuristic_type == 'euclidean'):
        # For a diagonal step, find the two cells flanking the move; if
        # both are obstacles the diagonal would cut through a wall.
        if ((start[0] - goal[0]) == (goal[1] - start[1])):
            s1 = (min(start[0], goal[0]), min(start[1], goal[1]))
            s2 = (max(start[0], goal[0]), max(start[1], goal[1]))
        else:
            s1 = (min(start[0], goal[0]), max(start[1], goal[1]))
            s2 = (max(start[0], goal[0]), min(start[1], goal[1]))
        if ((s1 in self.env.obs) and (s2 in self.env.obs)):
            return False
    return True
|
def is_reachable(self, start, goal):
'\n 判断此点motion可达的点是否是障碍物\n :param start: 起始点\n :param goal: 结束点\n :return: boolean\n '
if ((start[0] != goal[0]) and (start[1] != goal[1])):
return True
heuristic_type = self.heuristic_type
if ((start in self.env.obs) or (goal in self.env.obs)):
return False
elif (heuristic_type == 'manhattan'):
return True
elif (heuristic_type == 'euclidean'):
if ((start[0] - goal[0]) == (goal[1] - start[1])):
s1 = (min(start[0], goal[0]), min(start[1], goal[1]))
s2 = (max(start[0], goal[0]), max(start[1], goal[1]))
else:
s1 = (min(start[0], goal[0]), max(start[1], goal[1]))
s2 = (max(start[0], goal[0]), min(start[1], goal[1]))
if ((s1 in self.env.obs) and (s2 in self.env.obs)):
return False
return True<|docstring|>判断此点motion可达的点是否是障碍物
:param start: 起始点
:param goal: 结束点
:return: boolean<|endoftext|>
|
2717e9d9c50874cd09c5a653e38b47a81338495c3e2f49d95d3d4832dab60089
|
def get_cost(self, start, goal):
'\n 根据起止点计算代价,一般是临近点\n :param start:\n :param goal:\n :return:\n '
if (not self.is_reachable(start, goal)):
return float('inf')
else:
return self.get_h(start, goal)
|
根据起止点计算代价,一般是临近点
:param start:
:param goal:
:return:
|
algorithm/A_star/AnytimeDStar.py
|
get_cost
|
G-H-Li/PathNavigation
| 1
|
python
|
def get_cost(self, start, goal):
    """Edge cost between neighbouring cells.

    :param start: source cell
    :param goal: target cell
    :return: heuristic distance, or ``inf`` when the step is blocked
    """
    if self.is_reachable(start, goal):
        return self.get_h(start, goal)
    return float('inf')
|
def get_cost(self, start, goal):
'\n 根据起止点计算代价,一般是临近点\n :param start:\n :param goal:\n :return:\n '
if (not self.is_reachable(start, goal)):
return float('inf')
else:
return self.get_h(start, goal)<|docstring|>根据起止点计算代价,一般是临近点
:param start:
:param goal:
:return:<|endoftext|>
|
a95d9e8632865dc2e436b5939dc65241e7bb8af8ca0ca1be4486424a3206d0ae
|
def get_neighbor(self, s):
'\n 返回当前点可访问的临近点\n :param s: node\n :return: 集合\n '
neighbors = set()
for u in self.env.motions:
s_next = tuple([(s[i] + u[i]) for i in range(2)])
if ((s_next not in self.env.obs) and (0 < s_next[0] < self.env.x_range) and (0 < s_next[1] < self.env.y_range)):
neighbors.add(s_next)
return neighbors
|
返回当前点可访问的临近点
:param s: node
:return: 集合
|
algorithm/A_star/AnytimeDStar.py
|
get_neighbor
|
G-H-Li/PathNavigation
| 1
|
python
|
def get_neighbor(self, s):
    """Collect the traversable neighbours of cell *s*.

    :param s: cell (x, y)
    :return: set of in-bounds, non-obstacle neighbour cells
    """
    found = set()
    for dx, dy in self.env.motions:
        nxt = (s[0] + dx, s[1] + dy)
        # Border cells (coordinate 0 or the range limit) are excluded.
        in_bounds = (0 < nxt[0] < self.env.x_range
                     and 0 < nxt[1] < self.env.y_range)
        if in_bounds and nxt not in self.env.obs:
            found.add(nxt)
    return found
|
def get_neighbor(self, s):
'\n 返回当前点可访问的临近点\n :param s: node\n :return: 集合\n '
neighbors = set()
for u in self.env.motions:
s_next = tuple([(s[i] + u[i]) for i in range(2)])
if ((s_next not in self.env.obs) and (0 < s_next[0] < self.env.x_range) and (0 < s_next[1] < self.env.y_range)):
neighbors.add(s_next)
return neighbors<|docstring|>返回当前点可访问的临近点
:param s: node
:return: 集合<|endoftext|>
|
12a8a27147f64381f19879f6fcf0aa5f7a5dba3fd7bf3774fc81c860c71ea18a
|
def get_mini_key(self):
'\n 获取OPEN字典中的最key值对\n :return:key , value\n '
if (len(self.OPEN) != 0):
s = min(self.OPEN, key=self.OPEN.get)
return (s, self.OPEN[s])
else:
return (None, None)
|
获取OPEN字典中的最key值对
:return:key , value
|
algorithm/A_star/AnytimeDStar.py
|
get_mini_key
|
G-H-Li/PathNavigation
| 1
|
python
|
def get_mini_key(self):
    """Peek at the OPEN entry with the smallest key.

    :return: (node, key) pair, or (None, None) when OPEN is empty
    """
    if not self.OPEN:
        return (None, None)
    best = min(self.OPEN, key=self.OPEN.get)
    return (best, self.OPEN[best])
|
def get_mini_key(self):
'\n 获取OPEN字典中的最key值对\n :return:key , value\n '
if (len(self.OPEN) != 0):
s = min(self.OPEN, key=self.OPEN.get)
return (s, self.OPEN[s])
else:
return (None, None)<|docstring|>获取OPEN字典中的最key值对
:return:key , value<|endoftext|>
|
d45a454d8ea827dc60e96edc13252cd52792f9b8dd09100ac300fcbb539f1a0f
|
def update_state(self, s):
'\n 更新节点状态\n :param s: node\n :return:\n '
if (s != self.goal):
for node in self.get_neighbor(s):
self.rhs[s] = min(self.rhs[s], (self.g[node] + self.get_cost(s, node)))
if (s in self.OPEN):
self.OPEN.pop(s)
if (self.g[s] != self.rhs[s]):
if (s not in self.CLOSED):
self.OPEN[s] = self.get_key(s)
else:
self.INCONS[s] = 0
|
更新节点状态
:param s: node
:return:
|
algorithm/A_star/AnytimeDStar.py
|
update_state
|
G-H-Li/PathNavigation
| 1
|
python
|
def update_state(self, s):
    """Recompute rhs(s) and requeue *s* when it is inconsistent.

    :param s: node
    :return: None
    """
    if (s != self.goal):
        # NOTE(review): rhs is only lowered from its current value, not
        # recomputed from scratch over the successors, so rhs can never
        # increase here — confirm against the Anytime D* formulation.
        for node in self.get_neighbor(s):
            self.rhs[s] = min(self.rhs[s], (self.g[node] + self.get_cost(s, node)))
    if (s in self.OPEN):
        self.OPEN.pop(s)
    if (self.g[s] != self.rhs[s]):
        if (s not in self.CLOSED):
            # Inconsistent and not yet expanded this round: (re)queue it.
            self.OPEN[s] = self.get_key(s)
        else:
            # Already expanded: park in INCONS until the next eps round.
            self.INCONS[s] = 0
|
def update_state(self, s):
'\n 更新节点状态\n :param s: node\n :return:\n '
if (s != self.goal):
for node in self.get_neighbor(s):
self.rhs[s] = min(self.rhs[s], (self.g[node] + self.get_cost(s, node)))
if (s in self.OPEN):
self.OPEN.pop(s)
if (self.g[s] != self.rhs[s]):
if (s not in self.CLOSED):
self.OPEN[s] = self.get_key(s)
else:
self.INCONS[s] = 0<|docstring|>更新节点状态
:param s: node
:return:<|endoftext|>
|
21bb6827892739b36addd433b8fbde00c5e6cf5cab2681e6e0093f0e74837f46
|
def compute_or_improve_path(self):
'\n 计算或者改进路径\n :return:\n '
while True:
(key, value) = self.get_mini_key()
if ((key is None) or ((value >= self.get_key(self.start)) and (self.rhs[self.start] == self.g[self.start]))):
break
self.OPEN.pop(key)
self.visited.add(key)
if (self.g[key] > self.rhs[key]):
self.g[key] = self.rhs[key]
self.CLOSED.add(key)
for node in self.get_neighbor(key):
self.update_state(node)
else:
self.g[key] = float('inf')
for node in self.get_neighbor(key):
self.update_state(node)
self.update_state(key)
|
计算或者改进路径
:return:
|
algorithm/A_star/AnytimeDStar.py
|
compute_or_improve_path
|
G-H-Li/PathNavigation
| 1
|
python
|
def compute_or_improve_path(self):
    """Expand nodes from OPEN until the start node is consistent.

    :return: None
    """
    while True:
        (key, value) = self.get_mini_key()
        # Stop when OPEN is exhausted, or the best key is no better than
        # the start's key and the start itself is consistent.
        if ((key is None) or ((value >= self.get_key(self.start)) and (self.rhs[self.start] == self.g[self.start]))):
            break
        self.OPEN.pop(key)
        self.visited.add(key)
        if (self.g[key] > self.rhs[key]):
            # Overconsistent: adopt the better rhs value, close the node
            # and propagate to its neighbours.
            self.g[key] = self.rhs[key]
            self.CLOSED.add(key)
            for node in self.get_neighbor(key):
                self.update_state(node)
        else:
            # Underconsistent: reset g and re-evaluate the node and its
            # neighbours.
            self.g[key] = float('inf')
            for node in self.get_neighbor(key):
                self.update_state(node)
            self.update_state(key)
|
def compute_or_improve_path(self):
'\n 计算或者改进路径\n :return:\n '
while True:
(key, value) = self.get_mini_key()
if ((key is None) or ((value >= self.get_key(self.start)) and (self.rhs[self.start] == self.g[self.start]))):
break
self.OPEN.pop(key)
self.visited.add(key)
if (self.g[key] > self.rhs[key]):
self.g[key] = self.rhs[key]
self.CLOSED.add(key)
for node in self.get_neighbor(key):
self.update_state(node)
else:
self.g[key] = float('inf')
for node in self.get_neighbor(key):
self.update_state(node)
self.update_state(key)<|docstring|>计算或者改进路径
:return:<|endoftext|>
|
9f1a34c3b216b5c9cca3dcb59d77a4afce758f5add4249f33cf428d6ec01c2de
|
def publish_path(self):
'\n 正向搜索查找路径\n :return: 路径列表\n '
s = self.start
self.path = [s]
while True:
if (s == self.goal):
break
if (len(self.path) > (self.env.x_range * self.env.y_range)):
print(self.FLAG, 'Do not find path')
self.error += 1
break
s = self.find_mini_next_node(s)
self.path.append(s)
|
正向搜索查找路径
:return: 路径列表
|
algorithm/A_star/AnytimeDStar.py
|
publish_path
|
G-H-Li/PathNavigation
| 1
|
python
|
def publish_path(self):
    """Trace the current best path from start to goal into ``self.path``.

    Follows greedy one-step lookups via ``find_mini_next_node``; gives up
    (and bumps ``self.error``) once the path grows past the cell count.
    """
    node = self.start
    self.path = [node]
    limit = self.env.x_range * self.env.y_range
    while node != self.goal:
        if len(self.path) > limit:
            print(self.FLAG, 'Do not find path')
            self.error += 1
            break
        node = self.find_mini_next_node(node)
        self.path.append(node)
|
def publish_path(self):
'\n 正向搜索查找路径\n :return: 路径列表\n '
s = self.start
self.path = [s]
while True:
if (s == self.goal):
break
if (len(self.path) > (self.env.x_range * self.env.y_range)):
print(self.FLAG, 'Do not find path')
self.error += 1
break
s = self.find_mini_next_node(s)
self.path.append(s)<|docstring|>正向搜索查找路径
:return: 路径列表<|endoftext|>
|
09ac79382d89272ef02efada884db156602ae21c89889e6cfc9149e0700c56b7
|
def detect_changes(self, event):
'\n 此处检测点击事件,并执行路径规划\n 小变动与大变动的区别与应用场景有关,此处根据环境变化次数进行区分\n :param event: 点击事件\n :return:\n '
(x, y) = (event.xdata, event.ydata)
if ((x < 0) or (x > (self.env.x_range - 1)) or (y < 0) or (y > (self.env.y_range - 1))):
print(self.FLAG, 'Please click right area!')
else:
node = (int(x), int(y))
print(self.FLAG, 'position:', node)
if (node not in self.env.obs):
if (node == self.start):
self.error += 1
self.start = self.find_mini_next_node(self.start)
if (node == self.goal):
self.error += 1
self.goal = self.find_mini_next_node(self.goal)
self.env.obs.add(node)
self.g[node] = float('inf')
self.rhs[node] = float('inf')
else:
self.env.obs.remove(node)
self.update_state(node)
for sn in self.get_neighbor(node):
self.update_state(sn)
if (self.error >= 1):
self.error = 0
self.eps += 1.5
self.init_data()
self.optimize_path()
else:
while True:
if (len(self.INCONS) == 0):
break
self.OPEN.update(self.INCONS)
for s in self.OPEN:
self.OPEN[s] = self.get_key(s)
self.CLOSED = set()
self.explore_path()
if (self.eps <= 1.0):
break
plot_after_compute()
|
此处检测点击事件,并执行路径规划
小变动与大变动的区别与应用场景有关,此处根据环境变化次数进行区分
:param event: 点击事件
:return:
|
algorithm/A_star/AnytimeDStar.py
|
detect_changes
|
G-H-Li/PathNavigation
| 1
|
python
|
def detect_changes(self, event):
    """Handle a map click and replan.

    A click toggles an obstacle cell. "Small" vs. "large" changes are
    distinguished here by how often replanning has failed (self.error).

    :param event: button-click event (provides .xdata / .ydata)
    :return: None
    """
    (x, y) = (event.xdata, event.ydata)
    if ((x < 0) or (x > (self.env.x_range - 1)) or (y < 0) or (y > (self.env.y_range - 1))):
        print(self.FLAG, 'Please click right area!')
    else:
        node = (int(x), int(y))
        print(self.FLAG, 'position:', node)
        if (node not in self.env.obs):
            # Adding an obstacle: if it lands on start/goal, nudge that
            # endpoint to its best neighbour and count it as a failure.
            if (node == self.start):
                self.error += 1
                self.start = self.find_mini_next_node(self.start)
            if (node == self.goal):
                self.error += 1
                self.goal = self.find_mini_next_node(self.goal)
            self.env.obs.add(node)
            self.g[node] = float('inf')
            self.rhs[node] = float('inf')
        else:
            # Removing an obstacle frees the cell again.
            self.env.obs.remove(node)
        self.update_state(node)
        for sn in self.get_neighbor(node):
            self.update_state(sn)
        if (self.error >= 1):
            # Large change: restart with an inflated eps and fresh data.
            self.error = 0
            self.eps += 1.5
            self.init_data()
            self.optimize_path()
        else:
            # Small change: fold INCONS back into OPEN, rekey everything
            # and keep refining until eps anneals down to 1.0.
            while True:
                if (len(self.INCONS) == 0):
                    break
                self.OPEN.update(self.INCONS)
                for s in self.OPEN:
                    self.OPEN[s] = self.get_key(s)
                self.CLOSED = set()
                self.explore_path()
                if (self.eps <= 1.0):
                    break
            plot_after_compute()
|
def detect_changes(self, event):
'\n 此处检测点击事件,并执行路径规划\n 小变动与大变动的区别与应用场景有关,此处根据环境变化次数进行区分\n :param event: 点击事件\n :return:\n '
(x, y) = (event.xdata, event.ydata)
if ((x < 0) or (x > (self.env.x_range - 1)) or (y < 0) or (y > (self.env.y_range - 1))):
print(self.FLAG, 'Please click right area!')
else:
node = (int(x), int(y))
print(self.FLAG, 'position:', node)
if (node not in self.env.obs):
if (node == self.start):
self.error += 1
self.start = self.find_mini_next_node(self.start)
if (node == self.goal):
self.error += 1
self.goal = self.find_mini_next_node(self.goal)
self.env.obs.add(node)
self.g[node] = float('inf')
self.rhs[node] = float('inf')
else:
self.env.obs.remove(node)
self.update_state(node)
for sn in self.get_neighbor(node):
self.update_state(sn)
if (self.error >= 1):
self.error = 0
self.eps += 1.5
self.init_data()
self.optimize_path()
else:
while True:
if (len(self.INCONS) == 0):
break
self.OPEN.update(self.INCONS)
for s in self.OPEN:
self.OPEN[s] = self.get_key(s)
self.CLOSED = set()
self.explore_path()
if (self.eps <= 1.0):
break
plot_after_compute()<|docstring|>此处检测点击事件,并执行路径规划
小变动与大变动的区别与应用场景有关,此处根据环境变化次数进行区分
:param event: 点击事件
:return:<|endoftext|>
|
8a7c21fbcb32888aabc5fff38d879d95591f188e3d51fe6dc7a36f15762a0699
|
def run(self):
'\n 运行主方法\n :return:\n '
start_time = time.time()
self.explore_path()
self.optimize_path()
end_time = time.time()
print(self.FLAG, 'total cost:', (end_time - start_time), 's')
print(self.FLAG, 'start detect...')
plot_set_button_click_callback(func=self.detect_changes)
plot_show()
|
运行主方法
:return:
|
algorithm/A_star/AnytimeDStar.py
|
run
|
G-H-Li/PathNavigation
| 1
|
python
|
def run(self):
    """Main driver: plan, refine, then wait for map edits via clicks.

    :return: None
    """
    t0 = time.time()
    self.explore_path()
    self.optimize_path()
    t1 = time.time()
    print(self.FLAG, 'total cost:', (t1 - t0), 's')
    print(self.FLAG, 'start detect...')
    plot_set_button_click_callback(func=self.detect_changes)
    plot_show()
|
def run(self):
'\n 运行主方法\n :return:\n '
start_time = time.time()
self.explore_path()
self.optimize_path()
end_time = time.time()
print(self.FLAG, 'total cost:', (end_time - start_time), 's')
print(self.FLAG, 'start detect...')
plot_set_button_click_callback(func=self.detect_changes)
plot_show()<|docstring|>运行主方法
:return:<|endoftext|>
|
0b64123b99851be48873be37966605eb87379b87653fe46d6adc9e26d51a52a6
|
@testmode.standalone
def test_state_freezing(self):
'Test that we freeze the state at the time when the timeout expires.'
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self) -> None:
super().__init__(5, {UUT.KEY: 0})
def run(self) -> dict:
for ix in range(15):
state = self.get_state_copy()
state[UUT.KEY] += 1
self.save_state(state)
time.sleep(1)
return state
uut = UUT()
result = uut.start()
self.assertGreaterEqual(result[UUT.KEY], 4)
self.assertLessEqual(result[UUT.KEY], 6)
|
Test that we freeze the state at the time when the timeout expires.
|
tests/test_lambdaexecutor.py
|
test_state_freezing
|
HumanCellAtlas/data-store
| 46
|
python
|
@testmode.standalone
def test_state_freezing(self):
    """Test that we freeze the state at the time when the timeout expires."""
    class UUT(lambdaexecutor.TimedThread[dict]):
        KEY = 'key'
        def __init__(self) -> None:
            # 5-second timeout; the counter under KEY starts at 0.
            super().__init__(5, {UUT.KEY: 0})
        def run(self) -> dict:
            # Bump the counter once per second for 15s; the 5s timeout
            # should freeze the saved state mid-loop.
            for ix in range(15):
                state = self.get_state_copy()
                state[UUT.KEY] += 1
                self.save_state(state)
                time.sleep(1)
            return state
    uut = UUT()
    result = uut.start()
    # Allow +/-1 tick of scheduling jitter around the 5-tick expectation.
    self.assertGreaterEqual(result[UUT.KEY], 4)
    self.assertLessEqual(result[UUT.KEY], 6)
|
@testmode.standalone
def test_state_freezing(self):
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self) -> None:
super().__init__(5, {UUT.KEY: 0})
def run(self) -> dict:
for ix in range(15):
state = self.get_state_copy()
state[UUT.KEY] += 1
self.save_state(state)
time.sleep(1)
return state
uut = UUT()
result = uut.start()
self.assertGreaterEqual(result[UUT.KEY], 4)
self.assertLessEqual(result[UUT.KEY], 6)<|docstring|>Test that we freeze the state at the time when the timeout expires.<|endoftext|>
|
1557e7324b737a52b7c4b2828a834573086d147a0ecdf2a66c9ad49ee622bb33
|
@testmode.standalone
def test_exit(self):
'Test that we save the final state that `run()` returns.'
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self) -> None:
super().__init__(5, {UUT.KEY: 0})
def run(self) -> dict:
state = self.get_state_copy()
for ix in range(15):
state[UUT.KEY] += 1
return state
uut = UUT()
result = uut.start()
self.assertEqual(result[UUT.KEY], 15)
|
Test that we save the final state that `run()` returns.
|
tests/test_lambdaexecutor.py
|
test_exit
|
HumanCellAtlas/data-store
| 46
|
python
|
@testmode.standalone
def test_exit(self):
    """Test that we save the final state that `run()` returns."""
    class Worker(lambdaexecutor.TimedThread[dict]):
        KEY = 'key'
        def __init__(self) -> None:
            super().__init__(5, {Worker.KEY: 0})
        def run(self) -> dict:
            # Finishes well within the timeout; the returned state must
            # be the one the caller sees.
            state = self.get_state_copy()
            for _ in range(15):
                state[Worker.KEY] += 1
            return state
    worker = Worker()
    final_state = worker.start()
    self.assertEqual(final_state[Worker.KEY], 15)
|
@testmode.standalone
def test_exit(self):
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self) -> None:
super().__init__(5, {UUT.KEY: 0})
def run(self) -> dict:
state = self.get_state_copy()
for ix in range(15):
state[UUT.KEY] += 1
return state
uut = UUT()
result = uut.start()
self.assertEqual(result[UUT.KEY], 15)<|docstring|>Test that we save the final state that `run()` returns.<|endoftext|>
|
d9f1ff11fedcb359f70d8a15a200c8d47d3c438f80083cf979839260a6743ef4
|
@testmode.standalone
def test_exception(self):
'Test that worker exceptions are exposed to caller'
class MyException(Exception):
pass
class UUT(lambdaexecutor.TimedThread[dict]):
def __init__(self) -> None:
super().__init__(5, {'key': 0})
def run(self) -> dict:
raise MyException()
uut = UUT()
with self.assertRaises(MyException):
uut.start()
|
Test that worker exceptions are exposed to caller
|
tests/test_lambdaexecutor.py
|
test_exception
|
HumanCellAtlas/data-store
| 46
|
python
|
@testmode.standalone
def test_exception(self):
class MyException(Exception):
pass
class UUT(lambdaexecutor.TimedThread[dict]):
def __init__(self) -> None:
super().__init__(5, {'key': 0})
def run(self) -> dict:
raise MyException()
uut = UUT()
with self.assertRaises(MyException):
uut.start()
|
@testmode.standalone
def test_exception(self):
class MyException(Exception):
pass
class UUT(lambdaexecutor.TimedThread[dict]):
def __init__(self) -> None:
super().__init__(5, {'key': 0})
def run(self) -> dict:
raise MyException()
uut = UUT()
with self.assertRaises(MyException):
uut.start()<|docstring|>Test that worker exceptions are exposed to caller<|endoftext|>
|
4c81ae4aaea2179418ae6bd033c1394b31413d1313cf5e0f7b76cc0d3ae6c197
|
@testmode.standalone
def test_immutable_constructor_state(self):
'Test that we make a copy of the state when we construct a TimedThread.'
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self, state: dict, event: threading.Event) -> None:
super().__init__(5, state)
self.event = event
def run(uut_self) -> dict:
uut_self.event.wait()
state = uut_self.get_state_copy()
self.assertEqual(state[UUT.KEY], 0)
return state
state = {UUT.KEY: 0}
event = threading.Event()
uut = UUT(state, event)
uut._start_async()
state[UUT.KEY] = 1
event.set()
uut._join()
|
Test that we make a copy of the state when we construct a TimedThread.
|
tests/test_lambdaexecutor.py
|
test_immutable_constructor_state
|
HumanCellAtlas/data-store
| 46
|
python
|
@testmode.standalone
def test_immutable_constructor_state(self):
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self, state: dict, event: threading.Event) -> None:
super().__init__(5, state)
self.event = event
def run(uut_self) -> dict:
uut_self.event.wait()
state = uut_self.get_state_copy()
self.assertEqual(state[UUT.KEY], 0)
return state
state = {UUT.KEY: 0}
event = threading.Event()
uut = UUT(state, event)
uut._start_async()
state[UUT.KEY] = 1
event.set()
uut._join()
|
@testmode.standalone
def test_immutable_constructor_state(self):
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self, state: dict, event: threading.Event) -> None:
super().__init__(5, state)
self.event = event
def run(uut_self) -> dict:
uut_self.event.wait()
state = uut_self.get_state_copy()
self.assertEqual(state[UUT.KEY], 0)
return state
state = {UUT.KEY: 0}
event = threading.Event()
uut = UUT(state, event)
uut._start_async()
state[UUT.KEY] = 1
event.set()
uut._join()<|docstring|>Test that we make a copy of the state when we construct a TimedThread.<|endoftext|>
|
9095952e32f728e8a90b33479def184868c52deee1a6c0541d5b18cfed7fad37
|
@testmode.standalone
def test_immutable_get_state(self):
'Test that we make a copy of the state when we get it.'
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self) -> None:
super().__init__(5, {UUT.KEY: 0})
def run(uut_self) -> dict:
state = uut_self.get_state_copy()
state[UUT.KEY] = 1
self.assertNotEqual(state, uut_self.get_state_copy())
return state
uut = UUT()
result = uut.start()
self.assertEqual(result[UUT.KEY], 1)
|
Test that we make a copy of the state when we get it.
|
tests/test_lambdaexecutor.py
|
test_immutable_get_state
|
HumanCellAtlas/data-store
| 46
|
python
|
@testmode.standalone
def test_immutable_get_state(self):
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self) -> None:
super().__init__(5, {UUT.KEY: 0})
def run(uut_self) -> dict:
state = uut_self.get_state_copy()
state[UUT.KEY] = 1
self.assertNotEqual(state, uut_self.get_state_copy())
return state
uut = UUT()
result = uut.start()
self.assertEqual(result[UUT.KEY], 1)
|
@testmode.standalone
def test_immutable_get_state(self):
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self) -> None:
super().__init__(5, {UUT.KEY: 0})
def run(uut_self) -> dict:
state = uut_self.get_state_copy()
state[UUT.KEY] = 1
self.assertNotEqual(state, uut_self.get_state_copy())
return state
uut = UUT()
result = uut.start()
self.assertEqual(result[UUT.KEY], 1)<|docstring|>Test that we make a copy of the state when we get it.<|endoftext|>
|
1138410613c42859ab35e85733869c8cf56380243d212de5a6f9586a08e2ddfd
|
@testmode.standalone
def test_immutable_set_state(self):
'Test that we make a copy of the state when we set it.'
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self) -> None:
super().__init__(5, {UUT.KEY: 0})
def run(uut_self) -> dict:
state = {UUT.KEY: 15}
uut_self.save_state(state)
state[UUT.KEY] = 5
self.assertEqual(uut_self.get_state_copy()[UUT.KEY], 15)
return state
uut = UUT()
result = uut.start()
self.assertEqual(result[UUT.KEY], 5)
|
Test that we make a copy of the state when we set it.
|
tests/test_lambdaexecutor.py
|
test_immutable_set_state
|
HumanCellAtlas/data-store
| 46
|
python
|
@testmode.standalone
def test_immutable_set_state(self):
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self) -> None:
super().__init__(5, {UUT.KEY: 0})
def run(uut_self) -> dict:
state = {UUT.KEY: 15}
uut_self.save_state(state)
state[UUT.KEY] = 5
self.assertEqual(uut_self.get_state_copy()[UUT.KEY], 15)
return state
uut = UUT()
result = uut.start()
self.assertEqual(result[UUT.KEY], 5)
|
@testmode.standalone
def test_immutable_set_state(self):
class UUT(lambdaexecutor.TimedThread[dict]):
KEY = 'key'
def __init__(self) -> None:
super().__init__(5, {UUT.KEY: 0})
def run(uut_self) -> dict:
state = {UUT.KEY: 15}
uut_self.save_state(state)
state[UUT.KEY] = 5
self.assertEqual(uut_self.get_state_copy()[UUT.KEY], 15)
return state
uut = UUT()
result = uut.start()
self.assertEqual(result[UUT.KEY], 5)<|docstring|>Test that we make a copy of the state when we set it.<|endoftext|>
|
8e6a417c846c6744c0fa3aaa0b4d6e4461ab65554ebd299bbd126019298eee43
|
def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None, offset=0):
'Return complex 2D Gabor filter kernel.\n\n Frequency and orientation representations of the Gabor filter are similar\n to those of the human visual system. It is especially suitable for texture\n classification using Gabor filter banks.\n\n Parameters\n ----------\n frequency : float\n Frequency of the harmonic function.\n theta : float\n Orientation in radians. If 0, the harmonic is in the x-direction.\n bandwidth : float\n The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`\n and `sigma_y` will decrease with increasing frequency. This value is\n ignored if `sigma_x` and `sigma_y` are set by the user.\n sigma_x, sigma_y : float\n Standard deviation in x- and y-directions. These directions apply to\n the kernel *before* rotation. If `theta = pi/2`, then the kernel is\n rotated 90 degrees so that `sigma_x` controls the *vertical* direction.\n offset : float, optional\n Phase offset of harmonic function in radians.\n\n Returns\n -------\n g : complex array\n Complex filter kernel.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Gabor_filter\n .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf\n\n '
if (sigma_x is None):
sigma_x = (_sigma_prefactor(bandwidth) / frequency)
if (sigma_y is None):
sigma_y = (_sigma_prefactor(bandwidth) / frequency)
n_stds = 3
x0 = np.ceil(max(np.abs(((n_stds * sigma_x) * np.cos(theta))), np.abs(((n_stds * sigma_y) * np.sin(theta))), 1))
y0 = np.ceil(max(np.abs(((n_stds * sigma_y) * np.cos(theta))), np.abs(((n_stds * sigma_x) * np.sin(theta))), 1))
(y, x) = np.mgrid[((- y0):(y0 + 1), (- x0):(x0 + 1))]
rotx = ((x * np.cos(theta)) + (y * np.sin(theta)))
roty = (((- x) * np.sin(theta)) + (y * np.cos(theta)))
g = np.zeros(y.shape, dtype=np.complex)
g[:] = np.exp(((- 0.5) * (((rotx ** 2) / (sigma_x ** 2)) + ((roty ** 2) / (sigma_y ** 2)))))
g /= (((2 * np.pi) * sigma_x) * sigma_y)
g *= np.exp((1j * ((((2 * np.pi) * frequency) * rotx) + offset)))
return g
|
Return complex 2D Gabor filter kernel.
Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system. It is especially suitable for texture
classification using Gabor filter banks.
Parameters
----------
frequency : float
Frequency of the harmonic function.
theta : float
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
g : complex array
Complex filter kernel.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
|
skimage/filter/_gabor.py
|
gabor_kernel
|
jeysonmc/scikit-image
| 3
|
python
|
def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None, offset=0):
'Return complex 2D Gabor filter kernel.\n\n Frequency and orientation representations of the Gabor filter are similar\n to those of the human visual system. It is especially suitable for texture\n classification using Gabor filter banks.\n\n Parameters\n ----------\n frequency : float\n Frequency of the harmonic function.\n theta : float\n Orientation in radians. If 0, the harmonic is in the x-direction.\n bandwidth : float\n The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`\n and `sigma_y` will decrease with increasing frequency. This value is\n ignored if `sigma_x` and `sigma_y` are set by the user.\n sigma_x, sigma_y : float\n Standard deviation in x- and y-directions. These directions apply to\n the kernel *before* rotation. If `theta = pi/2`, then the kernel is\n rotated 90 degrees so that `sigma_x` controls the *vertical* direction.\n offset : float, optional\n Phase offset of harmonic function in radians.\n\n Returns\n -------\n g : complex array\n Complex filter kernel.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Gabor_filter\n .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf\n\n '
if (sigma_x is None):
sigma_x = (_sigma_prefactor(bandwidth) / frequency)
if (sigma_y is None):
sigma_y = (_sigma_prefactor(bandwidth) / frequency)
n_stds = 3
x0 = np.ceil(max(np.abs(((n_stds * sigma_x) * np.cos(theta))), np.abs(((n_stds * sigma_y) * np.sin(theta))), 1))
y0 = np.ceil(max(np.abs(((n_stds * sigma_y) * np.cos(theta))), np.abs(((n_stds * sigma_x) * np.sin(theta))), 1))
(y, x) = np.mgrid[((- y0):(y0 + 1), (- x0):(x0 + 1))]
rotx = ((x * np.cos(theta)) + (y * np.sin(theta)))
roty = (((- x) * np.sin(theta)) + (y * np.cos(theta)))
g = np.zeros(y.shape, dtype=np.complex)
g[:] = np.exp(((- 0.5) * (((rotx ** 2) / (sigma_x ** 2)) + ((roty ** 2) / (sigma_y ** 2)))))
g /= (((2 * np.pi) * sigma_x) * sigma_y)
g *= np.exp((1j * ((((2 * np.pi) * frequency) * rotx) + offset)))
return g
|
def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None, offset=0):
'Return complex 2D Gabor filter kernel.\n\n Frequency and orientation representations of the Gabor filter are similar\n to those of the human visual system. It is especially suitable for texture\n classification using Gabor filter banks.\n\n Parameters\n ----------\n frequency : float\n Frequency of the harmonic function.\n theta : float\n Orientation in radians. If 0, the harmonic is in the x-direction.\n bandwidth : float\n The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`\n and `sigma_y` will decrease with increasing frequency. This value is\n ignored if `sigma_x` and `sigma_y` are set by the user.\n sigma_x, sigma_y : float\n Standard deviation in x- and y-directions. These directions apply to\n the kernel *before* rotation. If `theta = pi/2`, then the kernel is\n rotated 90 degrees so that `sigma_x` controls the *vertical* direction.\n offset : float, optional\n Phase offset of harmonic function in radians.\n\n Returns\n -------\n g : complex array\n Complex filter kernel.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Gabor_filter\n .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf\n\n '
if (sigma_x is None):
sigma_x = (_sigma_prefactor(bandwidth) / frequency)
if (sigma_y is None):
sigma_y = (_sigma_prefactor(bandwidth) / frequency)
n_stds = 3
x0 = np.ceil(max(np.abs(((n_stds * sigma_x) * np.cos(theta))), np.abs(((n_stds * sigma_y) * np.sin(theta))), 1))
y0 = np.ceil(max(np.abs(((n_stds * sigma_y) * np.cos(theta))), np.abs(((n_stds * sigma_x) * np.sin(theta))), 1))
(y, x) = np.mgrid[((- y0):(y0 + 1), (- x0):(x0 + 1))]
rotx = ((x * np.cos(theta)) + (y * np.sin(theta)))
roty = (((- x) * np.sin(theta)) + (y * np.cos(theta)))
g = np.zeros(y.shape, dtype=np.complex)
g[:] = np.exp(((- 0.5) * (((rotx ** 2) / (sigma_x ** 2)) + ((roty ** 2) / (sigma_y ** 2)))))
g /= (((2 * np.pi) * sigma_x) * sigma_y)
g *= np.exp((1j * ((((2 * np.pi) * frequency) * rotx) + offset)))
return g<|docstring|>Return complex 2D Gabor filter kernel.
Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system. It is especially suitable for texture
classification using Gabor filter banks.
Parameters
----------
frequency : float
Frequency of the harmonic function.
theta : float
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
g : complex array
Complex filter kernel.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf<|endoftext|>
|
271c9fbcae39e9b5e97e229f9f9de9478f5015ccc08731017afc05eebeac34ba
|
def gabor_filter(image, frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None, offset=0, mode='reflect', cval=0):
'Return real and imaginary responses to Gabor filter.\n\n The real and imaginary parts of the Gabor filter kernel are applied to the\n image and the response is returned as a pair of arrays.\n\n Frequency and orientation representations of the Gabor filter are similar\n to those of the human visual system. It is especially suitable for texture\n classification using Gabor filter banks.\n\n Parameters\n ----------\n image : array\n Input image.\n frequency : float\n Frequency of the harmonic function.\n theta : float\n Orientation in radians. If 0, the harmonic is in the x-direction.\n bandwidth : float\n The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`\n and `sigma_y` will decrease with increasing frequency. This value is\n ignored if `sigma_x` and `sigma_y` are set by the user.\n sigma_x, sigma_y : float\n Standard deviation in x- and y-directions. These directions apply to\n the kernel *before* rotation. If `theta = pi/2`, then the kernel is\n rotated 90 degrees so that `sigma_x` controls the *vertical* direction.\n offset : float, optional\n Phase offset of harmonic function in radians.\n\n Returns\n -------\n real, imag : arrays\n Filtered images using the real and imaginary parts of the Gabor filter\n kernel.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Gabor_filter\n .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf\n\n '
g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, offset)
filtered_real = ndimage.convolve(image, np.real(g), mode=mode, cval=cval)
filtered_imag = ndimage.convolve(image, np.imag(g), mode=mode, cval=cval)
return (filtered_real, filtered_imag)
|
Return real and imaginary responses to Gabor filter.
The real and imaginary parts of the Gabor filter kernel are applied to the
image and the response is returned as a pair of arrays.
Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system. It is especially suitable for texture
classification using Gabor filter banks.
Parameters
----------
image : array
Input image.
frequency : float
Frequency of the harmonic function.
theta : float
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
real, imag : arrays
Filtered images using the real and imaginary parts of the Gabor filter
kernel.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
|
skimage/filter/_gabor.py
|
gabor_filter
|
jeysonmc/scikit-image
| 3
|
python
|
def gabor_filter(image, frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None, offset=0, mode='reflect', cval=0):
'Return real and imaginary responses to Gabor filter.\n\n The real and imaginary parts of the Gabor filter kernel are applied to the\n image and the response is returned as a pair of arrays.\n\n Frequency and orientation representations of the Gabor filter are similar\n to those of the human visual system. It is especially suitable for texture\n classification using Gabor filter banks.\n\n Parameters\n ----------\n image : array\n Input image.\n frequency : float\n Frequency of the harmonic function.\n theta : float\n Orientation in radians. If 0, the harmonic is in the x-direction.\n bandwidth : float\n The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`\n and `sigma_y` will decrease with increasing frequency. This value is\n ignored if `sigma_x` and `sigma_y` are set by the user.\n sigma_x, sigma_y : float\n Standard deviation in x- and y-directions. These directions apply to\n the kernel *before* rotation. If `theta = pi/2`, then the kernel is\n rotated 90 degrees so that `sigma_x` controls the *vertical* direction.\n offset : float, optional\n Phase offset of harmonic function in radians.\n\n Returns\n -------\n real, imag : arrays\n Filtered images using the real and imaginary parts of the Gabor filter\n kernel.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Gabor_filter\n .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf\n\n '
g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, offset)
filtered_real = ndimage.convolve(image, np.real(g), mode=mode, cval=cval)
filtered_imag = ndimage.convolve(image, np.imag(g), mode=mode, cval=cval)
return (filtered_real, filtered_imag)
|
def gabor_filter(image, frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None, offset=0, mode='reflect', cval=0):
'Return real and imaginary responses to Gabor filter.\n\n The real and imaginary parts of the Gabor filter kernel are applied to the\n image and the response is returned as a pair of arrays.\n\n Frequency and orientation representations of the Gabor filter are similar\n to those of the human visual system. It is especially suitable for texture\n classification using Gabor filter banks.\n\n Parameters\n ----------\n image : array\n Input image.\n frequency : float\n Frequency of the harmonic function.\n theta : float\n Orientation in radians. If 0, the harmonic is in the x-direction.\n bandwidth : float\n The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`\n and `sigma_y` will decrease with increasing frequency. This value is\n ignored if `sigma_x` and `sigma_y` are set by the user.\n sigma_x, sigma_y : float\n Standard deviation in x- and y-directions. These directions apply to\n the kernel *before* rotation. If `theta = pi/2`, then the kernel is\n rotated 90 degrees so that `sigma_x` controls the *vertical* direction.\n offset : float, optional\n Phase offset of harmonic function in radians.\n\n Returns\n -------\n real, imag : arrays\n Filtered images using the real and imaginary parts of the Gabor filter\n kernel.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Gabor_filter\n .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf\n\n '
g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, offset)
filtered_real = ndimage.convolve(image, np.real(g), mode=mode, cval=cval)
filtered_imag = ndimage.convolve(image, np.imag(g), mode=mode, cval=cval)
return (filtered_real, filtered_imag)<|docstring|>Return real and imaginary responses to Gabor filter.
The real and imaginary parts of the Gabor filter kernel are applied to the
image and the response is returned as a pair of arrays.
Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system. It is especially suitable for texture
classification using Gabor filter banks.
Parameters
----------
image : array
Input image.
frequency : float
Frequency of the harmonic function.
theta : float
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
real, imag : arrays
Filtered images using the real and imaginary parts of the Gabor filter
kernel.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf<|endoftext|>
|
b4111f83e8a16d6abceaf31b37d4b856c667deb0acb8e6a38454c258356c57aa
|
def find_existing_files(torrent, path, add_name_to_folder=True):
'\n Checks if the files in a torrent exist,\n returns a tuple of found files, missing files, size found, size missing.\n '
(found, missing, found_size, missing_size) = (0, 0, 0, 0)
for (fp, f, size, found) in map_existing_files(torrent, path, add_name_to_folder=add_name_to_folder):
if found:
found += 1
found_size += size
else:
missing += 1
missing_size += size
return (found, missing, found_size, missing_size)
|
Checks if the files in a torrent exist,
returns a tuple of found files, missing files, size found, size missing.
|
libtc/utils.py
|
find_existing_files
|
tobbez/libtc
| 0
|
python
|
def find_existing_files(torrent, path, add_name_to_folder=True):
'\n Checks if the files in a torrent exist,\n returns a tuple of found files, missing files, size found, size missing.\n '
(found, missing, found_size, missing_size) = (0, 0, 0, 0)
for (fp, f, size, found) in map_existing_files(torrent, path, add_name_to_folder=add_name_to_folder):
if found:
found += 1
found_size += size
else:
missing += 1
missing_size += size
return (found, missing, found_size, missing_size)
|
def find_existing_files(torrent, path, add_name_to_folder=True):
'\n Checks if the files in a torrent exist,\n returns a tuple of found files, missing files, size found, size missing.\n '
(found, missing, found_size, missing_size) = (0, 0, 0, 0)
for (fp, f, size, found) in map_existing_files(torrent, path, add_name_to_folder=add_name_to_folder):
if found:
found += 1
found_size += size
else:
missing += 1
missing_size += size
return (found, missing, found_size, missing_size)<|docstring|>Checks if the files in a torrent exist,
returns a tuple of found files, missing files, size found, size missing.<|endoftext|>
|
c022169a20dcaa71c1a0a8308e95a176ea61b9ab0a996dc5661278ea02f46d5f
|
def get_res_dimension(self, l, w):
'Get PO resistor dimension in core and two edge blocks.\n '
co_w = self.res_config['co_w']
rpo_co_sp = self.res_config['rpo_co_sp']
po_co_ency = self.res_config['po_co_enc'][1]
(dpo_wmin, dpo_lmin) = self.res_config['dpo_dim_min']
po_rpo_ext_exact = self.res_config.get('po_rpo_ext_exact', (- 1))
if (po_rpo_ext_exact >= 0):
lres = (l + (2 * po_rpo_ext_exact))
else:
lres = (l + (2 * ((rpo_co_sp + co_w) + po_co_ency)))
return (w, lres, dpo_wmin, dpo_lmin)
|
Get PO resistor dimension in core and two edge blocks.
|
abs_templates_ec/resistor/planar.py
|
get_res_dimension
|
boblinchuan/BAG2_TEMPLATES_EC
| 1
|
python
|
def get_res_dimension(self, l, w):
'\n '
co_w = self.res_config['co_w']
rpo_co_sp = self.res_config['rpo_co_sp']
po_co_ency = self.res_config['po_co_enc'][1]
(dpo_wmin, dpo_lmin) = self.res_config['dpo_dim_min']
po_rpo_ext_exact = self.res_config.get('po_rpo_ext_exact', (- 1))
if (po_rpo_ext_exact >= 0):
lres = (l + (2 * po_rpo_ext_exact))
else:
lres = (l + (2 * ((rpo_co_sp + co_w) + po_co_ency)))
return (w, lres, dpo_wmin, dpo_lmin)
|
def get_res_dimension(self, l, w):
'\n '
co_w = self.res_config['co_w']
rpo_co_sp = self.res_config['rpo_co_sp']
po_co_ency = self.res_config['po_co_enc'][1]
(dpo_wmin, dpo_lmin) = self.res_config['dpo_dim_min']
po_rpo_ext_exact = self.res_config.get('po_rpo_ext_exact', (- 1))
if (po_rpo_ext_exact >= 0):
lres = (l + (2 * po_rpo_ext_exact))
else:
lres = (l + (2 * ((rpo_co_sp + co_w) + po_co_ency)))
return (w, lres, dpo_wmin, dpo_lmin)<|docstring|>Get PO resistor dimension in core and two edge blocks.<|endoftext|>
|
d6713b60ead7fd3e258e82c2eeb7286038db61c4083560b12b661f87cf74676e
|
def get_min_res_core_size(self, l, w, res_type, sub_type, threshold, options):
'Returns smallest possible resistor core dimension.\n\n width calculated so we can draw at least 1 dummy OD.\n height calculated so adjacent resistor is DRC clean.\n '
od_wmin = self.res_config['od_dim_min'][0]
po_od_sp = self.res_config['po_od_sp']
po_sp = self.res_config['po_sp']
imp_od_sp = self.res_config['imp_od_sp']
imp_ency = self.res_config['imp_enc'][1]
res_info = self.res_config['info'][res_type]
od_in_res = res_info['od_in_res']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
if od_in_res:
hblk = (lres + po_sp)
wblk = ((wres + (2 * po_od_sp)) + od_wmin)
else:
hblk = ((lres + (2 * (imp_ency + imp_od_sp))) + od_wmin)
wblk = (wres + po_sp)
return (wblk, hblk)
|
Returns smallest possible resistor core dimension.
width calculated so we can draw at least 1 dummy OD.
height calculated so adjacent resistor is DRC clean.
|
abs_templates_ec/resistor/planar.py
|
get_min_res_core_size
|
boblinchuan/BAG2_TEMPLATES_EC
| 1
|
python
|
def get_min_res_core_size(self, l, w, res_type, sub_type, threshold, options):
'Returns smallest possible resistor core dimension.\n\n width calculated so we can draw at least 1 dummy OD.\n height calculated so adjacent resistor is DRC clean.\n '
od_wmin = self.res_config['od_dim_min'][0]
po_od_sp = self.res_config['po_od_sp']
po_sp = self.res_config['po_sp']
imp_od_sp = self.res_config['imp_od_sp']
imp_ency = self.res_config['imp_enc'][1]
res_info = self.res_config['info'][res_type]
od_in_res = res_info['od_in_res']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
if od_in_res:
hblk = (lres + po_sp)
wblk = ((wres + (2 * po_od_sp)) + od_wmin)
else:
hblk = ((lres + (2 * (imp_ency + imp_od_sp))) + od_wmin)
wblk = (wres + po_sp)
return (wblk, hblk)
|
def get_min_res_core_size(self, l, w, res_type, sub_type, threshold, options):
'Returns smallest possible resistor core dimension.\n\n width calculated so we can draw at least 1 dummy OD.\n height calculated so adjacent resistor is DRC clean.\n '
od_wmin = self.res_config['od_dim_min'][0]
po_od_sp = self.res_config['po_od_sp']
po_sp = self.res_config['po_sp']
imp_od_sp = self.res_config['imp_od_sp']
imp_ency = self.res_config['imp_enc'][1]
res_info = self.res_config['info'][res_type]
od_in_res = res_info['od_in_res']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
if od_in_res:
hblk = (lres + po_sp)
wblk = ((wres + (2 * po_od_sp)) + od_wmin)
else:
hblk = ((lres + (2 * (imp_ency + imp_od_sp))) + od_wmin)
wblk = (wres + po_sp)
return (wblk, hblk)<|docstring|>Returns smallest possible resistor core dimension.
width calculated so we can draw at least 1 dummy OD.
height calculated so adjacent resistor is DRC clean.<|endoftext|>
|
2227c06cc658268fb2826b4ec56a5278593e3958868593255c9c4b60486f0184
|
def get_via0_info(self, xc, yc, wres, resolution):
'Compute resistor CO parameters and metal 1 bounding box.'
mos_layer_table = self.config['mos_layer_table']
layer_table = self.config['layer_name']
via_id_table = self.config['via_id']
co_w = self.res_config['co_w']
co_sp = self.res_config['co_sp']
(po_co_encx, po_co_ency) = self.res_config['po_co_enc']
(m1_co_encx, m1_co_ency) = self.res_config['m1_co_enc']
num_co = (((wres - (po_co_encx * 2)) + co_sp) // (co_w + co_sp))
m1_h = (co_w + (2 * m1_co_ency))
po_name = mos_layer_table['PO']
m1_name = layer_table[1]
via_id = via_id_table[(po_name, m1_name)]
via_params = dict(via_type=via_id, loc=[xc, yc], num_cols=num_co, sp_cols=co_sp, enc1=[po_co_encx, po_co_encx, po_co_ency, po_co_ency], enc2=[m1_co_encx, m1_co_encx, m1_co_ency, m1_co_ency], unit_mode=True)
varr_w = ((num_co * (co_w + co_sp)) - co_sp)
m1_type = self.tech_info.get_layer_type(layer_table[1])
m1_min_len = self.tech_info.get_min_length_unit(m1_type, m1_h)
m1_w = max(((2 * m1_co_encx) + varr_w), m1_min_len)
m1_xl = (xc - (m1_w // 2))
m1_xr = (m1_xl + m1_w)
m1_yb = (yc - (m1_h // 2))
m1_yt = (m1_yb + m1_h)
m1_box = BBox(m1_xl, m1_yb, m1_xr, m1_yt, resolution, unit_mode=True)
return (via_params, m1_box)
|
Compute resistor CO parameters and metal 1 bounding box.
|
abs_templates_ec/resistor/planar.py
|
get_via0_info
|
boblinchuan/BAG2_TEMPLATES_EC
| 1
|
python
|
def get_via0_info(self, xc, yc, wres, resolution):
mos_layer_table = self.config['mos_layer_table']
layer_table = self.config['layer_name']
via_id_table = self.config['via_id']
co_w = self.res_config['co_w']
co_sp = self.res_config['co_sp']
(po_co_encx, po_co_ency) = self.res_config['po_co_enc']
(m1_co_encx, m1_co_ency) = self.res_config['m1_co_enc']
num_co = (((wres - (po_co_encx * 2)) + co_sp) // (co_w + co_sp))
m1_h = (co_w + (2 * m1_co_ency))
po_name = mos_layer_table['PO']
m1_name = layer_table[1]
via_id = via_id_table[(po_name, m1_name)]
via_params = dict(via_type=via_id, loc=[xc, yc], num_cols=num_co, sp_cols=co_sp, enc1=[po_co_encx, po_co_encx, po_co_ency, po_co_ency], enc2=[m1_co_encx, m1_co_encx, m1_co_ency, m1_co_ency], unit_mode=True)
varr_w = ((num_co * (co_w + co_sp)) - co_sp)
m1_type = self.tech_info.get_layer_type(layer_table[1])
m1_min_len = self.tech_info.get_min_length_unit(m1_type, m1_h)
m1_w = max(((2 * m1_co_encx) + varr_w), m1_min_len)
m1_xl = (xc - (m1_w // 2))
m1_xr = (m1_xl + m1_w)
m1_yb = (yc - (m1_h // 2))
m1_yt = (m1_yb + m1_h)
m1_box = BBox(m1_xl, m1_yb, m1_xr, m1_yt, resolution, unit_mode=True)
return (via_params, m1_box)
|
def get_via0_info(self, xc, yc, wres, resolution):
mos_layer_table = self.config['mos_layer_table']
layer_table = self.config['layer_name']
via_id_table = self.config['via_id']
co_w = self.res_config['co_w']
co_sp = self.res_config['co_sp']
(po_co_encx, po_co_ency) = self.res_config['po_co_enc']
(m1_co_encx, m1_co_ency) = self.res_config['m1_co_enc']
num_co = (((wres - (po_co_encx * 2)) + co_sp) // (co_w + co_sp))
m1_h = (co_w + (2 * m1_co_ency))
po_name = mos_layer_table['PO']
m1_name = layer_table[1]
via_id = via_id_table[(po_name, m1_name)]
via_params = dict(via_type=via_id, loc=[xc, yc], num_cols=num_co, sp_cols=co_sp, enc1=[po_co_encx, po_co_encx, po_co_ency, po_co_ency], enc2=[m1_co_encx, m1_co_encx, m1_co_ency, m1_co_ency], unit_mode=True)
varr_w = ((num_co * (co_w + co_sp)) - co_sp)
m1_type = self.tech_info.get_layer_type(layer_table[1])
m1_min_len = self.tech_info.get_min_length_unit(m1_type, m1_h)
m1_w = max(((2 * m1_co_encx) + varr_w), m1_min_len)
m1_xl = (xc - (m1_w // 2))
m1_xr = (m1_xl + m1_w)
m1_yb = (yc - (m1_h // 2))
m1_yt = (m1_yb + m1_h)
m1_box = BBox(m1_xl, m1_yb, m1_xr, m1_yt, resolution, unit_mode=True)
return (via_params, m1_box)<|docstring|>Compute resistor CO parameters and metal 1 bounding box.<|endoftext|>
|
76b18bf889ba8b0584bf4380a98cd804b0d42fb81b584f864e9edd24f81877bc
|
def get_core_info(self, grid, width, height, l, w, res_type, sub_type, threshold, track_widths, track_spaces, options):
'Compute core layout information dictionary.\n\n This method checks max PO and min OD density rules.\n '
layer_table = self.config['layer_name']
od_wmin = self.res_config['od_dim_min'][0]
od_wmax = self.res_config['od_dim_max'][0]
od_sp = self.res_config['od_sp']
od_min_density = self.res_config['od_min_density']
po_od_sp = self.res_config['po_od_sp']
po_max_density = self.res_config['po_max_density']
co_w = self.res_config['co_w']
rpo_co_sp = self.res_config['rpo_co_sp']
m1_sp_max = self.res_config['m1_sp_max']
imp_od_sp = self.res_config['imp_od_sp']
imp_ency = self.res_config['imp_enc'][1]
res_info = self.res_config['info'][res_type]
od_in_res = res_info['od_in_res']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
max_res_area = int(((width * height) * po_max_density))
if ((wres * lres) > max_res_area):
return None
if od_in_res:
bnd_spx = ((width - wres) // 2)
area = (2 * (bnd_spx - po_od_sp))
lr_od_xloc = fill_symmetric_max_density(area, area, od_wmin, od_wmax, od_sp, offset=((- bnd_spx) + po_od_sp), sp_max=None, fill_on_edge=True, cyclic=False)[0]
lr_od_h = lres
else:
lr_od_xloc = []
lr_od_h = 0
bnd_spy = ((height - lres) // 2)
if od_in_res:
area = (2 * (bnd_spy - po_od_sp))
tb_od_offset = ((- bnd_spy) + po_od_sp)
tb_od_w = wres
else:
area = (2 * (bnd_spy - (imp_od_sp + imp_ency)))
tb_od_offset = (((- bnd_spy) + imp_od_sp) + imp_ency)
tb_od_w = (width - od_sp)
dod_dx = ((width - tb_od_w) // 2)
tb_od_xloc = [(dod_dx, (width - dod_dx))]
tb_od_yloc = fill_symmetric_max_density(area, area, od_wmin, od_wmax, od_sp, offset=tb_od_offset, sp_max=None, fill_on_edge=True, cyclic=False)[0]
min_od_area = int(math.ceil(((width * height) * od_min_density)))
od_area = 0
for (od_w, od_intv_list) in ((lr_od_h, lr_od_xloc), (tb_od_w, tb_od_yloc)):
for (lower, upper) in od_intv_list:
od_area += (od_w * (upper - lower))
if (od_area < min_od_area):
return None
num_dummy_half = (- ((- len(tb_od_yloc)) // 2))
bot_od_yloc = tb_od_yloc[(- num_dummy_half):]
top_od_yloc = [((a + height), (b + height)) for (a, b) in tb_od_yloc[:num_dummy_half]]
layout_info = dict(width=width, height=height, lr_od_xloc=lr_od_xloc, bot_od_yloc=bot_od_yloc, top_od_yloc=top_od_yloc, tb_od_xloc=tb_od_xloc)
xc = (width // 2)
rpdmy_yb = ((height // 2) - (l // 2))
rpdmy_yt = (rpdmy_yb + l)
bot_yc = ((rpdmy_yb - rpo_co_sp) - (co_w // 2))
top_yc = ((rpdmy_yt + rpo_co_sp) + (co_w // 2))
bot_layer = self.get_bot_layer()
bot_pitch = grid.get_track_pitch(bot_layer, unit_mode=True)
bot_num_tr = ((height // bot_pitch) if ((height % bot_pitch) == 0) else (height / bot_pitch))
(m2_w, m2_sp) = (track_widths[0], track_spaces[0])
if isinstance(m2_sp, int):
bot_tr_min = ((((m2_w + m2_sp) + 1) / 2) - 1)
else:
bot_tr_min = ((((m2_w + m2_sp) + 1.5) / 2) - 1)
top_tr_max = ((bot_num_tr - 1) - bot_tr_min)
top_tr = min(top_tr_max, grid.coord_to_nearest_track(bot_layer, top_yc, half_track=True, mode=1, unit_mode=True))
bot_tr = max(bot_tr_min, grid.coord_to_nearest_track(bot_layer, bot_yc, half_track=True, mode=(- 1), unit_mode=True))
m1_name = layer_table[1]
m2_name = layer_table[2]
m2_h = grid.get_track_width(bot_layer, m2_w, unit_mode=True)
m2_type = self.tech_info.get_layer_type(m2_name)
m2_len_min = self.tech_info.get_min_length_unit(m2_type, m2_h)
res = grid.resolution
port_info = []
for (port_name, yc, m2_tr) in (('bot', bot_yc, bot_tr), ('top', top_yc, top_tr)):
(via0_params, m1_box) = self.get_via0_info(xc, yc, wres, res)
m2_yc = grid.track_to_coord(bot_layer, m2_tr, unit_mode=True)
v1_box = BBox(m1_box.left_unit, (m2_yc - (m2_h // 2)), m1_box.right_unit, (m2_yc + (m2_h // 2)), res, unit_mode=True)
via1_info = grid.tech_info.get_via_info(v1_box, m1_name, m2_name, 'y')
m1_box = m1_box.merge(via1_info['bot_box'])
m2_box = via1_info['top_box']
m2_box = m2_box.expand(dx=max(0, ((m2_len_min - m2_box.width_unit) // 2)), unit_mode=True)
via1_params = via1_info['params']
via1_params['via_type'] = via1_params.pop('id')
port_info.append((port_name, via0_params, via1_params, m1_box, m2_box))
m1_w = port_info[0][3].width_unit
m1_h = port_info[0][3].height_unit
m1_bot = port_info[0][3]
m1_top = port_info[1][3]
(m1_bot_yb, m1_bot_yt) = (m1_bot.bottom_unit, m1_bot.top_unit)
(m1_top_yb, m1_top_yt) = (m1_top.bottom_unit, m1_top.top_unit)
m1_core_mid_y = fill_symmetric_const_space((m1_top_yb - m1_bot_yt), m1_sp_max, m1_h, m1_h, offset=m1_bot_yt)
m1_core_top_y = fill_symmetric_const_space(((m1_bot_yb + height) - m1_top_yt), m1_sp_max, m1_h, m1_h, offset=m1_top_yt)
fill_len2 = (- ((- len(m1_core_top_y)) // 2))
m1_core_y = [((a - height), (b - height)) for (a, b) in m1_core_top_y[(- fill_len2):]]
m1_core_y.append((m1_bot_yb, m1_bot_yt))
m1_core_y.extend(m1_core_mid_y)
m1_core_y.append((m1_top_yb, m1_top_yt))
m1_core_y.extend(m1_core_top_y[:fill_len2])
(m1_xl, m1_xr) = (m1_bot.left_unit, m1_bot.right_unit)
sp_xl = ((- width) + m1_xr)
sp_xr = m1_xl
m1_core_x = fill_symmetric_const_space((sp_xr - sp_xl), m1_sp_max, m1_w, m1_w, offset=sp_xl)
m1_core_x.append((m1_xl, m1_xr))
m1_core_x.extend((((a + width), (b + width)) for (a, b) in m1_core_x[:(- 1)]))
layout_info['port_info'] = port_info
layout_info['m1_core_x'] = m1_core_x
layout_info['m1_core_y'] = m1_core_y
layout_info['m1_w'] = m1_w
layout_info['m1_h'] = m1_h
return layout_info
|
Compute core layout information dictionary.
This method checks max PO and min OD density rules.
|
abs_templates_ec/resistor/planar.py
|
get_core_info
|
boblinchuan/BAG2_TEMPLATES_EC
| 1
|
python
|
def get_core_info(self, grid, width, height, l, w, res_type, sub_type, threshold, track_widths, track_spaces, options):
'Compute core layout information dictionary.\n\n This method checks max PO and min OD density rules.\n '
layer_table = self.config['layer_name']
od_wmin = self.res_config['od_dim_min'][0]
od_wmax = self.res_config['od_dim_max'][0]
od_sp = self.res_config['od_sp']
od_min_density = self.res_config['od_min_density']
po_od_sp = self.res_config['po_od_sp']
po_max_density = self.res_config['po_max_density']
co_w = self.res_config['co_w']
rpo_co_sp = self.res_config['rpo_co_sp']
m1_sp_max = self.res_config['m1_sp_max']
imp_od_sp = self.res_config['imp_od_sp']
imp_ency = self.res_config['imp_enc'][1]
res_info = self.res_config['info'][res_type]
od_in_res = res_info['od_in_res']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
max_res_area = int(((width * height) * po_max_density))
if ((wres * lres) > max_res_area):
return None
if od_in_res:
bnd_spx = ((width - wres) // 2)
area = (2 * (bnd_spx - po_od_sp))
lr_od_xloc = fill_symmetric_max_density(area, area, od_wmin, od_wmax, od_sp, offset=((- bnd_spx) + po_od_sp), sp_max=None, fill_on_edge=True, cyclic=False)[0]
lr_od_h = lres
else:
lr_od_xloc = []
lr_od_h = 0
bnd_spy = ((height - lres) // 2)
if od_in_res:
area = (2 * (bnd_spy - po_od_sp))
tb_od_offset = ((- bnd_spy) + po_od_sp)
tb_od_w = wres
else:
area = (2 * (bnd_spy - (imp_od_sp + imp_ency)))
tb_od_offset = (((- bnd_spy) + imp_od_sp) + imp_ency)
tb_od_w = (width - od_sp)
dod_dx = ((width - tb_od_w) // 2)
tb_od_xloc = [(dod_dx, (width - dod_dx))]
tb_od_yloc = fill_symmetric_max_density(area, area, od_wmin, od_wmax, od_sp, offset=tb_od_offset, sp_max=None, fill_on_edge=True, cyclic=False)[0]
min_od_area = int(math.ceil(((width * height) * od_min_density)))
od_area = 0
for (od_w, od_intv_list) in ((lr_od_h, lr_od_xloc), (tb_od_w, tb_od_yloc)):
for (lower, upper) in od_intv_list:
od_area += (od_w * (upper - lower))
if (od_area < min_od_area):
return None
num_dummy_half = (- ((- len(tb_od_yloc)) // 2))
bot_od_yloc = tb_od_yloc[(- num_dummy_half):]
top_od_yloc = [((a + height), (b + height)) for (a, b) in tb_od_yloc[:num_dummy_half]]
layout_info = dict(width=width, height=height, lr_od_xloc=lr_od_xloc, bot_od_yloc=bot_od_yloc, top_od_yloc=top_od_yloc, tb_od_xloc=tb_od_xloc)
xc = (width // 2)
rpdmy_yb = ((height // 2) - (l // 2))
rpdmy_yt = (rpdmy_yb + l)
bot_yc = ((rpdmy_yb - rpo_co_sp) - (co_w // 2))
top_yc = ((rpdmy_yt + rpo_co_sp) + (co_w // 2))
bot_layer = self.get_bot_layer()
bot_pitch = grid.get_track_pitch(bot_layer, unit_mode=True)
bot_num_tr = ((height // bot_pitch) if ((height % bot_pitch) == 0) else (height / bot_pitch))
(m2_w, m2_sp) = (track_widths[0], track_spaces[0])
if isinstance(m2_sp, int):
bot_tr_min = ((((m2_w + m2_sp) + 1) / 2) - 1)
else:
bot_tr_min = ((((m2_w + m2_sp) + 1.5) / 2) - 1)
top_tr_max = ((bot_num_tr - 1) - bot_tr_min)
top_tr = min(top_tr_max, grid.coord_to_nearest_track(bot_layer, top_yc, half_track=True, mode=1, unit_mode=True))
bot_tr = max(bot_tr_min, grid.coord_to_nearest_track(bot_layer, bot_yc, half_track=True, mode=(- 1), unit_mode=True))
m1_name = layer_table[1]
m2_name = layer_table[2]
m2_h = grid.get_track_width(bot_layer, m2_w, unit_mode=True)
m2_type = self.tech_info.get_layer_type(m2_name)
m2_len_min = self.tech_info.get_min_length_unit(m2_type, m2_h)
res = grid.resolution
port_info = []
for (port_name, yc, m2_tr) in (('bot', bot_yc, bot_tr), ('top', top_yc, top_tr)):
(via0_params, m1_box) = self.get_via0_info(xc, yc, wres, res)
m2_yc = grid.track_to_coord(bot_layer, m2_tr, unit_mode=True)
v1_box = BBox(m1_box.left_unit, (m2_yc - (m2_h // 2)), m1_box.right_unit, (m2_yc + (m2_h // 2)), res, unit_mode=True)
via1_info = grid.tech_info.get_via_info(v1_box, m1_name, m2_name, 'y')
m1_box = m1_box.merge(via1_info['bot_box'])
m2_box = via1_info['top_box']
m2_box = m2_box.expand(dx=max(0, ((m2_len_min - m2_box.width_unit) // 2)), unit_mode=True)
via1_params = via1_info['params']
via1_params['via_type'] = via1_params.pop('id')
port_info.append((port_name, via0_params, via1_params, m1_box, m2_box))
m1_w = port_info[0][3].width_unit
m1_h = port_info[0][3].height_unit
m1_bot = port_info[0][3]
m1_top = port_info[1][3]
(m1_bot_yb, m1_bot_yt) = (m1_bot.bottom_unit, m1_bot.top_unit)
(m1_top_yb, m1_top_yt) = (m1_top.bottom_unit, m1_top.top_unit)
m1_core_mid_y = fill_symmetric_const_space((m1_top_yb - m1_bot_yt), m1_sp_max, m1_h, m1_h, offset=m1_bot_yt)
m1_core_top_y = fill_symmetric_const_space(((m1_bot_yb + height) - m1_top_yt), m1_sp_max, m1_h, m1_h, offset=m1_top_yt)
fill_len2 = (- ((- len(m1_core_top_y)) // 2))
m1_core_y = [((a - height), (b - height)) for (a, b) in m1_core_top_y[(- fill_len2):]]
m1_core_y.append((m1_bot_yb, m1_bot_yt))
m1_core_y.extend(m1_core_mid_y)
m1_core_y.append((m1_top_yb, m1_top_yt))
m1_core_y.extend(m1_core_top_y[:fill_len2])
(m1_xl, m1_xr) = (m1_bot.left_unit, m1_bot.right_unit)
sp_xl = ((- width) + m1_xr)
sp_xr = m1_xl
m1_core_x = fill_symmetric_const_space((sp_xr - sp_xl), m1_sp_max, m1_w, m1_w, offset=sp_xl)
m1_core_x.append((m1_xl, m1_xr))
m1_core_x.extend((((a + width), (b + width)) for (a, b) in m1_core_x[:(- 1)]))
layout_info['port_info'] = port_info
layout_info['m1_core_x'] = m1_core_x
layout_info['m1_core_y'] = m1_core_y
layout_info['m1_w'] = m1_w
layout_info['m1_h'] = m1_h
return layout_info
|
def get_core_info(self, grid, width, height, l, w, res_type, sub_type, threshold, track_widths, track_spaces, options):
'Compute core layout information dictionary.\n\n This method checks max PO and min OD density rules.\n '
layer_table = self.config['layer_name']
od_wmin = self.res_config['od_dim_min'][0]
od_wmax = self.res_config['od_dim_max'][0]
od_sp = self.res_config['od_sp']
od_min_density = self.res_config['od_min_density']
po_od_sp = self.res_config['po_od_sp']
po_max_density = self.res_config['po_max_density']
co_w = self.res_config['co_w']
rpo_co_sp = self.res_config['rpo_co_sp']
m1_sp_max = self.res_config['m1_sp_max']
imp_od_sp = self.res_config['imp_od_sp']
imp_ency = self.res_config['imp_enc'][1]
res_info = self.res_config['info'][res_type]
od_in_res = res_info['od_in_res']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
max_res_area = int(((width * height) * po_max_density))
if ((wres * lres) > max_res_area):
return None
if od_in_res:
bnd_spx = ((width - wres) // 2)
area = (2 * (bnd_spx - po_od_sp))
lr_od_xloc = fill_symmetric_max_density(area, area, od_wmin, od_wmax, od_sp, offset=((- bnd_spx) + po_od_sp), sp_max=None, fill_on_edge=True, cyclic=False)[0]
lr_od_h = lres
else:
lr_od_xloc = []
lr_od_h = 0
bnd_spy = ((height - lres) // 2)
if od_in_res:
area = (2 * (bnd_spy - po_od_sp))
tb_od_offset = ((- bnd_spy) + po_od_sp)
tb_od_w = wres
else:
area = (2 * (bnd_spy - (imp_od_sp + imp_ency)))
tb_od_offset = (((- bnd_spy) + imp_od_sp) + imp_ency)
tb_od_w = (width - od_sp)
dod_dx = ((width - tb_od_w) // 2)
tb_od_xloc = [(dod_dx, (width - dod_dx))]
tb_od_yloc = fill_symmetric_max_density(area, area, od_wmin, od_wmax, od_sp, offset=tb_od_offset, sp_max=None, fill_on_edge=True, cyclic=False)[0]
min_od_area = int(math.ceil(((width * height) * od_min_density)))
od_area = 0
for (od_w, od_intv_list) in ((lr_od_h, lr_od_xloc), (tb_od_w, tb_od_yloc)):
for (lower, upper) in od_intv_list:
od_area += (od_w * (upper - lower))
if (od_area < min_od_area):
return None
num_dummy_half = (- ((- len(tb_od_yloc)) // 2))
bot_od_yloc = tb_od_yloc[(- num_dummy_half):]
top_od_yloc = [((a + height), (b + height)) for (a, b) in tb_od_yloc[:num_dummy_half]]
layout_info = dict(width=width, height=height, lr_od_xloc=lr_od_xloc, bot_od_yloc=bot_od_yloc, top_od_yloc=top_od_yloc, tb_od_xloc=tb_od_xloc)
xc = (width // 2)
rpdmy_yb = ((height // 2) - (l // 2))
rpdmy_yt = (rpdmy_yb + l)
bot_yc = ((rpdmy_yb - rpo_co_sp) - (co_w // 2))
top_yc = ((rpdmy_yt + rpo_co_sp) + (co_w // 2))
bot_layer = self.get_bot_layer()
bot_pitch = grid.get_track_pitch(bot_layer, unit_mode=True)
bot_num_tr = ((height // bot_pitch) if ((height % bot_pitch) == 0) else (height / bot_pitch))
(m2_w, m2_sp) = (track_widths[0], track_spaces[0])
if isinstance(m2_sp, int):
bot_tr_min = ((((m2_w + m2_sp) + 1) / 2) - 1)
else:
bot_tr_min = ((((m2_w + m2_sp) + 1.5) / 2) - 1)
top_tr_max = ((bot_num_tr - 1) - bot_tr_min)
top_tr = min(top_tr_max, grid.coord_to_nearest_track(bot_layer, top_yc, half_track=True, mode=1, unit_mode=True))
bot_tr = max(bot_tr_min, grid.coord_to_nearest_track(bot_layer, bot_yc, half_track=True, mode=(- 1), unit_mode=True))
m1_name = layer_table[1]
m2_name = layer_table[2]
m2_h = grid.get_track_width(bot_layer, m2_w, unit_mode=True)
m2_type = self.tech_info.get_layer_type(m2_name)
m2_len_min = self.tech_info.get_min_length_unit(m2_type, m2_h)
res = grid.resolution
port_info = []
for (port_name, yc, m2_tr) in (('bot', bot_yc, bot_tr), ('top', top_yc, top_tr)):
(via0_params, m1_box) = self.get_via0_info(xc, yc, wres, res)
m2_yc = grid.track_to_coord(bot_layer, m2_tr, unit_mode=True)
v1_box = BBox(m1_box.left_unit, (m2_yc - (m2_h // 2)), m1_box.right_unit, (m2_yc + (m2_h // 2)), res, unit_mode=True)
via1_info = grid.tech_info.get_via_info(v1_box, m1_name, m2_name, 'y')
m1_box = m1_box.merge(via1_info['bot_box'])
m2_box = via1_info['top_box']
m2_box = m2_box.expand(dx=max(0, ((m2_len_min - m2_box.width_unit) // 2)), unit_mode=True)
via1_params = via1_info['params']
via1_params['via_type'] = via1_params.pop('id')
port_info.append((port_name, via0_params, via1_params, m1_box, m2_box))
m1_w = port_info[0][3].width_unit
m1_h = port_info[0][3].height_unit
m1_bot = port_info[0][3]
m1_top = port_info[1][3]
(m1_bot_yb, m1_bot_yt) = (m1_bot.bottom_unit, m1_bot.top_unit)
(m1_top_yb, m1_top_yt) = (m1_top.bottom_unit, m1_top.top_unit)
m1_core_mid_y = fill_symmetric_const_space((m1_top_yb - m1_bot_yt), m1_sp_max, m1_h, m1_h, offset=m1_bot_yt)
m1_core_top_y = fill_symmetric_const_space(((m1_bot_yb + height) - m1_top_yt), m1_sp_max, m1_h, m1_h, offset=m1_top_yt)
fill_len2 = (- ((- len(m1_core_top_y)) // 2))
m1_core_y = [((a - height), (b - height)) for (a, b) in m1_core_top_y[(- fill_len2):]]
m1_core_y.append((m1_bot_yb, m1_bot_yt))
m1_core_y.extend(m1_core_mid_y)
m1_core_y.append((m1_top_yb, m1_top_yt))
m1_core_y.extend(m1_core_top_y[:fill_len2])
(m1_xl, m1_xr) = (m1_bot.left_unit, m1_bot.right_unit)
sp_xl = ((- width) + m1_xr)
sp_xr = m1_xl
m1_core_x = fill_symmetric_const_space((sp_xr - sp_xl), m1_sp_max, m1_w, m1_w, offset=sp_xl)
m1_core_x.append((m1_xl, m1_xr))
m1_core_x.extend((((a + width), (b + width)) for (a, b) in m1_core_x[:(- 1)]))
layout_info['port_info'] = port_info
layout_info['m1_core_x'] = m1_core_x
layout_info['m1_core_y'] = m1_core_y
layout_info['m1_w'] = m1_w
layout_info['m1_h'] = m1_h
return layout_info<|docstring|>Compute core layout information dictionary.
This method checks max PO and min OD density rules.<|endoftext|>
|
c1080bc56361dacbf1bc802eb319755fbb7b655347e0a235c1e791e5b0f8e544
|
def get_lr_edge_info(self, grid, core_info, wedge, l, w, res_type, sub_type, threshold, track_widths, track_spaces, options):
'Returns a dictionary of LR edge layout information.\n\n This method checks:\n 1. spacing rules.\n 2. PO density rules\n\n if all these pass, return LR edge layout information dictionary.\n '
edge_margin = self.res_config['edge_margin']
imp_encx = self.res_config['imp_enc'][0]
po_max_density = self.res_config['po_max_density']
m1_sp_max = self.res_config['m1_sp_max']
m1_sp_bnd = self.res_config['m1_sp_bnd']
wcore = core_info['width']
hcore = core_info['height']
m1_core_x = core_info['m1_core_x']
m1_w = core_info['m1_w']
lr_od_xloc = core_info['lr_od_xloc']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
spx = ((wcore - wres) // 2)
if (wres_lr > 0):
wedge_min = ((((edge_margin // 2) + imp_encx) + wres_lr) + spx)
well_xl = (((wedge - spx) - wres_lr) - imp_encx)
else:
od_xl = (lr_od_xloc[0][0] if lr_od_xloc else 0)
imp_encx_edge = max((imp_encx - spx), 0, (- od_xl))
wedge_min = ((edge_margin // 2) + imp_encx_edge)
well_xl = (wedge - imp_encx_edge)
if (wedge < wedge_min):
return None
max_res_area = int(((wedge * hcore) * po_max_density))
if ((wres_lr * lres) > max_res_area):
return None
sp_xl = (m1_sp_bnd + m1_w)
sp_xr = (wedge + m1_core_x[0][0])
if (sp_xr < sp_xl):
m1_edge_x = []
else:
m1_sp = (m1_core_x[0][0] * 2)
if ((((sp_xr - sp_xl) - m1_w) % 2) != 0):
sp_xl -= 1
m1_edge_x = fill_symmetric_const_space((sp_xr - sp_xl), m1_sp_max, m1_w, m1_w, offset=sp_xl)
if ((sp_xr - sp_xl) >= m1_sp):
m1_edge_x.insert(0, (m1_sp_bnd, sp_xl))
return dict(well_xl=well_xl, m1_edge_x=m1_edge_x)
|
Returns a dictionary of LR edge layout information.
This method checks:
1. spacing rules.
2. PO density rules
if all these pass, return LR edge layout information dictionary.
|
abs_templates_ec/resistor/planar.py
|
get_lr_edge_info
|
boblinchuan/BAG2_TEMPLATES_EC
| 1
|
python
|
def get_lr_edge_info(self, grid, core_info, wedge, l, w, res_type, sub_type, threshold, track_widths, track_spaces, options):
'Returns a dictionary of LR edge layout information.\n\n This method checks:\n 1. spacing rules.\n 2. PO density rules\n\n if all these pass, return LR edge layout information dictionary.\n '
edge_margin = self.res_config['edge_margin']
imp_encx = self.res_config['imp_enc'][0]
po_max_density = self.res_config['po_max_density']
m1_sp_max = self.res_config['m1_sp_max']
m1_sp_bnd = self.res_config['m1_sp_bnd']
wcore = core_info['width']
hcore = core_info['height']
m1_core_x = core_info['m1_core_x']
m1_w = core_info['m1_w']
lr_od_xloc = core_info['lr_od_xloc']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
spx = ((wcore - wres) // 2)
if (wres_lr > 0):
wedge_min = ((((edge_margin // 2) + imp_encx) + wres_lr) + spx)
well_xl = (((wedge - spx) - wres_lr) - imp_encx)
else:
od_xl = (lr_od_xloc[0][0] if lr_od_xloc else 0)
imp_encx_edge = max((imp_encx - spx), 0, (- od_xl))
wedge_min = ((edge_margin // 2) + imp_encx_edge)
well_xl = (wedge - imp_encx_edge)
if (wedge < wedge_min):
return None
max_res_area = int(((wedge * hcore) * po_max_density))
if ((wres_lr * lres) > max_res_area):
return None
sp_xl = (m1_sp_bnd + m1_w)
sp_xr = (wedge + m1_core_x[0][0])
if (sp_xr < sp_xl):
m1_edge_x = []
else:
m1_sp = (m1_core_x[0][0] * 2)
if ((((sp_xr - sp_xl) - m1_w) % 2) != 0):
sp_xl -= 1
m1_edge_x = fill_symmetric_const_space((sp_xr - sp_xl), m1_sp_max, m1_w, m1_w, offset=sp_xl)
if ((sp_xr - sp_xl) >= m1_sp):
m1_edge_x.insert(0, (m1_sp_bnd, sp_xl))
return dict(well_xl=well_xl, m1_edge_x=m1_edge_x)
|
def get_lr_edge_info(self, grid, core_info, wedge, l, w, res_type, sub_type, threshold, track_widths, track_spaces, options):
'Returns a dictionary of LR edge layout information.\n\n This method checks:\n 1. spacing rules.\n 2. PO density rules\n\n if all these pass, return LR edge layout information dictionary.\n '
edge_margin = self.res_config['edge_margin']
imp_encx = self.res_config['imp_enc'][0]
po_max_density = self.res_config['po_max_density']
m1_sp_max = self.res_config['m1_sp_max']
m1_sp_bnd = self.res_config['m1_sp_bnd']
wcore = core_info['width']
hcore = core_info['height']
m1_core_x = core_info['m1_core_x']
m1_w = core_info['m1_w']
lr_od_xloc = core_info['lr_od_xloc']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
spx = ((wcore - wres) // 2)
if (wres_lr > 0):
wedge_min = ((((edge_margin // 2) + imp_encx) + wres_lr) + spx)
well_xl = (((wedge - spx) - wres_lr) - imp_encx)
else:
od_xl = (lr_od_xloc[0][0] if lr_od_xloc else 0)
imp_encx_edge = max((imp_encx - spx), 0, (- od_xl))
wedge_min = ((edge_margin // 2) + imp_encx_edge)
well_xl = (wedge - imp_encx_edge)
if (wedge < wedge_min):
return None
max_res_area = int(((wedge * hcore) * po_max_density))
if ((wres_lr * lres) > max_res_area):
return None
sp_xl = (m1_sp_bnd + m1_w)
sp_xr = (wedge + m1_core_x[0][0])
if (sp_xr < sp_xl):
m1_edge_x = []
else:
m1_sp = (m1_core_x[0][0] * 2)
if ((((sp_xr - sp_xl) - m1_w) % 2) != 0):
sp_xl -= 1
m1_edge_x = fill_symmetric_const_space((sp_xr - sp_xl), m1_sp_max, m1_w, m1_w, offset=sp_xl)
if ((sp_xr - sp_xl) >= m1_sp):
m1_edge_x.insert(0, (m1_sp_bnd, sp_xl))
return dict(well_xl=well_xl, m1_edge_x=m1_edge_x)<|docstring|>Returns a dictionary of LR edge layout information.
This method checks:
1. spacing rules.
2. PO density rules
if all these pass, return LR edge layout information dictionary.<|endoftext|>
|
fee6f6793e836edc3e7ec4c3599452371a858d6806933b41959ccfbe64f6b9c1
|
def get_tb_edge_info(self, grid, core_info, hedge, l, w, res_type, sub_type, threshold, track_widths, track_spaces, options):
'Returns a dictionary of TB edge layout information.\n\n This method checks:\n 1. spacing rules.\n 2. PO density rules\n\n if all these pass, return TB edge layout information dictionary.\n '
edge_margin = self.res_config['edge_margin']
imp_ency = self.res_config['imp_enc'][1]
po_max_density = self.res_config['po_max_density']
m1_sp_max = self.res_config['m1_sp_max']
m1_sp_bnd = self.res_config['m1_sp_bnd']
wcore = core_info['width']
hcore = core_info['height']
m1_core_y = core_info['m1_core_y']
m1_h = core_info['m1_h']
bot_od_yloc = core_info['bot_od_yloc']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
spy = ((hcore - lres) // 2)
if (lres_tb > 0):
hedge_min = ((((edge_margin // 2) + imp_ency) + lres_tb) + spy)
well_yb = (((hedge - spy) - lres_tb) - imp_ency)
else:
od_yb = (bot_od_yloc[0][0] if bot_od_yloc else 0)
imp_ency_edge = max((imp_ency - spy), 0, (- od_yb))
hedge_min = ((edge_margin // 2) + imp_ency_edge)
well_yb = (hedge - imp_ency_edge)
if (hedge < hedge_min):
return None
max_res_area = int(((hedge * wcore) * po_max_density))
if ((wres * lres_tb) > max_res_area):
return None
sp_yb = (m1_sp_bnd + m1_h)
sp_yt = (hedge + m1_core_y[0][0])
if (sp_yt < sp_yb):
m1_edge_y = []
else:
m1_sp = (m1_core_y[0][0] * 2)
if ((((sp_yt - sp_yb) - m1_h) % 2) != 0):
sp_yb -= 1
m1_edge_y = fill_symmetric_const_space((sp_yt - sp_yb), m1_sp_max, m1_h, m1_h, offset=sp_yb)
if ((sp_yt - sp_yb) >= m1_sp):
m1_edge_y.insert(0, (m1_sp_bnd, sp_yb))
return dict(well_yb=well_yb, m1_edge_y=m1_edge_y)
|
Returns a dictionary of TB edge layout information.
This method checks:
1. spacing rules.
2. PO density rules
if all these pass, return TB edge layout information dictionary.
|
abs_templates_ec/resistor/planar.py
|
get_tb_edge_info
|
boblinchuan/BAG2_TEMPLATES_EC
| 1
|
python
|
def get_tb_edge_info(self, grid, core_info, hedge, l, w, res_type, sub_type, threshold, track_widths, track_spaces, options):
'Returns a dictionary of TB edge layout information.\n\n This method checks:\n 1. spacing rules.\n 2. PO density rules\n\n if all these pass, return TB edge layout information dictionary.\n '
edge_margin = self.res_config['edge_margin']
imp_ency = self.res_config['imp_enc'][1]
po_max_density = self.res_config['po_max_density']
m1_sp_max = self.res_config['m1_sp_max']
m1_sp_bnd = self.res_config['m1_sp_bnd']
wcore = core_info['width']
hcore = core_info['height']
m1_core_y = core_info['m1_core_y']
m1_h = core_info['m1_h']
bot_od_yloc = core_info['bot_od_yloc']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
spy = ((hcore - lres) // 2)
if (lres_tb > 0):
hedge_min = ((((edge_margin // 2) + imp_ency) + lres_tb) + spy)
well_yb = (((hedge - spy) - lres_tb) - imp_ency)
else:
od_yb = (bot_od_yloc[0][0] if bot_od_yloc else 0)
imp_ency_edge = max((imp_ency - spy), 0, (- od_yb))
hedge_min = ((edge_margin // 2) + imp_ency_edge)
well_yb = (hedge - imp_ency_edge)
if (hedge < hedge_min):
return None
max_res_area = int(((hedge * wcore) * po_max_density))
if ((wres * lres_tb) > max_res_area):
return None
sp_yb = (m1_sp_bnd + m1_h)
sp_yt = (hedge + m1_core_y[0][0])
if (sp_yt < sp_yb):
m1_edge_y = []
else:
m1_sp = (m1_core_y[0][0] * 2)
if ((((sp_yt - sp_yb) - m1_h) % 2) != 0):
sp_yb -= 1
m1_edge_y = fill_symmetric_const_space((sp_yt - sp_yb), m1_sp_max, m1_h, m1_h, offset=sp_yb)
if ((sp_yt - sp_yb) >= m1_sp):
m1_edge_y.insert(0, (m1_sp_bnd, sp_yb))
return dict(well_yb=well_yb, m1_edge_y=m1_edge_y)
|
def get_tb_edge_info(self, grid, core_info, hedge, l, w, res_type, sub_type, threshold, track_widths, track_spaces, options):
'Returns a dictionary of TB edge layout information.\n\n This method checks:\n 1. spacing rules.\n 2. PO density rules\n\n if all these pass, return TB edge layout information dictionary.\n '
edge_margin = self.res_config['edge_margin']
imp_ency = self.res_config['imp_enc'][1]
po_max_density = self.res_config['po_max_density']
m1_sp_max = self.res_config['m1_sp_max']
m1_sp_bnd = self.res_config['m1_sp_bnd']
wcore = core_info['width']
hcore = core_info['height']
m1_core_y = core_info['m1_core_y']
m1_h = core_info['m1_h']
bot_od_yloc = core_info['bot_od_yloc']
(wres, lres, wres_lr, lres_tb) = self.get_res_dimension(l, w)
spy = ((hcore - lres) // 2)
if (lres_tb > 0):
hedge_min = ((((edge_margin // 2) + imp_ency) + lres_tb) + spy)
well_yb = (((hedge - spy) - lres_tb) - imp_ency)
else:
od_yb = (bot_od_yloc[0][0] if bot_od_yloc else 0)
imp_ency_edge = max((imp_ency - spy), 0, (- od_yb))
hedge_min = ((edge_margin // 2) + imp_ency_edge)
well_yb = (hedge - imp_ency_edge)
if (hedge < hedge_min):
return None
max_res_area = int(((hedge * wcore) * po_max_density))
if ((wres * lres_tb) > max_res_area):
return None
sp_yb = (m1_sp_bnd + m1_h)
sp_yt = (hedge + m1_core_y[0][0])
if (sp_yt < sp_yb):
m1_edge_y = []
else:
m1_sp = (m1_core_y[0][0] * 2)
if ((((sp_yt - sp_yb) - m1_h) % 2) != 0):
sp_yb -= 1
m1_edge_y = fill_symmetric_const_space((sp_yt - sp_yb), m1_sp_max, m1_h, m1_h, offset=sp_yb)
if ((sp_yt - sp_yb) >= m1_sp):
m1_edge_y.insert(0, (m1_sp_bnd, sp_yb))
return dict(well_yb=well_yb, m1_edge_y=m1_edge_y)<|docstring|>Returns a dictionary of TB edge layout information.
This method checks:
1. spacing rules.
2. PO density rules
if all these pass, return TB edge layout information dictionary.<|endoftext|>
|
7035aef56e3494efa6ceede2e27b5cc124c8c93877a9edee5bd2ffede12c0337
|
def load_paths(path):
'\n Loads pdf and tiff files from the root folder as a 1-D list.\n -----------------------------------------------------------\n :param\n path: str, path of the root dir where to for PDF and TIFFs\n -----------------------------------------------------------\n :return: list with all pdf and tiffs found\n note - Debug here - if extra pdfs are there in the folder, getting an idea about a UID will be helpful!\n '
paths = glob.glob((path + '/**'), recursive=True)
pdfs = [i for i in paths if ('.pdf' in i)]
pdfs_ = [i for i in paths if ('.PDF' in i)]
tiff = [i for i in paths if ('.tiff' in i)]
final_list = np.hstack((pdfs, pdfs_, tiff))
print(('Total of %d files were found' % len(final_list)))
return final_list
|
Loads pdf and tiff files from the root folder as a 1-D list.
-----------------------------------------------------------
:param
path: str, path of the root dir where to for PDF and TIFFs
-----------------------------------------------------------
:return: list with all pdf and tiffs found
note - Debug here - if extra pdfs are there in the folder, getting an idea about a UID will be helpful!
|
utils.py
|
load_paths
|
anksng/PDF-TIFF-image-extractor-with-anonymization-of-faces-and-license-plates
| 0
|
python
|
def load_paths(path):
'\n Loads pdf and tiff files from the root folder as a 1-D list.\n -----------------------------------------------------------\n :param\n path: str, path of the root dir where to for PDF and TIFFs\n -----------------------------------------------------------\n :return: list with all pdf and tiffs found\n note - Debug here - if extra pdfs are there in the folder, getting an idea about a UID will be helpful!\n '
paths = glob.glob((path + '/**'), recursive=True)
pdfs = [i for i in paths if ('.pdf' in i)]
pdfs_ = [i for i in paths if ('.PDF' in i)]
tiff = [i for i in paths if ('.tiff' in i)]
final_list = np.hstack((pdfs, pdfs_, tiff))
print(('Total of %d files were found' % len(final_list)))
return final_list
|
def load_paths(path):
'\n Loads pdf and tiff files from the root folder as a 1-D list.\n -----------------------------------------------------------\n :param\n path: str, path of the root dir where to for PDF and TIFFs\n -----------------------------------------------------------\n :return: list with all pdf and tiffs found\n note - Debug here - if extra pdfs are there in the folder, getting an idea about a UID will be helpful!\n '
paths = glob.glob((path + '/**'), recursive=True)
pdfs = [i for i in paths if ('.pdf' in i)]
pdfs_ = [i for i in paths if ('.PDF' in i)]
tiff = [i for i in paths if ('.tiff' in i)]
final_list = np.hstack((pdfs, pdfs_, tiff))
print(('Total of %d files were found' % len(final_list)))
return final_list<|docstring|>Loads pdf and tiff files from the root folder as a 1-D list.
-----------------------------------------------------------
:param
path: str, path of the root dir where to for PDF and TIFFs
-----------------------------------------------------------
:return: list with all pdf and tiffs found
note - Debug here - if extra pdfs are there in the folder, getting an idea about a UID will be helpful!<|endoftext|>
|
9d04373860c6e323fe8d9cecbe649b7eb871a15533f688649e03b05effbb21b2
|
def chunk_generator(l, batch_size):
'\n Given any list and a batch size, returns a list of lists where each element is a list containing\n N (BATCH_SIZE) elements.\n -----------------------------------------------------------\n :param\n l: a 1-D list\n batch_size: Batch size of a chunk\n -----------------------------------------------------------\n :return: list of lists of batches\n '
chunks = [l[i:(i + batch_size)] for i in range(0, len(l), batch_size)]
return chunks
|
Given any list and a batch size, returns a list of lists where each element is a list containing
N (BATCH_SIZE) elements.
-----------------------------------------------------------
:param
l: a 1-D list
batch_size: Batch size of a chunk
-----------------------------------------------------------
:return: list of lists of batches
|
utils.py
|
chunk_generator
|
anksng/PDF-TIFF-image-extractor-with-anonymization-of-faces-and-license-plates
| 0
|
python
|
def chunk_generator(l, batch_size):
'\n Given any list and a batch size, returns a list of lists where each element is a list containing\n N (BATCH_SIZE) elements.\n -----------------------------------------------------------\n :param\n l: a 1-D list\n batch_size: Batch size of a chunk\n -----------------------------------------------------------\n :return: list of lists of batches\n '
chunks = [l[i:(i + batch_size)] for i in range(0, len(l), batch_size)]
return chunks
|
def chunk_generator(l, batch_size):
'\n Given any list and a batch size, returns a list of lists where each element is a list containing\n N (BATCH_SIZE) elements.\n -----------------------------------------------------------\n :param\n l: a 1-D list\n batch_size: Batch size of a chunk\n -----------------------------------------------------------\n :return: list of lists of batches\n '
chunks = [l[i:(i + batch_size)] for i in range(0, len(l), batch_size)]
return chunks<|docstring|>Given any list and a batch size, returns a list of lists where each element is a list containing
N (BATCH_SIZE) elements.
-----------------------------------------------------------
:param
l: a 1-D list
batch_size: Batch size of a chunk
-----------------------------------------------------------
:return: list of lists of batches<|endoftext|>
|
d36c290da180ac840801e37ccab3d7f09d57d45b43e4d2c0ebeeaa4d84d4f403
|
def get_size(all_paths):
'\n Returns the size of a file given a path. If list is given returns the size of all files.\n -----------------------------------------------------------\n :param\n all_paths: list of paths of files to calculate size\n -----------------------------------------------------------\n :return:\n Size of file(s) in MegaBytes\n '
total_size = 0
for i in all_paths:
total_size += os.path.getsize(i)
return (total_size / 1024000)
|
Returns the size of a file given a path. If list is given returns the size of all files.
-----------------------------------------------------------
:param
all_paths: list of paths of files to calculate size
-----------------------------------------------------------
:return:
Size of file(s) in MegaBytes
|
utils.py
|
get_size
|
anksng/PDF-TIFF-image-extractor-with-anonymization-of-faces-and-license-plates
| 0
|
python
|
def get_size(all_paths):
'\n Returns the size of a file given a path. If list is given returns the size of all files.\n -----------------------------------------------------------\n :param\n all_paths: list of paths of files to calculate size\n -----------------------------------------------------------\n :return:\n Size of file(s) in MegaBytes\n '
total_size = 0
for i in all_paths:
total_size += os.path.getsize(i)
return (total_size / 1024000)
|
def get_size(all_paths):
'\n Returns the size of a file given a path. If list is given returns the size of all files.\n -----------------------------------------------------------\n :param\n all_paths: list of paths of files to calculate size\n -----------------------------------------------------------\n :return:\n Size of file(s) in MegaBytes\n '
total_size = 0
for i in all_paths:
total_size += os.path.getsize(i)
return (total_size / 1024000)<|docstring|>Returns the size of a file given a path. If list is given returns the size of all files.
-----------------------------------------------------------
:param
all_paths: list of paths of files to calculate size
-----------------------------------------------------------
:return:
Size of file(s) in MegaBytes<|endoftext|>
|
6ab08e75b908a72470565843288808738b836003c728fb99f51d4991da41a8ec
|
def read_tiff(path):
'\n Returns a list of image objects given a .tiff file.\n -----------------------------------------------------------\n :param\n path: path to a tiff file\n -----------------------------------------------------------\n :return:\n List of image objects from tiff ( number of images = number of pages in tiff)\n '
img = P.Image.open(path)
images = []
for i in range(img.n_frames):
img.seek(i)
images.append(P.Image.fromarray(np.array(img)))
return images
|
Returns a list of image objects given a .tiff file.
-----------------------------------------------------------
:param
path: path to a tiff file
-----------------------------------------------------------
:return:
List of image objects from tiff ( number of images = number of pages in tiff)
|
utils.py
|
read_tiff
|
anksng/PDF-TIFF-image-extractor-with-anonymization-of-faces-and-license-plates
| 0
|
python
|
def read_tiff(path):
'\n Returns a list of image objects given a .tiff file.\n -----------------------------------------------------------\n :param\n path: path to a tiff file\n -----------------------------------------------------------\n :return:\n List of image objects from tiff ( number of images = number of pages in tiff)\n '
img = P.Image.open(path)
images = []
for i in range(img.n_frames):
img.seek(i)
images.append(P.Image.fromarray(np.array(img)))
return images
|
def read_tiff(path):
'\n Returns a list of image objects given a .tiff file.\n -----------------------------------------------------------\n :param\n path: path to a tiff file\n -----------------------------------------------------------\n :return:\n List of image objects from tiff ( number of images = number of pages in tiff)\n '
img = P.Image.open(path)
images = []
for i in range(img.n_frames):
img.seek(i)
images.append(P.Image.fromarray(np.array(img)))
return images<|docstring|>Returns a list of image objects given a .tiff file.
-----------------------------------------------------------
:param
path: path to a tiff file
-----------------------------------------------------------
:return:
List of image objects from tiff ( number of images = number of pages in tiff)<|endoftext|>
|
07409560895dd3dbc092d9edbf0862b606149fe6b4d2ec7e2fc480ef614c9897
|
def pdf2images(path):
'\n Returns a list of image objects from pdf.\n -----------------------------------------------------------\n :param\n path: path to pdf file\n -----------------------------------------------------------\n :return:\n list of image objects from the pdf\n '
doc = fitz.open(path)
images = []
for i in range(len(doc)):
imglist = doc.getPageImageList(i)
for img in imglist:
xref = img[0]
pix = fitz.Pixmap(doc, xref)
if (pix.n < 5):
images.append(bytes_to_image(pix.getPNGData()))
else:
pix0 = fitz.Pixmap(fitz.csRGB, pix)
images.append(bytes_to_image(pix0.getPNGData()))
pix0 = None
pix = None
return images
|
Returns a list of image objects from pdf.
-----------------------------------------------------------
:param
path: path to pdf file
-----------------------------------------------------------
:return:
list of image objects from the pdf
|
utils.py
|
pdf2images
|
anksng/PDF-TIFF-image-extractor-with-anonymization-of-faces-and-license-plates
| 0
|
python
|
def pdf2images(path):
'\n Returns a list of image objects from pdf.\n -----------------------------------------------------------\n :param\n path: path to pdf file\n -----------------------------------------------------------\n :return:\n list of image objects from the pdf\n '
doc = fitz.open(path)
images = []
for i in range(len(doc)):
imglist = doc.getPageImageList(i)
for img in imglist:
xref = img[0]
pix = fitz.Pixmap(doc, xref)
if (pix.n < 5):
images.append(bytes_to_image(pix.getPNGData()))
else:
pix0 = fitz.Pixmap(fitz.csRGB, pix)
images.append(bytes_to_image(pix0.getPNGData()))
pix0 = None
pix = None
return images
|
def pdf2images(path):
'\n Returns a list of image objects from pdf.\n -----------------------------------------------------------\n :param\n path: path to pdf file\n -----------------------------------------------------------\n :return:\n list of image objects from the pdf\n '
doc = fitz.open(path)
images = []
for i in range(len(doc)):
imglist = doc.getPageImageList(i)
for img in imglist:
xref = img[0]
pix = fitz.Pixmap(doc, xref)
if (pix.n < 5):
images.append(bytes_to_image(pix.getPNGData()))
else:
pix0 = fitz.Pixmap(fitz.csRGB, pix)
images.append(bytes_to_image(pix0.getPNGData()))
pix0 = None
pix = None
return images<|docstring|>Returns a list of image objects from pdf.
-----------------------------------------------------------
:param
path: path to pdf file
-----------------------------------------------------------
:return:
list of image objects from the pdf<|endoftext|>
|
e25f11e6a028bd90c667905f7ce059d7e9aa6cf41ae6150febb85323266ae186
|
def bytes_to_image(image_bytes):
'\n Converts byte image to a PIL image object.\n -----------------------------------------------------------\n :param\n image_bytes: image in Bytes format\n -----------------------------------------------------------\n :return:\n PIL image\n '
imgstream = io.BytesIO(image_bytes)
imageFile = P.Image.open(imgstream)
return imageFile
|
Converts byte image to a PIL image object.
-----------------------------------------------------------
:param
image_bytes: image in Bytes format
-----------------------------------------------------------
:return:
PIL image
|
utils.py
|
bytes_to_image
|
anksng/PDF-TIFF-image-extractor-with-anonymization-of-faces-and-license-plates
| 0
|
python
|
def bytes_to_image(image_bytes):
'\n Converts byte image to a PIL image object.\n -----------------------------------------------------------\n :param\n image_bytes: image in Bytes format\n -----------------------------------------------------------\n :return:\n PIL image\n '
imgstream = io.BytesIO(image_bytes)
imageFile = P.Image.open(imgstream)
return imageFile
|
def bytes_to_image(image_bytes):
'\n Converts byte image to a PIL image object.\n -----------------------------------------------------------\n :param\n image_bytes: image in Bytes format\n -----------------------------------------------------------\n :return:\n PIL image\n '
imgstream = io.BytesIO(image_bytes)
imageFile = P.Image.open(imgstream)
return imageFile<|docstring|>Converts byte image to a PIL image object.
-----------------------------------------------------------
:param
image_bytes: image in Bytes format
-----------------------------------------------------------
:return:
PIL image<|endoftext|>
|
daeadfa4a8dd68e21e9eac754866ef38f7622ad4f41b732687ac5d351e9aa61e
|
def create_destination_dirs(config):
'\n Creates logs and save dirs\n -----------------------------------------------------------\n :param\n config: config for initializing anonymize()\n -----------------------------------------------------------\n :return:\n tuple (str,str) - (path to save dir, path to logs dir)\n '
try:
save_folder = os.mkdir((config.path_to_save_dir + '/anonymized_images/'))
except FileExistsError:
save_folder = (config.path_to_save_dir + '/anonymized_images/')
try:
logs_folder = os.mkdir((config.path_to_save_logs + '/logs/'))
logs_df = pd.DataFrame(columns=['path', 'annotations'])
logs_df.to_csv(((config.path_to_save_logs + '/logs/') + 'logs.csv'), index=False)
except FileExistsError:
logs_folder = (config.path_to_save_logs + '/logs/')
return ((config.path_to_save_dir + '/anonymized_images/'), (config.path_to_save_logs + '/logs/'))
|
Creates logs and save dirs
-----------------------------------------------------------
:param
config: config for initializing anonymize()
-----------------------------------------------------------
:return:
tuple (str,str) - (path to save dir, path to logs dir)
|
utils.py
|
create_destination_dirs
|
anksng/PDF-TIFF-image-extractor-with-anonymization-of-faces-and-license-plates
| 0
|
python
|
def create_destination_dirs(config):
'\n Creates logs and save dirs\n -----------------------------------------------------------\n :param\n config: config for initializing anonymize()\n -----------------------------------------------------------\n :return:\n tuple (str,str) - (path to save dir, path to logs dir)\n '
try:
save_folder = os.mkdir((config.path_to_save_dir + '/anonymized_images/'))
except FileExistsError:
save_folder = (config.path_to_save_dir + '/anonymized_images/')
try:
logs_folder = os.mkdir((config.path_to_save_logs + '/logs/'))
logs_df = pd.DataFrame(columns=['path', 'annotations'])
logs_df.to_csv(((config.path_to_save_logs + '/logs/') + 'logs.csv'), index=False)
except FileExistsError:
logs_folder = (config.path_to_save_logs + '/logs/')
return ((config.path_to_save_dir + '/anonymized_images/'), (config.path_to_save_logs + '/logs/'))
|
def create_destination_dirs(config):
'\n Creates logs and save dirs\n -----------------------------------------------------------\n :param\n config: config for initializing anonymize()\n -----------------------------------------------------------\n :return:\n tuple (str,str) - (path to save dir, path to logs dir)\n '
try:
save_folder = os.mkdir((config.path_to_save_dir + '/anonymized_images/'))
except FileExistsError:
save_folder = (config.path_to_save_dir + '/anonymized_images/')
try:
logs_folder = os.mkdir((config.path_to_save_logs + '/logs/'))
logs_df = pd.DataFrame(columns=['path', 'annotations'])
logs_df.to_csv(((config.path_to_save_logs + '/logs/') + 'logs.csv'), index=False)
except FileExistsError:
logs_folder = (config.path_to_save_logs + '/logs/')
return ((config.path_to_save_dir + '/anonymized_images/'), (config.path_to_save_logs + '/logs/'))<|docstring|>Creates logs and save dirs
-----------------------------------------------------------
:param
config: config for initializing anonymize()
-----------------------------------------------------------
:return:
tuple (str,str) - (path to save dir, path to logs dir)<|endoftext|>
|
2bf63645b28b82995824400cd2ae55b368e2973097301ec7c3fcbadfb1c47e5d
|
def from_pil_image(pil_img):
'\n Returns a graphlab.Image constructed from the passed PIL Image\n -----------------------------------------------------------\n Parameters\n -----------------------------------------------------------\n pil_img : PIL.Image\n A PIL Image that is to be converted to a graphlab.Image\n -----------------------------------------------------------\n Returns\n out: graphlab.Image\n The input converted to a graphlab.Image\n -----------------------------------------------------------\n '
height = pil_img.size[1]
width = pil_img.size[0]
_format = {'JPG': 0, 'PNG': 1, 'RAW': 2, 'UNDEFINED': 3}
if (pil_img.mode == 'L'):
image_data = bytearray([z for z in pil_img.getdata()])
channels = 1
elif (pil_img.mode == 'RGB'):
image_data = bytearray([z for l in pil_img.getdata() for z in l])
channels = 3
else:
image_data = bytearray([z for l in pil_img.getdata() for z in l])
channels = 4
format_enum = _format['RAW']
image_data_size = len(image_data)
img = tc.Image(_image_data=image_data, _width=width, _height=height, _channels=channels, _format_enum=format_enum, _image_data_size=image_data_size)
return img
|
Returns a graphlab.Image constructed from the passed PIL Image
-----------------------------------------------------------
Parameters
-----------------------------------------------------------
pil_img : PIL.Image
A PIL Image that is to be converted to a graphlab.Image
-----------------------------------------------------------
Returns
out: graphlab.Image
The input converted to a graphlab.Image
-----------------------------------------------------------
|
utils.py
|
from_pil_image
|
anksng/PDF-TIFF-image-extractor-with-anonymization-of-faces-and-license-plates
| 0
|
python
|
def from_pil_image(pil_img):
'\n Returns a graphlab.Image constructed from the passed PIL Image\n -----------------------------------------------------------\n Parameters\n -----------------------------------------------------------\n pil_img : PIL.Image\n A PIL Image that is to be converted to a graphlab.Image\n -----------------------------------------------------------\n Returns\n out: graphlab.Image\n The input converted to a graphlab.Image\n -----------------------------------------------------------\n '
height = pil_img.size[1]
width = pil_img.size[0]
_format = {'JPG': 0, 'PNG': 1, 'RAW': 2, 'UNDEFINED': 3}
if (pil_img.mode == 'L'):
image_data = bytearray([z for z in pil_img.getdata()])
channels = 1
elif (pil_img.mode == 'RGB'):
image_data = bytearray([z for l in pil_img.getdata() for z in l])
channels = 3
else:
image_data = bytearray([z for l in pil_img.getdata() for z in l])
channels = 4
format_enum = _format['RAW']
image_data_size = len(image_data)
img = tc.Image(_image_data=image_data, _width=width, _height=height, _channels=channels, _format_enum=format_enum, _image_data_size=image_data_size)
return img
|
def from_pil_image(pil_img):
'\n Returns a graphlab.Image constructed from the passed PIL Image\n -----------------------------------------------------------\n Parameters\n -----------------------------------------------------------\n pil_img : PIL.Image\n A PIL Image that is to be converted to a graphlab.Image\n -----------------------------------------------------------\n Returns\n out: graphlab.Image\n The input converted to a graphlab.Image\n -----------------------------------------------------------\n '
height = pil_img.size[1]
width = pil_img.size[0]
_format = {'JPG': 0, 'PNG': 1, 'RAW': 2, 'UNDEFINED': 3}
if (pil_img.mode == 'L'):
image_data = bytearray([z for z in pil_img.getdata()])
channels = 1
elif (pil_img.mode == 'RGB'):
image_data = bytearray([z for l in pil_img.getdata() for z in l])
channels = 3
else:
image_data = bytearray([z for l in pil_img.getdata() for z in l])
channels = 4
format_enum = _format['RAW']
image_data_size = len(image_data)
img = tc.Image(_image_data=image_data, _width=width, _height=height, _channels=channels, _format_enum=format_enum, _image_data_size=image_data_size)
return img<|docstring|>Returns a graphlab.Image constructed from the passed PIL Image
-----------------------------------------------------------
Parameters
-----------------------------------------------------------
pil_img : PIL.Image
A PIL Image that is to be converted to a graphlab.Image
-----------------------------------------------------------
Returns
out: graphlab.Image
The input converted to a graphlab.Image
-----------------------------------------------------------<|endoftext|>
|
6d6969015e4a7aa117d61e32a5712bff0bd92f79ca14edd32a746ee0d41c394b
|
def pil2cv2(pil_image):
'\n Returns a cv2 image given a PIL image object. (If input image has 2 channels, then converts into three channels)\n -----------------------------------------------------------\n :param\n pil_image: PIL image format\n -----------------------------------------------------------\n :return:\n cv2 image\n '
open_cv_image = np.array(pil_image)
try:
open_cv_image = open_cv_image[(:, :, ::(- 1))].copy()
except IndexError:
pass
if (len(open_cv_image.shape) == 2):
reshaped_img = np.stack(((open_cv_image,) * 3), axis=(- 1))
return reshaped_img
else:
return open_cv_image
|
Returns a cv2 image given a PIL image object. (If input image has 2 channels, then converts into three channels)
-----------------------------------------------------------
:param
pil_image: PIL image format
-----------------------------------------------------------
:return:
cv2 image
|
utils.py
|
pil2cv2
|
anksng/PDF-TIFF-image-extractor-with-anonymization-of-faces-and-license-plates
| 0
|
python
|
def pil2cv2(pil_image):
'\n Returns a cv2 image given a PIL image object. (If input image has 2 channels, then converts into three channels)\n -----------------------------------------------------------\n :param\n pil_image: PIL image format\n -----------------------------------------------------------\n :return:\n cv2 image\n '
open_cv_image = np.array(pil_image)
try:
open_cv_image = open_cv_image[(:, :, ::(- 1))].copy()
except IndexError:
pass
if (len(open_cv_image.shape) == 2):
reshaped_img = np.stack(((open_cv_image,) * 3), axis=(- 1))
return reshaped_img
else:
return open_cv_image
|
def pil2cv2(pil_image):
'\n Returns a cv2 image given a PIL image object. (If input image has 2 channels, then converts into three channels)\n -----------------------------------------------------------\n :param\n pil_image: PIL image format\n -----------------------------------------------------------\n :return:\n cv2 image\n '
open_cv_image = np.array(pil_image)
try:
open_cv_image = open_cv_image[(:, :, ::(- 1))].copy()
except IndexError:
pass
if (len(open_cv_image.shape) == 2):
reshaped_img = np.stack(((open_cv_image,) * 3), axis=(- 1))
return reshaped_img
else:
return open_cv_image<|docstring|>Returns a cv2 image given a PIL image object. (If input image has 2 channels, then converts into three channels)
-----------------------------------------------------------
:param
pil_image: PIL image format
-----------------------------------------------------------
:return:
cv2 image<|endoftext|>
|
35b23c15574cb5df7c3b24a0b78b566f8a477b50a6b6d25f34328e36f190c32e
|
def test_behaviour():
'Test that the function behaves as expected.'
dev = qml.device('default.qubit', wires=2)
with qml.tape.JacobianTape() as tape:
qml.RY(0.3, wires=0)
qml.RX(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.Hamiltonian([(- 1.5), 2.0], [qml.PauliZ(0), qml.PauliZ(1)]))
tape.trainable_params = {2, 3}
(tapes, processing_fn) = hamiltonian_grad(tape, idx=0)
res1 = processing_fn(dev.batch_execute(tapes))
(tapes, processing_fn) = hamiltonian_grad(tape, idx=1)
res2 = processing_fn(dev.batch_execute(tapes))
with qml.tape.JacobianTape() as tape1:
qml.RY(0.3, wires=0)
qml.RX(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
with qml.tape.JacobianTape() as tape2:
qml.RY(0.3, wires=0)
qml.RX(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(1))
dev.reset()
res_expected1 = qml.math.squeeze(dev.execute(tape1))
dev.reset()
res_expected2 = qml.math.squeeze(dev.execute(tape2))
assert (res_expected1 == res1)
assert (res_expected2 == res2)
|
Test that the function behaves as expected.
|
tests/gradients/test_hamiltonian_gradient.py
|
test_behaviour
|
dime10/pennylane
| 712
|
python
|
def test_behaviour():
dev = qml.device('default.qubit', wires=2)
with qml.tape.JacobianTape() as tape:
qml.RY(0.3, wires=0)
qml.RX(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.Hamiltonian([(- 1.5), 2.0], [qml.PauliZ(0), qml.PauliZ(1)]))
tape.trainable_params = {2, 3}
(tapes, processing_fn) = hamiltonian_grad(tape, idx=0)
res1 = processing_fn(dev.batch_execute(tapes))
(tapes, processing_fn) = hamiltonian_grad(tape, idx=1)
res2 = processing_fn(dev.batch_execute(tapes))
with qml.tape.JacobianTape() as tape1:
qml.RY(0.3, wires=0)
qml.RX(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
with qml.tape.JacobianTape() as tape2:
qml.RY(0.3, wires=0)
qml.RX(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(1))
dev.reset()
res_expected1 = qml.math.squeeze(dev.execute(tape1))
dev.reset()
res_expected2 = qml.math.squeeze(dev.execute(tape2))
assert (res_expected1 == res1)
assert (res_expected2 == res2)
|
def test_behaviour():
dev = qml.device('default.qubit', wires=2)
with qml.tape.JacobianTape() as tape:
qml.RY(0.3, wires=0)
qml.RX(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.Hamiltonian([(- 1.5), 2.0], [qml.PauliZ(0), qml.PauliZ(1)]))
tape.trainable_params = {2, 3}
(tapes, processing_fn) = hamiltonian_grad(tape, idx=0)
res1 = processing_fn(dev.batch_execute(tapes))
(tapes, processing_fn) = hamiltonian_grad(tape, idx=1)
res2 = processing_fn(dev.batch_execute(tapes))
with qml.tape.JacobianTape() as tape1:
qml.RY(0.3, wires=0)
qml.RX(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
with qml.tape.JacobianTape() as tape2:
qml.RY(0.3, wires=0)
qml.RX(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(1))
dev.reset()
res_expected1 = qml.math.squeeze(dev.execute(tape1))
dev.reset()
res_expected2 = qml.math.squeeze(dev.execute(tape2))
assert (res_expected1 == res1)
assert (res_expected2 == res2)<|docstring|>Test that the function behaves as expected.<|endoftext|>
|
9422ed667247641558cb186d9083964c1ee698d852f09baa541f4cb5b30db43f
|
def test_override_headers(self):
'\n Ensure we work even if response.headers is set to something other than a MultiDict.\n '
for resp in self.iter_responses('/'):
self.assertTrue((ACL_ORIGIN in resp.headers))
|
Ensure we work even if response.headers is set to something other than a MultiDict.
|
tests/core/test_override_headers.py
|
test_override_headers
|
tirkarthi/flask-cors
| 749
|
python
|
def test_override_headers(self):
'\n \n '
for resp in self.iter_responses('/'):
self.assertTrue((ACL_ORIGIN in resp.headers))
|
def test_override_headers(self):
'\n \n '
for resp in self.iter_responses('/'):
self.assertTrue((ACL_ORIGIN in resp.headers))<|docstring|>Ensure we work even if response.headers is set to something other than a MultiDict.<|endoftext|>
|
5f6c469a61513c7e99adf318acccf3b55ee2fff5ebe8b35568ad30affcb51f80
|
def get_traces(data, one_timestamp):
'\n returns the data splitted by caseid and ordered by start_timestamp\n '
cases = list(set([x['caseid'] for x in data]))
traces = list()
for case in cases:
order_key = ('end_timestamp' if one_timestamp else 'start_timestamp')
trace = sorted(list(filter((lambda x: (x['caseid'] == case)), data)), key=itemgetter(order_key))
traces.append(trace)
return traces
|
returns the data splitted by caseid and ordered by start_timestamp
|
src/simod/log_repairing/conformance_checking.py
|
get_traces
|
AdaptiveBProcess/SiMo-Discoverer
| 12
|
python
|
def get_traces(data, one_timestamp):
'\n \n '
cases = list(set([x['caseid'] for x in data]))
traces = list()
for case in cases:
order_key = ('end_timestamp' if one_timestamp else 'start_timestamp')
trace = sorted(list(filter((lambda x: (x['caseid'] == case)), data)), key=itemgetter(order_key))
traces.append(trace)
return traces
|
def get_traces(data, one_timestamp):
'\n \n '
cases = list(set([x['caseid'] for x in data]))
traces = list()
for case in cases:
order_key = ('end_timestamp' if one_timestamp else 'start_timestamp')
trace = sorted(list(filter((lambda x: (x['caseid'] == case)), data)), key=itemgetter(order_key))
traces.append(trace)
return traces<|docstring|>returns the data splitted by caseid and ordered by start_timestamp<|endoftext|>
|
e83883c0a877ce0918776cbe433f226639e59ccd18d06f4d8c8970aa315804f7
|
def upgrade(tool):
'Added D3.js library and D3 Control-chart for Instrument QC\n '
from bika.lims.upgrade import skip_pre315
if skip_pre315(aq_parent(aq_inner(tool))):
return True
portal = aq_parent(aq_inner(tool))
setup = portal.portal_setup
setup.runImportStepFromProfile('profile-bika.lims:default', 'jsregistry')
return True
|
Added D3.js library and D3 Control-chart for Instrument QC
|
bika/lims/upgrade/to3005.py
|
upgrade
|
hocinebendou/bika.gsoc
| 0
|
python
|
def upgrade(tool):
'\n '
from bika.lims.upgrade import skip_pre315
if skip_pre315(aq_parent(aq_inner(tool))):
return True
portal = aq_parent(aq_inner(tool))
setup = portal.portal_setup
setup.runImportStepFromProfile('profile-bika.lims:default', 'jsregistry')
return True
|
def upgrade(tool):
'\n '
from bika.lims.upgrade import skip_pre315
if skip_pre315(aq_parent(aq_inner(tool))):
return True
portal = aq_parent(aq_inner(tool))
setup = portal.portal_setup
setup.runImportStepFromProfile('profile-bika.lims:default', 'jsregistry')
return True<|docstring|>Added D3.js library and D3 Control-chart for Instrument QC<|endoftext|>
|
861774feb1395c667ddafbce1fccc379323bb327dc353cd1957efdd6dc9b1bf7
|
def run_proto_gen(self):
' Copies and runs the protoc compiler on the proto files'
cmds = ['./utils/pull_protos.sh', './utils/compile_protos.sh']
for cmd in cmds:
print('calling {} (workdir={})'.format(cmd, _library_root))
subprocess.check_call(cmd, cwd=_library_root)
|
Copies and runs the protoc compiler on the proto files
|
wrappers/python/docs/source/conf.py
|
run_proto_gen
|
bbingju/backend-apis
| 9
|
python
|
def run_proto_gen(self):
' '
cmds = ['./utils/pull_protos.sh', './utils/compile_protos.sh']
for cmd in cmds:
print('calling {} (workdir={})'.format(cmd, _library_root))
subprocess.check_call(cmd, cwd=_library_root)
|
def run_proto_gen(self):
' '
cmds = ['./utils/pull_protos.sh', './utils/compile_protos.sh']
for cmd in cmds:
print('calling {} (workdir={})'.format(cmd, _library_root))
subprocess.check_call(cmd, cwd=_library_root)<|docstring|>Copies and runs the protoc compiler on the proto files<|endoftext|>
|
625ab6a381695f540eca2ae8ee96d9506f5806f7e0cfa5ca63e0ea3f75751b14
|
def setup(app):
' Override for a custom sphinx build call. See manual on how to\n change the event when this action is triggered. '
app.connect('builder-inited', run_proto_gen)
|
Override for a custom sphinx build call. See manual on how to
change the event when this action is triggered.
|
wrappers/python/docs/source/conf.py
|
setup
|
bbingju/backend-apis
| 9
|
python
|
def setup(app):
' Override for a custom sphinx build call. See manual on how to\n change the event when this action is triggered. '
app.connect('builder-inited', run_proto_gen)
|
def setup(app):
' Override for a custom sphinx build call. See manual on how to\n change the event when this action is triggered. '
app.connect('builder-inited', run_proto_gen)<|docstring|>Override for a custom sphinx build call. See manual on how to
change the event when this action is triggered.<|endoftext|>
|
f1f66dded7dd494747c56218e252ca320d8734f6e3f50a29172f82ade2927638
|
def build_alphabet(data=None, names=None, name=None):
    r"""
    Return an object representing an ordered alphabet.

    INPUT:

    - ``data`` -- the letters of the alphabet; either an iterable of
      letters (possibly infinite), an integer `n` to represent
      `\{1, \ldots, n\}`, or ``Infinity`` to represent `\NN`

    - ``names`` -- (optional) a list of letter names, or a string used as
      a prefix for generated letters; a list must have the same
      cardinality as the set represented by ``data``

    - ``name`` -- (optional) a named alphabet; one or more (separated by
      spaces) of ``'lower'``, ``'upper'``, ``'space'``, ``'underscore'``,
      ``'punctuation'``, ``'printable'``, ``'binary'``, ``'octal'``,
      ``'decimal'``, ``'hexadecimal'``, ``'radix64'``; alternatively
      ``"positive integers"``/``"PP"`` or ``"natural numbers"``/``"NN"``.
      ``name`` cannot be combined with ``data`` or ``names``.

    OUTPUT: a Sage set (``TotallyOrderedFiniteSet``, an integer range, a
    lazy ``Family``, ...) modelling the requested alphabet; a set passed
    as ``data`` is returned unchanged.

    EXAMPLES::

        sage: build_alphabet(ZZ) is ZZ
        True
        sage: build_alphabet('abc')
        {'a', 'b', 'c'}
        sage: build_alphabet(3, 'x')
        {'x0', 'x1', 'x2'}
        sage: build_alphabet(name="NN")
        Non negative integers

    TESTS::

        sage: Alphabet(3, name="punctuation")
        Traceback (most recent call last):
        ...
        ValueError: name cannot be specified with any other argument
    """
    # ``name`` is mutually exclusive with the positional arguments.
    if ((name is not None) and ((data is not None) or (names is not None))):
        raise ValueError('name cannot be specified with any other argument')
    # If the cardinality/prefix was passed first (e.g. Alphabet(None, 3)),
    # swap so that ``data`` always carries the cardinality or letter set.
    if (isinstance(names, (int, long, Integer)) or (names == Infinity) or ((data is None) and (names is not None))):
        (data, names) = (names, data)
    # Case 1: an integer cardinality n.
    if isinstance(data, (int, long, Integer)):
        if (names is None):
            # No names: the alphabet is the integer range [0, n).
            from sage.sets.integer_range import IntegerRange
            return IntegerRange(Integer(data))
        if isinstance(names, str):
            # A string prefix: letters are prefix0, prefix1, ...
            return TotallyOrderedFiniteSet([(names + ('%d' % i)) for i in xrange(data)])
        if (len(names) == data):
            # An explicit list of n letter names.
            return TotallyOrderedFiniteSet(names)
        raise ValueError('invalid value for names')
    # Normalize the infinite case to the set of non-negative integers.
    if (data == Infinity):
        data = NonNegativeIntegers()
    # Case 2: an explicit collection of letters (or any Sage set).
    if (isinstance(data, (tuple, list, str)) or (data in Sets())):
        if (names is not None):
            if (not isinstance(names, str)):
                raise TypeError('names must be a string when data is a set')
            # Lazy family indexed by ``data``: letter i is names + str(i).
            return Family(data, (lambda i: (names + str(i))), name=names)
        if (data in Sets()):
            # A set is returned as-is.
            return data
        return TotallyOrderedFiniteSet(data)
    # Case 3: a named alphabet.
    if (name is not None):
        if (not isinstance(name, str)):
            raise TypeError('name must be a string')
        if ((name == 'positive integers') or (name == 'PP')):
            from sage.sets.positive_integers import PositiveIntegers
            return PositiveIntegers()
        if ((name == 'natural numbers') or (name == 'NN')):
            return NonNegativeIntegers()
        # Otherwise ``name`` is a space-separated union of character sets.
        data = []
        for alpha_name in name.split(' '):
            try:
                data.extend(list(set_of_letters[alpha_name]))
            except KeyError:
                raise TypeError('name is not recognized')
        return TotallyOrderedFiniteSet(data)
    # Case 4: no arguments at all -- the alphabet of all Python objects.
    if (data is None):
        from sage.structure.parent import Set_PythonType
        return Set_PythonType(object)
    raise ValueError('unable to construct an alphabet from the given parameters')
|
Return an object representing an ordered alphabet.
INPUT:
- ``data`` -- the letters of the alphabet; it can be:
* a list/tuple/iterable of letters; the iterable may be infinite
* an integer `n` to represent `\{1, \ldots, n\}`, or infinity to
represent `\NN`
- ``names`` -- (optional) a list for the letters (i.e. variable names) or
a string for prefix for all letters; if given a list, it must have the
same cardinality as the set represented by ``data``
- ``name`` -- (optional) if given, then return a named set and can be
equal to : ``'lower', 'upper', 'space',
'underscore', 'punctuation', 'printable', 'binary', 'octal', 'decimal',
'hexadecimal', 'radix64'``.
You can use many of them at once, separated by spaces : ``'lower
punctuation'`` represents the union of the two alphabets ``'lower'`` and
``'punctuation'``.
Alternatively, ``name`` can be set to ``"positive integers"`` (or
``"PP"``) or ``"natural numbers"`` (or ``"NN"``).
``name`` cannot be combined with ``data``.
EXAMPLES:
If the argument is a Set, it just returns it::
sage: build_alphabet(ZZ) is ZZ
True
sage: F = FiniteEnumeratedSet('abc')
sage: build_alphabet(F) is F
True
If a list, tuple or string is provided, then it builds a proper Sage class
(:class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet`)::
sage: build_alphabet([0,1,2])
{0, 1, 2}
sage: F = build_alphabet('abc'); F
{'a', 'b', 'c'}
sage: print type(F).__name__
TotallyOrderedFiniteSet_with_category
If an integer and a set is given, then it constructs a
:class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet`::
sage: build_alphabet(3, ['a','b','c'])
{'a', 'b', 'c'}
If an integer and a string is given, then it considers that string as a
prefix::
sage: build_alphabet(3, 'x')
{'x0', 'x1', 'x2'}
If no data is provided, ``name`` may be a string which describe an alphabet.
The available names decompose into two families. The first one are 'positive
integers', 'PP', 'natural numbers' or 'NN' which refer to standard set of
numbers::
sage: build_alphabet(name="positive integers")
Positive integers
sage: build_alphabet(name="PP")
Positive integers
sage: build_alphabet(name="natural numbers")
Non negative integers
sage: build_alphabet(name="NN")
Non negative integers
The other families for the option ``name`` are among 'lower', 'upper',
'space', 'underscore', 'punctuation', 'printable', 'binary', 'octal',
'decimal', 'hexadecimal', 'radix64' which refer to standard set of
charaters. Theses names may be combined by separating them by a space::
sage: build_alphabet(name="lower")
{'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}
sage: build_alphabet(name="hexadecimal")
{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}
sage: build_alphabet(name="decimal punctuation")
{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ' ', ',', '.', ';', ':', '!', '?'}
In the case the alphabet is built from a list or a tuple, the order on the
alphabet is given by the elements themselves::
sage: A = build_alphabet([0,2,1])
sage: A(0) < A(2)
True
sage: A(2) < A(1)
False
If a different order is needed, you may use
:class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet` and
set the option ``facade`` to ``False``. That way, the comparison fits the
order of the input::
sage: A = TotallyOrderedFiniteSet([4,2,6,1], facade=False)
sage: A(4) < A(2)
True
sage: A(1) < A(6)
False
Be careful, the element of the set in the last example are no more
integers and do not compare equal with integers::
sage: type(A.an_element())
<class 'sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet_with_category.element_class'>
sage: A(1) == 1
False
sage: 1 == A(1)
False
We give an example of an infinite alphabet indexed by the positive
integers and the prime numbers::
sage: build_alphabet(oo, 'x')
Lazy family (x(i))_{i in Non negative integers}
sage: build_alphabet(Primes(), 'y')
Lazy family (y(i))_{i in Set of all prime numbers: 2, 3, 5, 7, ...}
TESTS::
sage: Alphabet(3, name="punctuation")
Traceback (most recent call last):
...
ValueError: name cannot be specified with any other argument
sage: Alphabet(8, ['e']*10)
Traceback (most recent call last):
...
ValueError: invalid value for names
sage: Alphabet(8, x)
Traceback (most recent call last):
...
ValueError: invalid value for names
sage: Alphabet(name=x, names="punctuation")
Traceback (most recent call last):
...
ValueError: name cannot be specified with any other argument
sage: Alphabet(x)
Traceback (most recent call last):
...
ValueError: unable to construct an alphabet from the given parameters
|
src/sage/combinat/words/alphabet.py
|
build_alphabet
|
bopopescu/sagesmc
| 5
|
python
|
def build_alphabet(data=None, names=None, name=None):
'\n Return an object representing an ordered alphabet.\n\n INPUT:\n\n - ``data`` -- the letters of the alphabet; it can be:\n\n * a list/tuple/iterable of letters; the iterable may be infinite\n * an integer `n` to represent `\\{1, \\ldots, n\\}`, or infinity to\n represent `\\NN`\n\n - ``names`` -- (optional) a list for the letters (i.e. variable names) or\n a string for prefix for all letters; if given a list, it must have the\n same cardinality as the set represented by ``data``\n\n - ``name`` -- (optional) if given, then return a named set and can be\n equal to : ``\'lower\', \'upper\', \'space\',\n \'underscore\', \'punctuation\', \'printable\', \'binary\', \'octal\', \'decimal\',\n \'hexadecimal\', \'radix64\'``.\n\n You can use many of them at once, separated by spaces : ``\'lower\n punctuation\'`` represents the union of the two alphabets ``\'lower\'`` and\n ``\'punctuation\'``.\n\n Alternatively, ``name`` can be set to ``"positive integers"`` (or\n ``"PP"``) or ``"natural numbers"`` (or ``"NN"``).\n\n ``name`` cannot be combined with ``data``.\n\n EXAMPLES:\n\n If the argument is a Set, it just returns it::\n\n sage: build_alphabet(ZZ) is ZZ\n True\n sage: F = FiniteEnumeratedSet(\'abc\')\n sage: build_alphabet(F) is F\n True\n\n If a list, tuple or string is provided, then it builds a proper Sage class\n (:class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet`)::\n\n sage: build_alphabet([0,1,2])\n {0, 1, 2}\n sage: F = build_alphabet(\'abc\'); F\n {\'a\', \'b\', \'c\'}\n sage: print type(F).__name__\n TotallyOrderedFiniteSet_with_category\n\n If an integer and a set is given, then it constructs a\n :class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet`::\n\n sage: build_alphabet(3, [\'a\',\'b\',\'c\'])\n {\'a\', \'b\', \'c\'}\n\n If an integer and a string is given, then it considers that string as a\n prefix::\n\n sage: build_alphabet(3, \'x\')\n {\'x0\', \'x1\', \'x2\'}\n\n If no data is provided, ``name`` may be a string 
which describe an alphabet.\n The available names decompose into two families. The first one are \'positive\n integers\', \'PP\', \'natural numbers\' or \'NN\' which refer to standard set of\n numbers::\n\n sage: build_alphabet(name="positive integers")\n Positive integers\n sage: build_alphabet(name="PP")\n Positive integers\n sage: build_alphabet(name="natural numbers")\n Non negative integers\n sage: build_alphabet(name="NN")\n Non negative integers\n\n The other families for the option ``name`` are among \'lower\', \'upper\',\n \'space\', \'underscore\', \'punctuation\', \'printable\', \'binary\', \'octal\',\n \'decimal\', \'hexadecimal\', \'radix64\' which refer to standard set of\n charaters. Theses names may be combined by separating them by a space::\n\n sage: build_alphabet(name="lower")\n {\'a\', \'b\', \'c\', \'d\', \'e\', \'f\', \'g\', \'h\', \'i\', \'j\', \'k\', \'l\', \'m\', \'n\', \'o\', \'p\', \'q\', \'r\', \'s\', \'t\', \'u\', \'v\', \'w\', \'x\', \'y\', \'z\'}\n sage: build_alphabet(name="hexadecimal")\n {\'0\', \'1\', \'2\', \'3\', \'4\', \'5\', \'6\', \'7\', \'8\', \'9\', \'a\', \'b\', \'c\', \'d\', \'e\', \'f\'}\n sage: build_alphabet(name="decimal punctuation")\n {\'0\', \'1\', \'2\', \'3\', \'4\', \'5\', \'6\', \'7\', \'8\', \'9\', \' \', \',\', \'.\', \';\', \':\', \'!\', \'?\'}\n\n In the case the alphabet is built from a list or a tuple, the order on the\n alphabet is given by the elements themselves::\n\n sage: A = build_alphabet([0,2,1])\n sage: A(0) < A(2)\n True\n sage: A(2) < A(1)\n False\n\n If a different order is needed, you may use\n :class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet` and\n set the option ``facade`` to ``False``. 
That way, the comparison fits the\n order of the input::\n\n sage: A = TotallyOrderedFiniteSet([4,2,6,1], facade=False)\n sage: A(4) < A(2)\n True\n sage: A(1) < A(6)\n False\n\n Be careful, the element of the set in the last example are no more\n integers and do not compare equal with integers::\n\n sage: type(A.an_element())\n <class \'sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet_with_category.element_class\'>\n sage: A(1) == 1\n False\n sage: 1 == A(1)\n False\n\n We give an example of an infinite alphabet indexed by the positive\n integers and the prime numbers::\n\n sage: build_alphabet(oo, \'x\')\n Lazy family (x(i))_{i in Non negative integers}\n sage: build_alphabet(Primes(), \'y\')\n Lazy family (y(i))_{i in Set of all prime numbers: 2, 3, 5, 7, ...}\n\n TESTS::\n\n sage: Alphabet(3, name="punctuation")\n Traceback (most recent call last):\n ...\n ValueError: name cannot be specified with any other argument\n sage: Alphabet(8, [\'e\']*10)\n Traceback (most recent call last):\n ...\n ValueError: invalid value for names\n sage: Alphabet(8, x)\n Traceback (most recent call last):\n ...\n ValueError: invalid value for names\n sage: Alphabet(name=x, names="punctuation")\n Traceback (most recent call last):\n ...\n ValueError: name cannot be specified with any other argument\n sage: Alphabet(x)\n Traceback (most recent call last):\n ...\n ValueError: unable to construct an alphabet from the given parameters\n '
if ((name is not None) and ((data is not None) or (names is not None))):
raise ValueError('name cannot be specified with any other argument')
if (isinstance(names, (int, long, Integer)) or (names == Infinity) or ((data is None) and (names is not None))):
(data, names) = (names, data)
if isinstance(data, (int, long, Integer)):
if (names is None):
from sage.sets.integer_range import IntegerRange
return IntegerRange(Integer(data))
if isinstance(names, str):
return TotallyOrderedFiniteSet([(names + ('%d' % i)) for i in xrange(data)])
if (len(names) == data):
return TotallyOrderedFiniteSet(names)
raise ValueError('invalid value for names')
if (data == Infinity):
data = NonNegativeIntegers()
if (isinstance(data, (tuple, list, str)) or (data in Sets())):
if (names is not None):
if (not isinstance(names, str)):
raise TypeError('names must be a string when data is a set')
return Family(data, (lambda i: (names + str(i))), name=names)
if (data in Sets()):
return data
return TotallyOrderedFiniteSet(data)
if (name is not None):
if (not isinstance(name, str)):
raise TypeError('name must be a string')
if ((name == 'positive integers') or (name == 'PP')):
from sage.sets.positive_integers import PositiveIntegers
return PositiveIntegers()
if ((name == 'natural numbers') or (name == 'NN')):
return NonNegativeIntegers()
data = []
for alpha_name in name.split(' '):
try:
data.extend(list(set_of_letters[alpha_name]))
except KeyError:
raise TypeError('name is not recognized')
return TotallyOrderedFiniteSet(data)
if (data is None):
from sage.structure.parent import Set_PythonType
return Set_PythonType(object)
raise ValueError('unable to construct an alphabet from the given parameters')
|
def build_alphabet(data=None, names=None, name=None):
'\n Return an object representing an ordered alphabet.\n\n INPUT:\n\n - ``data`` -- the letters of the alphabet; it can be:\n\n * a list/tuple/iterable of letters; the iterable may be infinite\n * an integer `n` to represent `\\{1, \\ldots, n\\}`, or infinity to\n represent `\\NN`\n\n - ``names`` -- (optional) a list for the letters (i.e. variable names) or\n a string for prefix for all letters; if given a list, it must have the\n same cardinality as the set represented by ``data``\n\n - ``name`` -- (optional) if given, then return a named set and can be\n equal to : ``\'lower\', \'upper\', \'space\',\n \'underscore\', \'punctuation\', \'printable\', \'binary\', \'octal\', \'decimal\',\n \'hexadecimal\', \'radix64\'``.\n\n You can use many of them at once, separated by spaces : ``\'lower\n punctuation\'`` represents the union of the two alphabets ``\'lower\'`` and\n ``\'punctuation\'``.\n\n Alternatively, ``name`` can be set to ``"positive integers"`` (or\n ``"PP"``) or ``"natural numbers"`` (or ``"NN"``).\n\n ``name`` cannot be combined with ``data``.\n\n EXAMPLES:\n\n If the argument is a Set, it just returns it::\n\n sage: build_alphabet(ZZ) is ZZ\n True\n sage: F = FiniteEnumeratedSet(\'abc\')\n sage: build_alphabet(F) is F\n True\n\n If a list, tuple or string is provided, then it builds a proper Sage class\n (:class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet`)::\n\n sage: build_alphabet([0,1,2])\n {0, 1, 2}\n sage: F = build_alphabet(\'abc\'); F\n {\'a\', \'b\', \'c\'}\n sage: print type(F).__name__\n TotallyOrderedFiniteSet_with_category\n\n If an integer and a set is given, then it constructs a\n :class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet`::\n\n sage: build_alphabet(3, [\'a\',\'b\',\'c\'])\n {\'a\', \'b\', \'c\'}\n\n If an integer and a string is given, then it considers that string as a\n prefix::\n\n sage: build_alphabet(3, \'x\')\n {\'x0\', \'x1\', \'x2\'}\n\n If no data is provided, ``name`` may be a string 
which describe an alphabet.\n The available names decompose into two families. The first one are \'positive\n integers\', \'PP\', \'natural numbers\' or \'NN\' which refer to standard set of\n numbers::\n\n sage: build_alphabet(name="positive integers")\n Positive integers\n sage: build_alphabet(name="PP")\n Positive integers\n sage: build_alphabet(name="natural numbers")\n Non negative integers\n sage: build_alphabet(name="NN")\n Non negative integers\n\n The other families for the option ``name`` are among \'lower\', \'upper\',\n \'space\', \'underscore\', \'punctuation\', \'printable\', \'binary\', \'octal\',\n \'decimal\', \'hexadecimal\', \'radix64\' which refer to standard set of\n charaters. Theses names may be combined by separating them by a space::\n\n sage: build_alphabet(name="lower")\n {\'a\', \'b\', \'c\', \'d\', \'e\', \'f\', \'g\', \'h\', \'i\', \'j\', \'k\', \'l\', \'m\', \'n\', \'o\', \'p\', \'q\', \'r\', \'s\', \'t\', \'u\', \'v\', \'w\', \'x\', \'y\', \'z\'}\n sage: build_alphabet(name="hexadecimal")\n {\'0\', \'1\', \'2\', \'3\', \'4\', \'5\', \'6\', \'7\', \'8\', \'9\', \'a\', \'b\', \'c\', \'d\', \'e\', \'f\'}\n sage: build_alphabet(name="decimal punctuation")\n {\'0\', \'1\', \'2\', \'3\', \'4\', \'5\', \'6\', \'7\', \'8\', \'9\', \' \', \',\', \'.\', \';\', \':\', \'!\', \'?\'}\n\n In the case the alphabet is built from a list or a tuple, the order on the\n alphabet is given by the elements themselves::\n\n sage: A = build_alphabet([0,2,1])\n sage: A(0) < A(2)\n True\n sage: A(2) < A(1)\n False\n\n If a different order is needed, you may use\n :class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet` and\n set the option ``facade`` to ``False``. 
That way, the comparison fits the\n order of the input::\n\n sage: A = TotallyOrderedFiniteSet([4,2,6,1], facade=False)\n sage: A(4) < A(2)\n True\n sage: A(1) < A(6)\n False\n\n Be careful, the element of the set in the last example are no more\n integers and do not compare equal with integers::\n\n sage: type(A.an_element())\n <class \'sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet_with_category.element_class\'>\n sage: A(1) == 1\n False\n sage: 1 == A(1)\n False\n\n We give an example of an infinite alphabet indexed by the positive\n integers and the prime numbers::\n\n sage: build_alphabet(oo, \'x\')\n Lazy family (x(i))_{i in Non negative integers}\n sage: build_alphabet(Primes(), \'y\')\n Lazy family (y(i))_{i in Set of all prime numbers: 2, 3, 5, 7, ...}\n\n TESTS::\n\n sage: Alphabet(3, name="punctuation")\n Traceback (most recent call last):\n ...\n ValueError: name cannot be specified with any other argument\n sage: Alphabet(8, [\'e\']*10)\n Traceback (most recent call last):\n ...\n ValueError: invalid value for names\n sage: Alphabet(8, x)\n Traceback (most recent call last):\n ...\n ValueError: invalid value for names\n sage: Alphabet(name=x, names="punctuation")\n Traceback (most recent call last):\n ...\n ValueError: name cannot be specified with any other argument\n sage: Alphabet(x)\n Traceback (most recent call last):\n ...\n ValueError: unable to construct an alphabet from the given parameters\n '
if ((name is not None) and ((data is not None) or (names is not None))):
raise ValueError('name cannot be specified with any other argument')
if (isinstance(names, (int, long, Integer)) or (names == Infinity) or ((data is None) and (names is not None))):
(data, names) = (names, data)
if isinstance(data, (int, long, Integer)):
if (names is None):
from sage.sets.integer_range import IntegerRange
return IntegerRange(Integer(data))
if isinstance(names, str):
return TotallyOrderedFiniteSet([(names + ('%d' % i)) for i in xrange(data)])
if (len(names) == data):
return TotallyOrderedFiniteSet(names)
raise ValueError('invalid value for names')
if (data == Infinity):
data = NonNegativeIntegers()
if (isinstance(data, (tuple, list, str)) or (data in Sets())):
if (names is not None):
if (not isinstance(names, str)):
raise TypeError('names must be a string when data is a set')
return Family(data, (lambda i: (names + str(i))), name=names)
if (data in Sets()):
return data
return TotallyOrderedFiniteSet(data)
if (name is not None):
if (not isinstance(name, str)):
raise TypeError('name must be a string')
if ((name == 'positive integers') or (name == 'PP')):
from sage.sets.positive_integers import PositiveIntegers
return PositiveIntegers()
if ((name == 'natural numbers') or (name == 'NN')):
return NonNegativeIntegers()
data = []
for alpha_name in name.split(' '):
try:
data.extend(list(set_of_letters[alpha_name]))
except KeyError:
raise TypeError('name is not recognized')
return TotallyOrderedFiniteSet(data)
if (data is None):
from sage.structure.parent import Set_PythonType
return Set_PythonType(object)
raise ValueError('unable to construct an alphabet from the given parameters')<|docstring|>Return an object representing an ordered alphabet.
INPUT:
- ``data`` -- the letters of the alphabet; it can be:
* a list/tuple/iterable of letters; the iterable may be infinite
* an integer `n` to represent `\{1, \ldots, n\}`, or infinity to
represent `\NN`
- ``names`` -- (optional) a list for the letters (i.e. variable names) or
a string for prefix for all letters; if given a list, it must have the
same cardinality as the set represented by ``data``
- ``name`` -- (optional) if given, then return a named set and can be
equal to : ``'lower', 'upper', 'space',
'underscore', 'punctuation', 'printable', 'binary', 'octal', 'decimal',
'hexadecimal', 'radix64'``.
You can use many of them at once, separated by spaces : ``'lower
punctuation'`` represents the union of the two alphabets ``'lower'`` and
``'punctuation'``.
Alternatively, ``name`` can be set to ``"positive integers"`` (or
``"PP"``) or ``"natural numbers"`` (or ``"NN"``).
``name`` cannot be combined with ``data``.
EXAMPLES:
If the argument is a Set, it just returns it::
sage: build_alphabet(ZZ) is ZZ
True
sage: F = FiniteEnumeratedSet('abc')
sage: build_alphabet(F) is F
True
If a list, tuple or string is provided, then it builds a proper Sage class
(:class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet`)::
sage: build_alphabet([0,1,2])
{0, 1, 2}
sage: F = build_alphabet('abc'); F
{'a', 'b', 'c'}
sage: print type(F).__name__
TotallyOrderedFiniteSet_with_category
If an integer and a set is given, then it constructs a
:class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet`::
sage: build_alphabet(3, ['a','b','c'])
{'a', 'b', 'c'}
If an integer and a string is given, then it considers that string as a
prefix::
sage: build_alphabet(3, 'x')
{'x0', 'x1', 'x2'}
If no data is provided, ``name`` may be a string which describe an alphabet.
The available names decompose into two families. The first one are 'positive
integers', 'PP', 'natural numbers' or 'NN' which refer to standard set of
numbers::
sage: build_alphabet(name="positive integers")
Positive integers
sage: build_alphabet(name="PP")
Positive integers
sage: build_alphabet(name="natural numbers")
Non negative integers
sage: build_alphabet(name="NN")
Non negative integers
The other families for the option ``name`` are among 'lower', 'upper',
'space', 'underscore', 'punctuation', 'printable', 'binary', 'octal',
'decimal', 'hexadecimal', 'radix64' which refer to standard set of
charaters. Theses names may be combined by separating them by a space::
sage: build_alphabet(name="lower")
{'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}
sage: build_alphabet(name="hexadecimal")
{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}
sage: build_alphabet(name="decimal punctuation")
{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ' ', ',', '.', ';', ':', '!', '?'}
In the case the alphabet is built from a list or a tuple, the order on the
alphabet is given by the elements themselves::
sage: A = build_alphabet([0,2,1])
sage: A(0) < A(2)
True
sage: A(2) < A(1)
False
If a different order is needed, you may use
:class:`~sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet` and
set the option ``facade`` to ``False``. That way, the comparison fits the
order of the input::
sage: A = TotallyOrderedFiniteSet([4,2,6,1], facade=False)
sage: A(4) < A(2)
True
sage: A(1) < A(6)
False
Be careful, the element of the set in the last example are no more
integers and do not compare equal with integers::
sage: type(A.an_element())
<class 'sage.sets.totally_ordered_finite_set.TotallyOrderedFiniteSet_with_category.element_class'>
sage: A(1) == 1
False
sage: 1 == A(1)
False
We give an example of an infinite alphabet indexed by the positive
integers and the prime numbers::
sage: build_alphabet(oo, 'x')
Lazy family (x(i))_{i in Non negative integers}
sage: build_alphabet(Primes(), 'y')
Lazy family (y(i))_{i in Set of all prime numbers: 2, 3, 5, 7, ...}
TESTS::
sage: Alphabet(3, name="punctuation")
Traceback (most recent call last):
...
ValueError: name cannot be specified with any other argument
sage: Alphabet(8, ['e']*10)
Traceback (most recent call last):
...
ValueError: invalid value for names
sage: Alphabet(8, x)
Traceback (most recent call last):
...
ValueError: invalid value for names
sage: Alphabet(name=x, names="punctuation")
Traceback (most recent call last):
...
ValueError: name cannot be specified with any other argument
sage: Alphabet(x)
Traceback (most recent call last):
...
ValueError: unable to construct an alphabet from the given parameters<|endoftext|>
|
c3fa522e9f07939ce8e0a0c96161a22c8b7e308c60b3cadd91925775f7d4a6fb
|
def __new__(self, alphabet=None, name=None):
    """
    Deprecated constructor kept for backward compatibility.

    Emits the deprecation warning and, when given any argument,
    delegates to :func:`build_alphabet`; with no arguments it builds a
    bare parent of the backward-compatibility class.

    EXAMPLES::

        sage: from sage.combinat.words.alphabet import OrderedAlphabet
        sage: A = OrderedAlphabet('ab'); A  # indirect doctest
        doctest:1: DeprecationWarning: OrderedAlphabet is deprecated; use Alphabet instead.
        See http://trac.sagemath.org/8920 for details.
        {'a', 'b'}
    """
    from sage.misc.superseded import deprecation
    deprecation(8920, 'OrderedAlphabet is deprecated; use Alphabet instead.')
    if ((alphabet is None) and (name is None)):
        from sage.structure.parent import Parent
        return Parent.__new__(OrderedAlphabet_backward_compatibility)
    return build_alphabet(data=alphabet, name=name)
|
EXAMPLES::
sage: from sage.combinat.words.alphabet import OrderedAlphabet
sage: A = OrderedAlphabet('ab'); A # indirect doctest
doctest:1: DeprecationWarning: OrderedAlphabet is deprecated; use Alphabet instead.
See http://trac.sagemath.org/8920 for details.
{'a', 'b'}
|
src/sage/combinat/words/alphabet.py
|
__new__
|
bopopescu/sagesmc
| 5
|
python
|
def __new__(self, alphabet=None, name=None):
"\n EXAMPLES::\n\n sage: from sage.combinat.words.alphabet import OrderedAlphabet\n sage: A = OrderedAlphabet('ab'); A # indirect doctest\n doctest:1: DeprecationWarning: OrderedAlphabet is deprecated; use Alphabet instead.\n See http://trac.sagemath.org/8920 for details.\n {'a', 'b'}\n "
from sage.misc.superseded import deprecation
deprecation(8920, 'OrderedAlphabet is deprecated; use Alphabet instead.')
if ((alphabet is not None) or (name is not None)):
return build_alphabet(data=alphabet, name=name)
from sage.structure.parent import Parent
return Parent.__new__(OrderedAlphabet_backward_compatibility)
|
def __new__(self, alphabet=None, name=None):
"\n EXAMPLES::\n\n sage: from sage.combinat.words.alphabet import OrderedAlphabet\n sage: A = OrderedAlphabet('ab'); A # indirect doctest\n doctest:1: DeprecationWarning: OrderedAlphabet is deprecated; use Alphabet instead.\n See http://trac.sagemath.org/8920 for details.\n {'a', 'b'}\n "
from sage.misc.superseded import deprecation
deprecation(8920, 'OrderedAlphabet is deprecated; use Alphabet instead.')
if ((alphabet is not None) or (name is not None)):
return build_alphabet(data=alphabet, name=name)
from sage.structure.parent import Parent
return Parent.__new__(OrderedAlphabet_backward_compatibility)<|docstring|>EXAMPLES::
sage: from sage.combinat.words.alphabet import OrderedAlphabet
sage: A = OrderedAlphabet('ab'); A # indirect doctest
doctest:1: DeprecationWarning: OrderedAlphabet is deprecated; use Alphabet instead.
See http://trac.sagemath.org/8920 for details.
{'a', 'b'}<|endoftext|>
|
7235256742d1487691fa6a56edda32b43a8337f7717777fb7bf8cee176db42bb
|
def __getattr__(self, name):
"\n If the attribute '_elements' is called then it is set to '_alphabet'.\n\n EXAMPLES::\n\n sage: from sage.combinat.words.alphabet import OrderedAlphabet\n sage: O = OrderedAlphabet()\n doctest:1: DeprecationWarning: OrderedAlphabet is deprecated; use Alphabet instead.\n See http://trac.sagemath.org/8920 for details.\n sage: O._alphabet = ['a', 'b']\n sage: O._elements\n ('a', 'b')\n "
if (name == '_elements'):
if (not hasattr(self, '_alphabet')):
raise AttributeError("no attribute '_elements'")
self._elements = tuple(self._alphabet)
from sage.structure.parent import Parent
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
Parent.__init__(self, category=FiniteEnumeratedSets(), facade=True)
return self._elements
raise AttributeError(('no attribute %s' % name))
|
If the attribute '_elements' is called then it is set to '_alphabet'.
EXAMPLES::
sage: from sage.combinat.words.alphabet import OrderedAlphabet
sage: O = OrderedAlphabet()
doctest:1: DeprecationWarning: OrderedAlphabet is deprecated; use Alphabet instead.
See http://trac.sagemath.org/8920 for details.
sage: O._alphabet = ['a', 'b']
sage: O._elements
('a', 'b')
|
src/sage/combinat/words/alphabet.py
|
__getattr__
|
bopopescu/sagesmc
| 5
|
python
|
def __getattr__(self, name):
"\n If the attribute '_elements' is called then it is set to '_alphabet'.\n\n EXAMPLES::\n\n sage: from sage.combinat.words.alphabet import OrderedAlphabet\n sage: O = OrderedAlphabet()\n doctest:1: DeprecationWarning: OrderedAlphabet is deprecated; use Alphabet instead.\n See http://trac.sagemath.org/8920 for details.\n sage: O._alphabet = ['a', 'b']\n sage: O._elements\n ('a', 'b')\n "
if (name == '_elements'):
if (not hasattr(self, '_alphabet')):
raise AttributeError("no attribute '_elements'")
self._elements = tuple(self._alphabet)
from sage.structure.parent import Parent
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
Parent.__init__(self, category=FiniteEnumeratedSets(), facade=True)
return self._elements
raise AttributeError(('no attribute %s' % name))
|
def __getattr__(self, name):
"\n If the attribute '_elements' is called then it is set to '_alphabet'.\n\n EXAMPLES::\n\n sage: from sage.combinat.words.alphabet import OrderedAlphabet\n sage: O = OrderedAlphabet()\n doctest:1: DeprecationWarning: OrderedAlphabet is deprecated; use Alphabet instead.\n See http://trac.sagemath.org/8920 for details.\n sage: O._alphabet = ['a', 'b']\n sage: O._elements\n ('a', 'b')\n "
if (name == '_elements'):
if (not hasattr(self, '_alphabet')):
raise AttributeError("no attribute '_elements'")
self._elements = tuple(self._alphabet)
from sage.structure.parent import Parent
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
Parent.__init__(self, category=FiniteEnumeratedSets(), facade=True)
return self._elements
raise AttributeError(('no attribute %s' % name))<|docstring|>If the attribute '_elements' is called then it is set to '_alphabet'.
EXAMPLES::
sage: from sage.combinat.words.alphabet import OrderedAlphabet
sage: O = OrderedAlphabet()
doctest:1: DeprecationWarning: OrderedAlphabet is deprecated; use Alphabet instead.
See http://trac.sagemath.org/8920 for details.
sage: O._alphabet = ['a', 'b']
sage: O._elements
('a', 'b')<|endoftext|>
|
2ff1a41df72dab016fe166af9ffe1ef65e507dce6d40fcbbfc6d1613b1b151ea
|
def sample_unit_sphere(npoints):
'\n return `npoints` random points on the unit sphere\n '
vec = np.random.randn(3, npoints)
vec /= np.linalg.norm(vec, axis=0)
return vec.T
|
return `npoints` random points on the unit sphere
|
a_simple_algorithm_for_metamer_mismatch_bodies/metamer_mismatch_body.py
|
sample_unit_sphere
|
Chandler/color_science_papers
| 1
|
python
|
def sample_unit_sphere(npoints):
'\n \n '
vec = np.random.randn(3, npoints)
vec /= np.linalg.norm(vec, axis=0)
return vec.T
|
def sample_unit_sphere(npoints):
'\n \n '
vec = np.random.randn(3, npoints)
vec /= np.linalg.norm(vec, axis=0)
return vec.T<|docstring|>return `npoints` random points on the unit sphere<|endoftext|>
|
b75935aa7c98a1f0941ec09bcfce2b686171d1c69ee92605df1784cf37eb86e4
|
def solve_linear_program(object_function_coefficents, constraint_function=None, constraint_function_required_value=None, bounds=None):
'\n This method minimizes and maximizes a linear function with respect to\n an equality constraint and lower and upper bounds\n\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html\n Minimize: c^T * x\n Subject to: \n A_ub * x <= b_ub\n A_eq * x == b_eq\n '
xmax = optimize.linprog(c=object_function_coefficents, A_eq=constraint_function, b_eq=constraint_function_required_value, bounds=bounds).x
xmin = optimize.linprog(c=(object_function_coefficents * (- 1)), A_eq=constraint_function, b_eq=constraint_function_required_value, bounds=bounds).x
return (xmin, xmax)
|
This method minimizes and maximizes a linear function with respect to
an equality constraint and lower and upper bounds
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html
Minimize: c^T * x
Subject to:
A_ub * x <= b_ub
A_eq * x == b_eq
|
a_simple_algorithm_for_metamer_mismatch_bodies/metamer_mismatch_body.py
|
solve_linear_program
|
Chandler/color_science_papers
| 1
|
python
|
def solve_linear_program(object_function_coefficents, constraint_function=None, constraint_function_required_value=None, bounds=None):
'\n This method minimizes and maximizes a linear function with respect to\n an equality constraint and lower and upper bounds\n\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html\n Minimize: c^T * x\n Subject to: \n A_ub * x <= b_ub\n A_eq * x == b_eq\n '
xmax = optimize.linprog(c=object_function_coefficents, A_eq=constraint_function, b_eq=constraint_function_required_value, bounds=bounds).x
xmin = optimize.linprog(c=(object_function_coefficents * (- 1)), A_eq=constraint_function, b_eq=constraint_function_required_value, bounds=bounds).x
return (xmin, xmax)
|
def solve_linear_program(object_function_coefficents, constraint_function=None, constraint_function_required_value=None, bounds=None):
'\n This method minimizes and maximizes a linear function with respect to\n an equality constraint and lower and upper bounds\n\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html\n Minimize: c^T * x\n Subject to: \n A_ub * x <= b_ub\n A_eq * x == b_eq\n '
xmax = optimize.linprog(c=object_function_coefficents, A_eq=constraint_function, b_eq=constraint_function_required_value, bounds=bounds).x
xmin = optimize.linprog(c=(object_function_coefficents * (- 1)), A_eq=constraint_function, b_eq=constraint_function_required_value, bounds=bounds).x
return (xmin, xmax)<|docstring|>This method minimizes and maximizes a linear function with respect to
an equality constraint and lower and upper bounds
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html
Minimize: c^T * x
Subject to:
A_ub * x <= b_ub
A_eq * x == b_eq<|endoftext|>
|
193b1448400f21007fe05b9e0f7e82f9239b9f44fcc5a9dc39ee9cc2fbdd0d5c
|
def compute_object_color_solid(observer_response_functions, scene_illumination=equal_energy_illumination_vector, sampling_resolution=100):
'\n The linear programming formulation of the OCS is identical to that of the MMB minus\n the constraints related to the second observer.\n\n An MMB is a product of two observers but the OCS is simply the set of all object colors\n for a single observer.\n\n "Computing the object colour solid using spherical sampling"\n https://ueaeprints.uea.ac.uk/62975/\n '
assert_shape(observer_response_functions, (LIGHT_DIMENSIONS, COLOR_DIMENSIONS))
assert_shape(scene_illumination, (LIGHT_DIMENSIONS,))
color_signal_map = (observer_response_functions.T * scene_illumination).T
ocs_extrema_points = []
for direction_vector in sample_unit_sphere(sampling_resolution):
direction_functional = direction_vector
ΨF = np.dot(color_signal_map, direction_functional)
(min_reflectance, max_reflectance) = solve_linear_program(object_function_coefficents=ΨF, bounds=(0, 1))
min_color_signal = np.dot(color_signal_map.T, min_reflectance)
max_color_signal = np.dot(color_signal_map.T, max_reflectance)
ocs_extrema_points.extend([min_color_signal, max_color_signal])
scale_factor = np.max(np.dot(observer_response_functions.T, scene_illumination))
return [(p / scale_factor) for p in ocs_extrema_points]
|
The linear programming formulation of the OCS is identical to that of the MMB minus
the constraints related to the second observer.
An MMB is a product of two observers but the OCS is simply the set of all object colors
for a single observer.
"Computing the object colour solid using spherical sampling"
https://ueaeprints.uea.ac.uk/62975/
|
a_simple_algorithm_for_metamer_mismatch_bodies/metamer_mismatch_body.py
|
compute_object_color_solid
|
Chandler/color_science_papers
| 1
|
python
|
def compute_object_color_solid(observer_response_functions, scene_illumination=equal_energy_illumination_vector, sampling_resolution=100):
'\n The linear programming formulation of the OCS is identical to that of the MMB minus\n the constraints related to the second observer.\n\n An MMB is a product of two observers but the OCS is simply the set of all object colors\n for a single observer.\n\n "Computing the object colour solid using spherical sampling"\n https://ueaeprints.uea.ac.uk/62975/\n '
assert_shape(observer_response_functions, (LIGHT_DIMENSIONS, COLOR_DIMENSIONS))
assert_shape(scene_illumination, (LIGHT_DIMENSIONS,))
color_signal_map = (observer_response_functions.T * scene_illumination).T
ocs_extrema_points = []
for direction_vector in sample_unit_sphere(sampling_resolution):
direction_functional = direction_vector
ΨF = np.dot(color_signal_map, direction_functional)
(min_reflectance, max_reflectance) = solve_linear_program(object_function_coefficents=ΨF, bounds=(0, 1))
min_color_signal = np.dot(color_signal_map.T, min_reflectance)
max_color_signal = np.dot(color_signal_map.T, max_reflectance)
ocs_extrema_points.extend([min_color_signal, max_color_signal])
scale_factor = np.max(np.dot(observer_response_functions.T, scene_illumination))
return [(p / scale_factor) for p in ocs_extrema_points]
|
def compute_object_color_solid(observer_response_functions, scene_illumination=equal_energy_illumination_vector, sampling_resolution=100):
'\n The linear programming formulation of the OCS is identical to that of the MMB minus\n the constraints related to the second observer.\n\n An MMB is a product of two observers but the OCS is simply the set of all object colors\n for a single observer.\n\n "Computing the object colour solid using spherical sampling"\n https://ueaeprints.uea.ac.uk/62975/\n '
assert_shape(observer_response_functions, (LIGHT_DIMENSIONS, COLOR_DIMENSIONS))
assert_shape(scene_illumination, (LIGHT_DIMENSIONS,))
color_signal_map = (observer_response_functions.T * scene_illumination).T
ocs_extrema_points = []
for direction_vector in sample_unit_sphere(sampling_resolution):
direction_functional = direction_vector
ΨF = np.dot(color_signal_map, direction_functional)
(min_reflectance, max_reflectance) = solve_linear_program(object_function_coefficents=ΨF, bounds=(0, 1))
min_color_signal = np.dot(color_signal_map.T, min_reflectance)
max_color_signal = np.dot(color_signal_map.T, max_reflectance)
ocs_extrema_points.extend([min_color_signal, max_color_signal])
scale_factor = np.max(np.dot(observer_response_functions.T, scene_illumination))
return [(p / scale_factor) for p in ocs_extrema_points]<|docstring|>The linear programming formulation of the OCS is identical to that of the MMB minus
the constraints related to the second observer.
An MMB is a product of two observers but the OCS is simply the set of all object colors
for a single observer.
"Computing the object colour solid using spherical sampling"
https://ueaeprints.uea.ac.uk/62975/<|endoftext|>
|
5cd652c478066a750723caf29dba8cdf4ed72a178d4275b0a2734e7ddd285e1c
|
def save_params(path: str, params: Any):
'Saves parameters in Flax format.'
with File(path, 'wb') as fout:
fout.write(pickle.dumps(params))
|
Saves parameters in Flax format.
|
brax/io/model.py
|
save_params
|
Egiob/brax
| 1,162
|
python
|
def save_params(path: str, params: Any):
with File(path, 'wb') as fout:
fout.write(pickle.dumps(params))
|
def save_params(path: str, params: Any):
with File(path, 'wb') as fout:
fout.write(pickle.dumps(params))<|docstring|>Saves parameters in Flax format.<|endoftext|>
|
d4f80e225317b257325fc44a0154094313812228fb34937d7412e29e0d11b68d
|
def __init__(self, model: VanillaBayesianQuadrature):
'\n :param model: The vanilla Bayesian quadrature model\n '
self.model = model
|
:param model: The vanilla Bayesian quadrature model
|
emukit/quadrature/acquisitions/squared_correlation.py
|
__init__
|
polivucci/emukit
| 6
|
python
|
def __init__(self, model: VanillaBayesianQuadrature):
'\n \n '
self.model = model
|
def __init__(self, model: VanillaBayesianQuadrature):
'\n \n '
self.model = model<|docstring|>:param model: The vanilla Bayesian quadrature model<|endoftext|>
|
fc29ab6db63166806b09217582e71267568dd9ae1783f22255bc1964764b7cef
|
def evaluate(self, x: np.ndarray) -> np.ndarray:
'\n Evaluates the acquisition function at x.\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: (n_points x 1) the acquisition function value at x\n '
return self._evaluate(x)[0]
|
Evaluates the acquisition function at x.
:param x: (n_points x input_dim) locations where to evaluate
:return: (n_points x 1) the acquisition function value at x
|
emukit/quadrature/acquisitions/squared_correlation.py
|
evaluate
|
polivucci/emukit
| 6
|
python
|
def evaluate(self, x: np.ndarray) -> np.ndarray:
'\n Evaluates the acquisition function at x.\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: (n_points x 1) the acquisition function value at x\n '
return self._evaluate(x)[0]
|
def evaluate(self, x: np.ndarray) -> np.ndarray:
'\n Evaluates the acquisition function at x.\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: (n_points x 1) the acquisition function value at x\n '
return self._evaluate(x)[0]<|docstring|>Evaluates the acquisition function at x.
:param x: (n_points x input_dim) locations where to evaluate
:return: (n_points x 1) the acquisition function value at x<|endoftext|>
|
78e8fd71e4b1e77b28056a224bf91256534fa93d87e9450cbe6d94c8ce8b890d
|
def _evaluate(self, x: np.ndarray) -> Tuple[(np.ndarray, np.float, np.ndarray, np.ndarray)]:
'\n Evaluates the acquisition function at x.\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: the acquisition function value at x, shape (n_points x 1), current integral variance,\n predictive variance + noise, predictive covariance between integral and x, shapes of the latter\n two (n_points, 1).\n '
(integral_current_var, y_predictive_var, predictive_cov) = self._value_terms(x)
squared_correlation = ((predictive_cov ** 2) / (integral_current_var * y_predictive_var))
return (squared_correlation, integral_current_var, y_predictive_var, predictive_cov)
|
Evaluates the acquisition function at x.
:param x: (n_points x input_dim) locations where to evaluate
:return: the acquisition function value at x, shape (n_points x 1), current integral variance,
predictive variance + noise, predictive covariance between integral and x, shapes of the latter
two (n_points, 1).
|
emukit/quadrature/acquisitions/squared_correlation.py
|
_evaluate
|
polivucci/emukit
| 6
|
python
|
def _evaluate(self, x: np.ndarray) -> Tuple[(np.ndarray, np.float, np.ndarray, np.ndarray)]:
'\n Evaluates the acquisition function at x.\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: the acquisition function value at x, shape (n_points x 1), current integral variance,\n predictive variance + noise, predictive covariance between integral and x, shapes of the latter\n two (n_points, 1).\n '
(integral_current_var, y_predictive_var, predictive_cov) = self._value_terms(x)
squared_correlation = ((predictive_cov ** 2) / (integral_current_var * y_predictive_var))
return (squared_correlation, integral_current_var, y_predictive_var, predictive_cov)
|
def _evaluate(self, x: np.ndarray) -> Tuple[(np.ndarray, np.float, np.ndarray, np.ndarray)]:
'\n Evaluates the acquisition function at x.\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: the acquisition function value at x, shape (n_points x 1), current integral variance,\n predictive variance + noise, predictive covariance between integral and x, shapes of the latter\n two (n_points, 1).\n '
(integral_current_var, y_predictive_var, predictive_cov) = self._value_terms(x)
squared_correlation = ((predictive_cov ** 2) / (integral_current_var * y_predictive_var))
return (squared_correlation, integral_current_var, y_predictive_var, predictive_cov)<|docstring|>Evaluates the acquisition function at x.
:param x: (n_points x input_dim) locations where to evaluate
:return: the acquisition function value at x, shape (n_points x 1), current integral variance,
predictive variance + noise, predictive covariance between integral and x, shapes of the latter
two (n_points, 1).<|endoftext|>
|
270e44f325d995dc6bbe65153de792512d02a0766d25d34bcce13a21e882393f
|
def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[(np.ndarray, np.ndarray)]:
'\n Evaluate the acquisition function with gradient\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: acquisition value and corresponding gradient at x, shapes (n_points, 1) and (n_points, input_dim)\n '
(squared_correlation, integral_current_var, y_predictive_var, predictive_cov) = self._evaluate(x)
(d_y_predictive_var_dx, d_predictive_cov_dx) = self._gradient_terms(x)
first_term = ((2.0 * predictive_cov) * d_predictive_cov_dx)
second_term = (((predictive_cov ** 2) / y_predictive_var) * d_y_predictive_var_dx)
normalization = (integral_current_var * y_predictive_var)
squared_correlation_gradient = ((first_term - second_term) / normalization)
return (squared_correlation, squared_correlation_gradient)
|
Evaluate the acquisition function with gradient
:param x: (n_points x input_dim) locations where to evaluate
:return: acquisition value and corresponding gradient at x, shapes (n_points, 1) and (n_points, input_dim)
|
emukit/quadrature/acquisitions/squared_correlation.py
|
evaluate_with_gradients
|
polivucci/emukit
| 6
|
python
|
def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[(np.ndarray, np.ndarray)]:
'\n Evaluate the acquisition function with gradient\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: acquisition value and corresponding gradient at x, shapes (n_points, 1) and (n_points, input_dim)\n '
(squared_correlation, integral_current_var, y_predictive_var, predictive_cov) = self._evaluate(x)
(d_y_predictive_var_dx, d_predictive_cov_dx) = self._gradient_terms(x)
first_term = ((2.0 * predictive_cov) * d_predictive_cov_dx)
second_term = (((predictive_cov ** 2) / y_predictive_var) * d_y_predictive_var_dx)
normalization = (integral_current_var * y_predictive_var)
squared_correlation_gradient = ((first_term - second_term) / normalization)
return (squared_correlation, squared_correlation_gradient)
|
def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[(np.ndarray, np.ndarray)]:
'\n Evaluate the acquisition function with gradient\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: acquisition value and corresponding gradient at x, shapes (n_points, 1) and (n_points, input_dim)\n '
(squared_correlation, integral_current_var, y_predictive_var, predictive_cov) = self._evaluate(x)
(d_y_predictive_var_dx, d_predictive_cov_dx) = self._gradient_terms(x)
first_term = ((2.0 * predictive_cov) * d_predictive_cov_dx)
second_term = (((predictive_cov ** 2) / y_predictive_var) * d_y_predictive_var_dx)
normalization = (integral_current_var * y_predictive_var)
squared_correlation_gradient = ((first_term - second_term) / normalization)
return (squared_correlation, squared_correlation_gradient)<|docstring|>Evaluate the acquisition function with gradient
:param x: (n_points x input_dim) locations where to evaluate
:return: acquisition value and corresponding gradient at x, shapes (n_points, 1) and (n_points, input_dim)<|endoftext|>
|
b09dff98d21a892e76451e591b69bd7b376bb6614757c1ad35adc0a2f928ee2c
|
def _value_terms(self, x: np.ndarray) -> Tuple[(np.float, np.ndarray, np.ndarray)]:
'\n computes the terms needed for the squared correlation\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: current integral variance, predictive variance + noise, predictive covariance between integral and x,\n shapes of the latter two arrays are (n_points, 1).\n '
integral_current_var = self.model.integrate()[1]
y_predictive_var = (self.model.predict(x)[1] + self.model.base_gp.observation_noise_variance)
qKx = self.model.base_gp.kern.qK(x)
qKX = self.model.base_gp.kern.qK(self.model.base_gp.X)
predictive_cov = np.transpose((qKx - np.dot(qKX, self._graminv_Kx(x))))
return (integral_current_var, y_predictive_var, predictive_cov)
|
computes the terms needed for the squared correlation
:param x: (n_points x input_dim) locations where to evaluate
:return: current integral variance, predictive variance + noise, predictive covariance between integral and x,
shapes of the latter two arrays are (n_points, 1).
|
emukit/quadrature/acquisitions/squared_correlation.py
|
_value_terms
|
polivucci/emukit
| 6
|
python
|
def _value_terms(self, x: np.ndarray) -> Tuple[(np.float, np.ndarray, np.ndarray)]:
'\n computes the terms needed for the squared correlation\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: current integral variance, predictive variance + noise, predictive covariance between integral and x,\n shapes of the latter two arrays are (n_points, 1).\n '
integral_current_var = self.model.integrate()[1]
y_predictive_var = (self.model.predict(x)[1] + self.model.base_gp.observation_noise_variance)
qKx = self.model.base_gp.kern.qK(x)
qKX = self.model.base_gp.kern.qK(self.model.base_gp.X)
predictive_cov = np.transpose((qKx - np.dot(qKX, self._graminv_Kx(x))))
return (integral_current_var, y_predictive_var, predictive_cov)
|
def _value_terms(self, x: np.ndarray) -> Tuple[(np.float, np.ndarray, np.ndarray)]:
'\n computes the terms needed for the squared correlation\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: current integral variance, predictive variance + noise, predictive covariance between integral and x,\n shapes of the latter two arrays are (n_points, 1).\n '
integral_current_var = self.model.integrate()[1]
y_predictive_var = (self.model.predict(x)[1] + self.model.base_gp.observation_noise_variance)
qKx = self.model.base_gp.kern.qK(x)
qKX = self.model.base_gp.kern.qK(self.model.base_gp.X)
predictive_cov = np.transpose((qKx - np.dot(qKX, self._graminv_Kx(x))))
return (integral_current_var, y_predictive_var, predictive_cov)<|docstring|>computes the terms needed for the squared correlation
:param x: (n_points x input_dim) locations where to evaluate
:return: current integral variance, predictive variance + noise, predictive covariance between integral and x,
shapes of the latter two arrays are (n_points, 1).<|endoftext|>
|
c6b00eb6ec822e64104e6b70d8517f557ab2f9184f9559fb7ca432a44dbe25fb
|
def _gradient_terms(self, x):
'\n Computes the terms needed for the gradient of the squared correlation\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: the gradient of (y_predictive_var, predictive_cov) wrt. x at param x, shapes (n_points, input_dim)\n '
dvar_dx = self.model.base_gp.kern.dKdiag_dx(x)
dKxX_dx1 = self.model.base_gp.kern.dK_dx1(x, self.model.X)
graminv_KXx = self._graminv_Kx(x)
d_y_predictive_var_dx = (dvar_dx - (2.0 * (dKxX_dx1 * np.transpose(graminv_KXx)).sum(axis=2, keepdims=False)))
dqKx_dx = np.transpose(self.model.base_gp.kern.dqK_dx(x))
qKX_graminv = self._qK_graminv()
dKXx_dx2 = self.model.base_gp.kern.dK_dx2(self.model.X, x)
d_predictive_cov_dx = (dqKx_dx - np.dot(qKX_graminv, np.transpose(dKXx_dx2))[(0, :, :)])
return (np.transpose(d_y_predictive_var_dx), d_predictive_cov_dx)
|
Computes the terms needed for the gradient of the squared correlation
:param x: (n_points x input_dim) locations where to evaluate
:return: the gradient of (y_predictive_var, predictive_cov) wrt. x at param x, shapes (n_points, input_dim)
|
emukit/quadrature/acquisitions/squared_correlation.py
|
_gradient_terms
|
polivucci/emukit
| 6
|
python
|
def _gradient_terms(self, x):
'\n Computes the terms needed for the gradient of the squared correlation\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: the gradient of (y_predictive_var, predictive_cov) wrt. x at param x, shapes (n_points, input_dim)\n '
dvar_dx = self.model.base_gp.kern.dKdiag_dx(x)
dKxX_dx1 = self.model.base_gp.kern.dK_dx1(x, self.model.X)
graminv_KXx = self._graminv_Kx(x)
d_y_predictive_var_dx = (dvar_dx - (2.0 * (dKxX_dx1 * np.transpose(graminv_KXx)).sum(axis=2, keepdims=False)))
dqKx_dx = np.transpose(self.model.base_gp.kern.dqK_dx(x))
qKX_graminv = self._qK_graminv()
dKXx_dx2 = self.model.base_gp.kern.dK_dx2(self.model.X, x)
d_predictive_cov_dx = (dqKx_dx - np.dot(qKX_graminv, np.transpose(dKXx_dx2))[(0, :, :)])
return (np.transpose(d_y_predictive_var_dx), d_predictive_cov_dx)
|
def _gradient_terms(self, x):
'\n Computes the terms needed for the gradient of the squared correlation\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: the gradient of (y_predictive_var, predictive_cov) wrt. x at param x, shapes (n_points, input_dim)\n '
dvar_dx = self.model.base_gp.kern.dKdiag_dx(x)
dKxX_dx1 = self.model.base_gp.kern.dK_dx1(x, self.model.X)
graminv_KXx = self._graminv_Kx(x)
d_y_predictive_var_dx = (dvar_dx - (2.0 * (dKxX_dx1 * np.transpose(graminv_KXx)).sum(axis=2, keepdims=False)))
dqKx_dx = np.transpose(self.model.base_gp.kern.dqK_dx(x))
qKX_graminv = self._qK_graminv()
dKXx_dx2 = self.model.base_gp.kern.dK_dx2(self.model.X, x)
d_predictive_cov_dx = (dqKx_dx - np.dot(qKX_graminv, np.transpose(dKXx_dx2))[(0, :, :)])
return (np.transpose(d_y_predictive_var_dx), d_predictive_cov_dx)<|docstring|>Computes the terms needed for the gradient of the squared correlation
:param x: (n_points x input_dim) locations where to evaluate
:return: the gradient of (y_predictive_var, predictive_cov) wrt. x at param x, shapes (n_points, input_dim)<|endoftext|>
|
72d98063bd89608d649c378302f3d149466f79d950a8ab70c9804091cde27bc6
|
def _graminv_Kx(self, x):
"\n Inverse kernel Gram matrix multiplied with kernel function k(x, x') evaluated at existing training datapoints\n and location x.\n\n .. math::\n [K(X, X) + \\sigma^2 I]^{-1} K (X, x)\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: (n_train_points, n_points)\n "
lower_chol = self.model.base_gp.gram_chol()
KXx = self.model.base_gp.kern.K(self.model.base_gp.X, x)
return lapack.dtrtrs(lower_chol.T, lapack.dtrtrs(lower_chol, KXx, lower=1)[0], lower=0)[0]
|
Inverse kernel Gram matrix multiplied with kernel function k(x, x') evaluated at existing training datapoints
and location x.
.. math::
[K(X, X) + \sigma^2 I]^{-1} K (X, x)
:param x: (n_points x input_dim) locations where to evaluate
:return: (n_train_points, n_points)
|
emukit/quadrature/acquisitions/squared_correlation.py
|
_graminv_Kx
|
polivucci/emukit
| 6
|
python
|
def _graminv_Kx(self, x):
"\n Inverse kernel Gram matrix multiplied with kernel function k(x, x') evaluated at existing training datapoints\n and location x.\n\n .. math::\n [K(X, X) + \\sigma^2 I]^{-1} K (X, x)\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: (n_train_points, n_points)\n "
lower_chol = self.model.base_gp.gram_chol()
KXx = self.model.base_gp.kern.K(self.model.base_gp.X, x)
return lapack.dtrtrs(lower_chol.T, lapack.dtrtrs(lower_chol, KXx, lower=1)[0], lower=0)[0]
|
def _graminv_Kx(self, x):
"\n Inverse kernel Gram matrix multiplied with kernel function k(x, x') evaluated at existing training datapoints\n and location x.\n\n .. math::\n [K(X, X) + \\sigma^2 I]^{-1} K (X, x)\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: (n_train_points, n_points)\n "
lower_chol = self.model.base_gp.gram_chol()
KXx = self.model.base_gp.kern.K(self.model.base_gp.X, x)
return lapack.dtrtrs(lower_chol.T, lapack.dtrtrs(lower_chol, KXx, lower=1)[0], lower=0)[0]<|docstring|>Inverse kernel Gram matrix multiplied with kernel function k(x, x') evaluated at existing training datapoints
and location x.
.. math::
[K(X, X) + \sigma^2 I]^{-1} K (X, x)
:param x: (n_points x input_dim) locations where to evaluate
:return: (n_train_points, n_points)<|endoftext|>
|
6596756b1bc27d02b42be2f33979c09515adccec710a8617928bfd728159c45c
|
def _qK_graminv(self):
'\n Inverse kernel mean multiplied with inverse kernel Gram matrix, all evaluated at training locations.\n\n .. math::\n \\int k(x, X)\\mathrm{d}x [k(X, X) + \\sigma^2 I]^{-1}\n\n :return: weights of shape (1, n_train_points)\n '
lower_chol = self.model.base_gp.gram_chol()
qK = self.model.base_gp.kern.qK(self.model.base_gp.X)
graminv_qK_trans = lapack.dtrtrs(lower_chol.T, lapack.dtrtrs(lower_chol, qK.T, lower=1)[0], lower=0)[0]
return np.transpose(graminv_qK_trans)
|
Inverse kernel mean multiplied with inverse kernel Gram matrix, all evaluated at training locations.
.. math::
\int k(x, X)\mathrm{d}x [k(X, X) + \sigma^2 I]^{-1}
:return: weights of shape (1, n_train_points)
|
emukit/quadrature/acquisitions/squared_correlation.py
|
_qK_graminv
|
polivucci/emukit
| 6
|
python
|
def _qK_graminv(self):
'\n Inverse kernel mean multiplied with inverse kernel Gram matrix, all evaluated at training locations.\n\n .. math::\n \\int k(x, X)\\mathrm{d}x [k(X, X) + \\sigma^2 I]^{-1}\n\n :return: weights of shape (1, n_train_points)\n '
lower_chol = self.model.base_gp.gram_chol()
qK = self.model.base_gp.kern.qK(self.model.base_gp.X)
graminv_qK_trans = lapack.dtrtrs(lower_chol.T, lapack.dtrtrs(lower_chol, qK.T, lower=1)[0], lower=0)[0]
return np.transpose(graminv_qK_trans)
|
def _qK_graminv(self):
'\n Inverse kernel mean multiplied with inverse kernel Gram matrix, all evaluated at training locations.\n\n .. math::\n \\int k(x, X)\\mathrm{d}x [k(X, X) + \\sigma^2 I]^{-1}\n\n :return: weights of shape (1, n_train_points)\n '
lower_chol = self.model.base_gp.gram_chol()
qK = self.model.base_gp.kern.qK(self.model.base_gp.X)
graminv_qK_trans = lapack.dtrtrs(lower_chol.T, lapack.dtrtrs(lower_chol, qK.T, lower=1)[0], lower=0)[0]
return np.transpose(graminv_qK_trans)<|docstring|>Inverse kernel mean multiplied with inverse kernel Gram matrix, all evaluated at training locations.
.. math::
\int k(x, X)\mathrm{d}x [k(X, X) + \sigma^2 I]^{-1}
:return: weights of shape (1, n_train_points)<|endoftext|>
|
f4cdf6b2c15251549724e16183c371b33b636834d1523eb287259a7d77c6bdd1
|
def get_image_only_data(list_file=IMAGE_LIST, batch_size=32, image_processor=_read_image, image_height=IMAGE_HEIGHT, image_width=IMAGE_WIDTH):
'\n Returns a dataset containing only images\n '
with tf.device('/cpu:0'):
textfile = tf.data.TextLineDataset(str(list_file))
shuffled = textfile.cache().repeat().shuffle(5000)
images = shuffled.map(image_processor, 8).prefetch((batch_size * 2))
batch = images.batch(batch_size).make_one_shot_iterator().get_next()
return tf.reshape(batch, (batch_size, image_height, image_width, 3))
|
Returns a dataset containing only images
|
models/model.py
|
get_image_only_data
|
Aggrathon/MtGan
| 0
|
python
|
def get_image_only_data(list_file=IMAGE_LIST, batch_size=32, image_processor=_read_image, image_height=IMAGE_HEIGHT, image_width=IMAGE_WIDTH):
'\n \n '
with tf.device('/cpu:0'):
textfile = tf.data.TextLineDataset(str(list_file))
shuffled = textfile.cache().repeat().shuffle(5000)
images = shuffled.map(image_processor, 8).prefetch((batch_size * 2))
batch = images.batch(batch_size).make_one_shot_iterator().get_next()
return tf.reshape(batch, (batch_size, image_height, image_width, 3))
|
def get_image_only_data(list_file=IMAGE_LIST, batch_size=32, image_processor=_read_image, image_height=IMAGE_HEIGHT, image_width=IMAGE_WIDTH):
'\n \n '
with tf.device('/cpu:0'):
textfile = tf.data.TextLineDataset(str(list_file))
shuffled = textfile.cache().repeat().shuffle(5000)
images = shuffled.map(image_processor, 8).prefetch((batch_size * 2))
batch = images.batch(batch_size).make_one_shot_iterator().get_next()
return tf.reshape(batch, (batch_size, image_height, image_width, 3))<|docstring|>Returns a dataset containing only images<|endoftext|>
|
b3b6c3183e5389e12a61aa76c6b1d55a23a7cfe9c3a8743609704d117a3fae88
|
def get_art_only_data(list_file=ART_LIST, batch_size=32):
'\n Returns a dataset containing only art\n '
return get_image_only_data(list_file, batch_size, _read_image_random)
|
Returns a dataset containing only art
|
models/model.py
|
get_art_only_data
|
Aggrathon/MtGan
| 0
|
python
|
def get_art_only_data(list_file=ART_LIST, batch_size=32):
'\n \n '
return get_image_only_data(list_file, batch_size, _read_image_random)
|
def get_art_only_data(list_file=ART_LIST, batch_size=32):
'\n \n '
return get_image_only_data(list_file, batch_size, _read_image_random)<|docstring|>Returns a dataset containing only art<|endoftext|>
|
84667da849f9c673d48f698cdf0948bff2038a8423a46d9817baeca5d5b92505
|
def get_art_only_cropped(art_list=ART_GREEN_LIST, batch_size=32):
'\n Returns a dataset containing cropped art\n '
return get_image_only_data(art_list, batch_size, _read_image_random_crop, (8 * 16), (10 * 16))
|
Returns a dataset containing cropped art
|
models/model.py
|
get_art_only_cropped
|
Aggrathon/MtGan
| 0
|
python
|
def get_art_only_cropped(art_list=ART_GREEN_LIST, batch_size=32):
'\n \n '
return get_image_only_data(art_list, batch_size, _read_image_random_crop, (8 * 16), (10 * 16))
|
def get_art_only_cropped(art_list=ART_GREEN_LIST, batch_size=32):
'\n \n '
return get_image_only_data(art_list, batch_size, _read_image_random_crop, (8 * 16), (10 * 16))<|docstring|>Returns a dataset containing cropped art<|endoftext|>
|
0cdb9ec1527f60774574c616758ff678d470bbf04c9eb819e8d55bbc0aaa9643
|
def restore(self, session=None):
'\n Restore a saved model or initialize a new one\n '
if (session is None):
session = self.session
self.saver = tf.train.Saver()
self.summary = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter(str((DIRECTORY / self.name)))
try:
self.saver.restore(session, tf.train.latest_checkpoint(str((DIRECTORY / self.name))))
print('Loaded an existing model')
except:
session.run(tf.global_variables_initializer())
self.summary_writer.add_graph(session.graph, 0)
print('Created a new model')
|
Restore a saved model or initialize a new one
|
models/model.py
|
restore
|
Aggrathon/MtGan
| 0
|
python
|
def restore(self, session=None):
'\n \n '
if (session is None):
session = self.session
self.saver = tf.train.Saver()
self.summary = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter(str((DIRECTORY / self.name)))
try:
self.saver.restore(session, tf.train.latest_checkpoint(str((DIRECTORY / self.name))))
print('Loaded an existing model')
except:
session.run(tf.global_variables_initializer())
self.summary_writer.add_graph(session.graph, 0)
print('Created a new model')
|
def restore(self, session=None):
'\n \n '
if (session is None):
session = self.session
self.saver = tf.train.Saver()
self.summary = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter(str((DIRECTORY / self.name)))
try:
self.saver.restore(session, tf.train.latest_checkpoint(str((DIRECTORY / self.name))))
print('Loaded an existing model')
except:
session.run(tf.global_variables_initializer())
self.summary_writer.add_graph(session.graph, 0)
print('Created a new model')<|docstring|>Restore a saved model or initialize a new one<|endoftext|>
|
3017f45455a1dcdf9fb1a6f17d27236e5f7a2400b51519ac1a2cc83c8ed37a3e
|
def save(self, session=None):
'\n Save the current model\n '
if (session is None):
session = self.session
self.saver.save(session, str(((DIRECTORY / self.name) / 'model')), self.global_step)
|
Save the current model
|
models/model.py
|
save
|
Aggrathon/MtGan
| 0
|
python
|
def save(self, session=None):
'\n \n '
if (session is None):
session = self.session
self.saver.save(session, str(((DIRECTORY / self.name) / 'model')), self.global_step)
|
def save(self, session=None):
'\n \n '
if (session is None):
session = self.session
self.saver.save(session, str(((DIRECTORY / self.name) / 'model')), self.global_step)<|docstring|>Save the current model<|endoftext|>
|
dc44aeccea0ae1bd7962b0287679ba33fbed9ba09319a50e8398cbd76ea7c0d3
|
def train_step(self, summary=False, session=None):
'\n Do a training step and return step_nr and result (and optionally summary to write)\n '
if (session is None):
session = self.session
|
Do a training step and return step_nr and result (and optionally summary to write)
|
models/model.py
|
train_step
|
Aggrathon/MtGan
| 0
|
python
|
def train_step(self, summary=False, session=None):
'\n \n '
if (session is None):
session = self.session
|
def train_step(self, summary=False, session=None):
'\n \n '
if (session is None):
session = self.session<|docstring|>Do a training step and return step_nr and result (and optionally summary to write)<|endoftext|>
|
654b79d4c2e6b2280309a28ea5a40bfa93b334515e536da1bb6b9b90e0650abe
|
def add_summary(self, event, step):
'\n Write a tensorboard summary\n '
self.summary_writer.add_summary(event, step)
|
Write a tensorboard summary
|
models/model.py
|
add_summary
|
Aggrathon/MtGan
| 0
|
python
|
def add_summary(self, event, step):
'\n \n '
self.summary_writer.add_summary(event, step)
|
def add_summary(self, event, step):
'\n \n '
self.summary_writer.add_summary(event, step)<|docstring|>Write a tensorboard summary<|endoftext|>
|
7f031284641faef24b6526510a8375f139ce8904b8c77e18f4df526edc447380
|
def relSDM(obs, mod, sce, cdf_threshold=0.9999999, lower_limit=0.1):
'relative scaled distribution mapping assuming a gamma distributed parameter (with lower limit zero)\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n cdf_threshold :: upper and lower threshold of CDF\n lower_limit :: lower limit of data signal (values below will be masked!)\n\n returns corrected timeseries\n tested with pandas series.\n '
obs_r = obs[(obs >= lower_limit)]
mod_r = mod[(mod >= lower_limit)]
sce_r = sce[(sce >= lower_limit)]
obs_fr = ((1.0 * len(obs_r)) / len(obs))
mod_fr = ((1.0 * len(mod_r)) / len(mod))
sce_fr = ((1.0 * len(sce_r)) / len(sce))
sce_argsort = np.argsort(sce)
obs_gamma = gamma.fit(obs_r, floc=0)
mod_gamma = gamma.fit(mod_r, floc=0)
sce_gamma = gamma.fit(sce_r, floc=0)
obs_cdf = gamma.cdf(np.sort(obs_r), *obs_gamma)
mod_cdf = gamma.cdf(np.sort(mod_r), *mod_gamma)
obs_cdf[(obs_cdf > cdf_threshold)] = cdf_threshold
mod_cdf[(mod_cdf > cdf_threshold)] = cdf_threshold
expected_sce_raindays = min(int(np.round((((len(sce) * obs_fr) * sce_fr) / mod_fr))), len(sce))
sce_cdf = gamma.cdf(np.sort(sce_r), *sce_gamma)
sce_cdf[(sce_cdf > cdf_threshold)] = cdf_threshold
obs_cdf_intpol = np.interp(np.linspace(1, len(obs_r), len(sce_r)), np.linspace(1, len(obs_r), len(obs_r)), obs_cdf)
mod_cdf_intpol = np.interp(np.linspace(1, len(mod_r), len(sce_r)), np.linspace(1, len(mod_r), len(mod_r)), mod_cdf)
obs_inverse = (1.0 / (1 - obs_cdf_intpol))
mod_inverse = (1.0 / (1 - mod_cdf_intpol))
sce_inverse = (1.0 / (1 - sce_cdf))
adapted_cdf = (1 - (1.0 / ((obs_inverse * sce_inverse) / mod_inverse)))
adapted_cdf[(adapted_cdf < 0.0)] = 0.0
xvals = ((gamma.ppf(np.sort(adapted_cdf), *obs_gamma) * gamma.ppf(sce_cdf, *sce_gamma)) / gamma.ppf(sce_cdf, *mod_gamma))
correction = np.zeros(len(sce))
if (len(sce_r) > expected_sce_raindays):
xvals = np.interp(np.linspace(1, len(sce_r), expected_sce_raindays), np.linspace(1, len(sce_r), len(sce_r)), xvals)
else:
xvals = np.hstack((np.zeros((expected_sce_raindays - len(sce_r))), xvals))
correction[sce_argsort[(- expected_sce_raindays):]] = xvals
return pd.Series(correction, index=sce.index)
|
relative scaled distribution mapping assuming a gamma distributed parameter (with lower limit zero)
rewritten from pyCAT for 1D data
obs :: observed variable time series
mod :: modelled variable for same time series as obs
sce :: to unbias modelled time series
cdf_threshold :: upper and lower threshold of CDF
lower_limit :: lower limit of data signal (values below will be masked!)
returns corrected timeseries
tested with pandas series.
|
ruins/processing/sdm.py
|
relSDM
|
hydrocode-de/RUINSapp
| 2
|
python
|
def relSDM(obs, mod, sce, cdf_threshold=0.9999999, lower_limit=0.1):
'relative scaled distribution mapping assuming a gamma distributed parameter (with lower limit zero)\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n cdf_threshold :: upper and lower threshold of CDF\n lower_limit :: lower limit of data signal (values below will be masked!)\n\n returns corrected timeseries\n tested with pandas series.\n '
obs_r = obs[(obs >= lower_limit)]
mod_r = mod[(mod >= lower_limit)]
sce_r = sce[(sce >= lower_limit)]
obs_fr = ((1.0 * len(obs_r)) / len(obs))
mod_fr = ((1.0 * len(mod_r)) / len(mod))
sce_fr = ((1.0 * len(sce_r)) / len(sce))
sce_argsort = np.argsort(sce)
obs_gamma = gamma.fit(obs_r, floc=0)
mod_gamma = gamma.fit(mod_r, floc=0)
sce_gamma = gamma.fit(sce_r, floc=0)
obs_cdf = gamma.cdf(np.sort(obs_r), *obs_gamma)
mod_cdf = gamma.cdf(np.sort(mod_r), *mod_gamma)
obs_cdf[(obs_cdf > cdf_threshold)] = cdf_threshold
mod_cdf[(mod_cdf > cdf_threshold)] = cdf_threshold
expected_sce_raindays = min(int(np.round((((len(sce) * obs_fr) * sce_fr) / mod_fr))), len(sce))
sce_cdf = gamma.cdf(np.sort(sce_r), *sce_gamma)
sce_cdf[(sce_cdf > cdf_threshold)] = cdf_threshold
obs_cdf_intpol = np.interp(np.linspace(1, len(obs_r), len(sce_r)), np.linspace(1, len(obs_r), len(obs_r)), obs_cdf)
mod_cdf_intpol = np.interp(np.linspace(1, len(mod_r), len(sce_r)), np.linspace(1, len(mod_r), len(mod_r)), mod_cdf)
obs_inverse = (1.0 / (1 - obs_cdf_intpol))
mod_inverse = (1.0 / (1 - mod_cdf_intpol))
sce_inverse = (1.0 / (1 - sce_cdf))
adapted_cdf = (1 - (1.0 / ((obs_inverse * sce_inverse) / mod_inverse)))
adapted_cdf[(adapted_cdf < 0.0)] = 0.0
xvals = ((gamma.ppf(np.sort(adapted_cdf), *obs_gamma) * gamma.ppf(sce_cdf, *sce_gamma)) / gamma.ppf(sce_cdf, *mod_gamma))
correction = np.zeros(len(sce))
if (len(sce_r) > expected_sce_raindays):
xvals = np.interp(np.linspace(1, len(sce_r), expected_sce_raindays), np.linspace(1, len(sce_r), len(sce_r)), xvals)
else:
xvals = np.hstack((np.zeros((expected_sce_raindays - len(sce_r))), xvals))
correction[sce_argsort[(- expected_sce_raindays):]] = xvals
return pd.Series(correction, index=sce.index)
|
def relSDM(obs, mod, sce, cdf_threshold=0.9999999, lower_limit=0.1):
'relative scaled distribution mapping assuming a gamma distributed parameter (with lower limit zero)\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n cdf_threshold :: upper and lower threshold of CDF\n lower_limit :: lower limit of data signal (values below will be masked!)\n\n returns corrected timeseries\n tested with pandas series.\n '
obs_r = obs[(obs >= lower_limit)]
mod_r = mod[(mod >= lower_limit)]
sce_r = sce[(sce >= lower_limit)]
obs_fr = ((1.0 * len(obs_r)) / len(obs))
mod_fr = ((1.0 * len(mod_r)) / len(mod))
sce_fr = ((1.0 * len(sce_r)) / len(sce))
sce_argsort = np.argsort(sce)
obs_gamma = gamma.fit(obs_r, floc=0)
mod_gamma = gamma.fit(mod_r, floc=0)
sce_gamma = gamma.fit(sce_r, floc=0)
obs_cdf = gamma.cdf(np.sort(obs_r), *obs_gamma)
mod_cdf = gamma.cdf(np.sort(mod_r), *mod_gamma)
obs_cdf[(obs_cdf > cdf_threshold)] = cdf_threshold
mod_cdf[(mod_cdf > cdf_threshold)] = cdf_threshold
expected_sce_raindays = min(int(np.round((((len(sce) * obs_fr) * sce_fr) / mod_fr))), len(sce))
sce_cdf = gamma.cdf(np.sort(sce_r), *sce_gamma)
sce_cdf[(sce_cdf > cdf_threshold)] = cdf_threshold
obs_cdf_intpol = np.interp(np.linspace(1, len(obs_r), len(sce_r)), np.linspace(1, len(obs_r), len(obs_r)), obs_cdf)
mod_cdf_intpol = np.interp(np.linspace(1, len(mod_r), len(sce_r)), np.linspace(1, len(mod_r), len(mod_r)), mod_cdf)
obs_inverse = (1.0 / (1 - obs_cdf_intpol))
mod_inverse = (1.0 / (1 - mod_cdf_intpol))
sce_inverse = (1.0 / (1 - sce_cdf))
adapted_cdf = (1 - (1.0 / ((obs_inverse * sce_inverse) / mod_inverse)))
adapted_cdf[(adapted_cdf < 0.0)] = 0.0
xvals = ((gamma.ppf(np.sort(adapted_cdf), *obs_gamma) * gamma.ppf(sce_cdf, *sce_gamma)) / gamma.ppf(sce_cdf, *mod_gamma))
correction = np.zeros(len(sce))
if (len(sce_r) > expected_sce_raindays):
xvals = np.interp(np.linspace(1, len(sce_r), expected_sce_raindays), np.linspace(1, len(sce_r), len(sce_r)), xvals)
else:
xvals = np.hstack((np.zeros((expected_sce_raindays - len(sce_r))), xvals))
correction[sce_argsort[(- expected_sce_raindays):]] = xvals
return pd.Series(correction, index=sce.index)<|docstring|>relative scaled distribution mapping assuming a gamma distributed parameter (with lower limit zero)
rewritten from pyCAT for 1D data
obs :: observed variable time series
mod :: modelled variable for same time series as obs
sce :: to unbias modelled time series
cdf_threshold :: upper and lower threshold of CDF
lower_limit :: lower limit of data signal (values below will be masked!)
returns corrected timeseries
tested with pandas series.<|endoftext|>
|
835dfd6c6a71df8c8096e80c40b125e0917341ac97baee234bed1cf3b94de41a
|
def absSDM(obs, mod, sce, cdf_threshold=0.9999999):
'absolute scaled distribution mapping assuming a normal distributed parameter\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n cdf_threshold :: upper and lower threshold of CDF\n\n returns corrected timeseries\n tested with pandas series.\n '
obs_len = len(obs)
mod_len = len(mod)
sce_len = len(sce)
obs_mean = np.mean(obs)
mod_mean = np.mean(mod)
smean = np.mean(sce)
odetrend = detrend(obs)
mdetrend = detrend(mod)
sdetrend = detrend(sce)
obs_norm = norm.fit(odetrend)
mod_norm = norm.fit(mdetrend)
sce_norm = norm.fit(sdetrend)
sce_diff = (sce - sdetrend)
sce_argsort = np.argsort(sdetrend)
obs_cdf = norm.cdf(np.sort(odetrend), *obs_norm)
mod_cdf = norm.cdf(np.sort(mdetrend), *mod_norm)
sce_cdf = norm.cdf(np.sort(sdetrend), *sce_norm)
obs_cdf = np.maximum(np.minimum(obs_cdf, cdf_threshold), (1 - cdf_threshold))
mod_cdf = np.maximum(np.minimum(mod_cdf, cdf_threshold), (1 - cdf_threshold))
sce_cdf = np.maximum(np.minimum(sce_cdf, cdf_threshold), (1 - cdf_threshold))
obs_cdf_intpol = np.interp(np.linspace(1, obs_len, sce_len), np.linspace(1, obs_len, obs_len), obs_cdf)
mod_cdf_intpol = np.interp(np.linspace(1, mod_len, sce_len), np.linspace(1, mod_len, mod_len), mod_cdf)
obs_cdf_shift = (obs_cdf_intpol - 0.5)
mod_cdf_shift = (mod_cdf_intpol - 0.5)
sce_cdf_shift = (sce_cdf - 0.5)
obs_inverse = (1.0 / (0.5 - np.abs(obs_cdf_shift)))
mod_inverse = (1.0 / (0.5 - np.abs(mod_cdf_shift)))
sce_inverse = (1.0 / (0.5 - np.abs(sce_cdf_shift)))
adapted_cdf = (np.sign(obs_cdf_shift) * (1.0 - (1.0 / ((obs_inverse * sce_inverse) / mod_inverse))))
adapted_cdf[(adapted_cdf < 0)] += 1.0
adapted_cdf = np.maximum(np.minimum(adapted_cdf, cdf_threshold), (1 - cdf_threshold))
xvals = (norm.ppf(np.sort(adapted_cdf), *obs_norm) + ((obs_norm[(- 1)] / mod_norm[(- 1)]) * (norm.ppf(sce_cdf, *sce_norm) - norm.ppf(sce_cdf, *mod_norm))))
xvals -= xvals.mean()
xvals += (obs_mean + (smean - mod_mean))
correction = np.zeros(sce_len)
correction[sce_argsort] = xvals
correction += (sce_diff - smean)
return correction
|
absolute scaled distribution mapping assuming a normal distributed parameter
rewritten from pyCAT for 1D data
obs :: observed variable time series
mod :: modelled variable for same time series as obs
sce :: to unbias modelled time series
cdf_threshold :: upper and lower threshold of CDF
returns corrected timeseries
tested with pandas series.
|
ruins/processing/sdm.py
|
absSDM
|
hydrocode-de/RUINSapp
| 2
|
python
|
def absSDM(obs, mod, sce, cdf_threshold=0.9999999):
'absolute scaled distribution mapping assuming a normal distributed parameter\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n cdf_threshold :: upper and lower threshold of CDF\n\n returns corrected timeseries\n tested with pandas series.\n '
obs_len = len(obs)
mod_len = len(mod)
sce_len = len(sce)
obs_mean = np.mean(obs)
mod_mean = np.mean(mod)
smean = np.mean(sce)
odetrend = detrend(obs)
mdetrend = detrend(mod)
sdetrend = detrend(sce)
obs_norm = norm.fit(odetrend)
mod_norm = norm.fit(mdetrend)
sce_norm = norm.fit(sdetrend)
sce_diff = (sce - sdetrend)
sce_argsort = np.argsort(sdetrend)
obs_cdf = norm.cdf(np.sort(odetrend), *obs_norm)
mod_cdf = norm.cdf(np.sort(mdetrend), *mod_norm)
sce_cdf = norm.cdf(np.sort(sdetrend), *sce_norm)
obs_cdf = np.maximum(np.minimum(obs_cdf, cdf_threshold), (1 - cdf_threshold))
mod_cdf = np.maximum(np.minimum(mod_cdf, cdf_threshold), (1 - cdf_threshold))
sce_cdf = np.maximum(np.minimum(sce_cdf, cdf_threshold), (1 - cdf_threshold))
obs_cdf_intpol = np.interp(np.linspace(1, obs_len, sce_len), np.linspace(1, obs_len, obs_len), obs_cdf)
mod_cdf_intpol = np.interp(np.linspace(1, mod_len, sce_len), np.linspace(1, mod_len, mod_len), mod_cdf)
obs_cdf_shift = (obs_cdf_intpol - 0.5)
mod_cdf_shift = (mod_cdf_intpol - 0.5)
sce_cdf_shift = (sce_cdf - 0.5)
obs_inverse = (1.0 / (0.5 - np.abs(obs_cdf_shift)))
mod_inverse = (1.0 / (0.5 - np.abs(mod_cdf_shift)))
sce_inverse = (1.0 / (0.5 - np.abs(sce_cdf_shift)))
adapted_cdf = (np.sign(obs_cdf_shift) * (1.0 - (1.0 / ((obs_inverse * sce_inverse) / mod_inverse))))
adapted_cdf[(adapted_cdf < 0)] += 1.0
adapted_cdf = np.maximum(np.minimum(adapted_cdf, cdf_threshold), (1 - cdf_threshold))
xvals = (norm.ppf(np.sort(adapted_cdf), *obs_norm) + ((obs_norm[(- 1)] / mod_norm[(- 1)]) * (norm.ppf(sce_cdf, *sce_norm) - norm.ppf(sce_cdf, *mod_norm))))
xvals -= xvals.mean()
xvals += (obs_mean + (smean - mod_mean))
correction = np.zeros(sce_len)
correction[sce_argsort] = xvals
correction += (sce_diff - smean)
return correction
|
def absSDM(obs, mod, sce, cdf_threshold=0.9999999):
'absolute scaled distribution mapping assuming a normal distributed parameter\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n cdf_threshold :: upper and lower threshold of CDF\n\n returns corrected timeseries\n tested with pandas series.\n '
obs_len = len(obs)
mod_len = len(mod)
sce_len = len(sce)
obs_mean = np.mean(obs)
mod_mean = np.mean(mod)
smean = np.mean(sce)
odetrend = detrend(obs)
mdetrend = detrend(mod)
sdetrend = detrend(sce)
obs_norm = norm.fit(odetrend)
mod_norm = norm.fit(mdetrend)
sce_norm = norm.fit(sdetrend)
sce_diff = (sce - sdetrend)
sce_argsort = np.argsort(sdetrend)
obs_cdf = norm.cdf(np.sort(odetrend), *obs_norm)
mod_cdf = norm.cdf(np.sort(mdetrend), *mod_norm)
sce_cdf = norm.cdf(np.sort(sdetrend), *sce_norm)
obs_cdf = np.maximum(np.minimum(obs_cdf, cdf_threshold), (1 - cdf_threshold))
mod_cdf = np.maximum(np.minimum(mod_cdf, cdf_threshold), (1 - cdf_threshold))
sce_cdf = np.maximum(np.minimum(sce_cdf, cdf_threshold), (1 - cdf_threshold))
obs_cdf_intpol = np.interp(np.linspace(1, obs_len, sce_len), np.linspace(1, obs_len, obs_len), obs_cdf)
mod_cdf_intpol = np.interp(np.linspace(1, mod_len, sce_len), np.linspace(1, mod_len, mod_len), mod_cdf)
obs_cdf_shift = (obs_cdf_intpol - 0.5)
mod_cdf_shift = (mod_cdf_intpol - 0.5)
sce_cdf_shift = (sce_cdf - 0.5)
obs_inverse = (1.0 / (0.5 - np.abs(obs_cdf_shift)))
mod_inverse = (1.0 / (0.5 - np.abs(mod_cdf_shift)))
sce_inverse = (1.0 / (0.5 - np.abs(sce_cdf_shift)))
adapted_cdf = (np.sign(obs_cdf_shift) * (1.0 - (1.0 / ((obs_inverse * sce_inverse) / mod_inverse))))
adapted_cdf[(adapted_cdf < 0)] += 1.0
adapted_cdf = np.maximum(np.minimum(adapted_cdf, cdf_threshold), (1 - cdf_threshold))
xvals = (norm.ppf(np.sort(adapted_cdf), *obs_norm) + ((obs_norm[(- 1)] / mod_norm[(- 1)]) * (norm.ppf(sce_cdf, *sce_norm) - norm.ppf(sce_cdf, *mod_norm))))
xvals -= xvals.mean()
xvals += (obs_mean + (smean - mod_mean))
correction = np.zeros(sce_len)
correction[sce_argsort] = xvals
correction += (sce_diff - smean)
return correction<|docstring|>absolute scaled distribution mapping assuming a normal distributed parameter
rewritten from pyCAT for 1D data
obs :: observed variable time series
mod :: modelled variable for same time series as obs
sce :: to unbias modelled time series
cdf_threshold :: upper and lower threshold of CDF
returns corrected timeseries
tested with pandas series.<|endoftext|>
|
5614a43be2fd5460cb540abc68b18cef4e167741e5f60a52e3cd00bd1d9639c8
|
def SDM(obs, mod, sce, meth='rel', cdf_threshold=0.9999999, lower_limit=0.1):
"scaled distribution mapping - wrapper to relative and absolute bias correction functions\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n meth :: 'rel' for relative SDM, else absolute SDM will be performed\n cdf_threshold :: upper and lower threshold of CDF\n lower_limit :: lower limit of data signal (values below will be masked when meth != 'rel')\n\n The original authors suggest to use the absolute SDM for air temperature and the relative SDM for precipitation and radiation series.\n\n returns corrected timeseries\n tested with pandas series.\n "
if (meth == 'rel'):
return relSDM(obs, mod, sce, cdf_threshold, lower_limit)
else:
return absSDM(obs, mod, sce, cdf_threshold)
|
scaled distribution mapping - wrapper to relative and absolute bias correction functions
rewritten from pyCAT for 1D data
obs :: observed variable time series
mod :: modelled variable for same time series as obs
sce :: to unbias modelled time series
meth :: 'rel' for relative SDM, else absolute SDM will be performed
cdf_threshold :: upper and lower threshold of CDF
lower_limit :: lower limit of data signal (values below will be masked when meth != 'rel')
The original authors suggest to use the absolute SDM for air temperature and the relative SDM for precipitation and radiation series.
returns corrected timeseries
tested with pandas series.
|
ruins/processing/sdm.py
|
SDM
|
hydrocode-de/RUINSapp
| 2
|
python
|
def SDM(obs, mod, sce, meth='rel', cdf_threshold=0.9999999, lower_limit=0.1):
"scaled distribution mapping - wrapper to relative and absolute bias correction functions\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n meth :: 'rel' for relative SDM, else absolute SDM will be performed\n cdf_threshold :: upper and lower threshold of CDF\n lower_limit :: lower limit of data signal (values below will be masked when meth != 'rel')\n\n The original authors suggest to use the absolute SDM for air temperature and the relative SDM for precipitation and radiation series.\n\n returns corrected timeseries\n tested with pandas series.\n "
if (meth == 'rel'):
return relSDM(obs, mod, sce, cdf_threshold, lower_limit)
else:
return absSDM(obs, mod, sce, cdf_threshold)
|
def SDM(obs, mod, sce, meth='rel', cdf_threshold=0.9999999, lower_limit=0.1):
"scaled distribution mapping - wrapper to relative and absolute bias correction functions\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n meth :: 'rel' for relative SDM, else absolute SDM will be performed\n cdf_threshold :: upper and lower threshold of CDF\n lower_limit :: lower limit of data signal (values below will be masked when meth != 'rel')\n\n The original authors suggest to use the absolute SDM for air temperature and the relative SDM for precipitation and radiation series.\n\n returns corrected timeseries\n tested with pandas series.\n "
if (meth == 'rel'):
return relSDM(obs, mod, sce, cdf_threshold, lower_limit)
else:
return absSDM(obs, mod, sce, cdf_threshold)<|docstring|>scaled distribution mapping - wrapper to relative and absolute bias correction functions
rewritten from pyCAT for 1D data
obs :: observed variable time series
mod :: modelled variable for same time series as obs
sce :: to unbias modelled time series
meth :: 'rel' for relative SDM, else absolute SDM will be performed
cdf_threshold :: upper and lower threshold of CDF
lower_limit :: lower limit of data signal (values below will be masked when meth != 'rel')
The original authors suggest to use the absolute SDM for air temperature and the relative SDM for precipitation and radiation series.
returns corrected timeseries
tested with pandas series.<|endoftext|>
|
69eda780e0c47d08124c9ea73f712c3fcf5d00df739ceda11f41741fa2b5eee4
|
def __init__(self, graph_name='COM'):
'\n :param graph_name: 所创建的图表名称\n '
self._redis_content = RedisContent().get_content
self._redis_graph = Graph(graph_name, self._redis_content)
self.index = 0
|
:param graph_name: 所创建的图表名称
|
graph/graphSelect.py
|
__init__
|
Liangchengdeye/redisGraph
| 0
|
python
|
def __init__(self, graph_name='COM'):
'\n \n '
self._redis_content = RedisContent().get_content
self._redis_graph = Graph(graph_name, self._redis_content)
self.index = 0
|
def __init__(self, graph_name='COM'):
'\n \n '
self._redis_content = RedisContent().get_content
self._redis_graph = Graph(graph_name, self._redis_content)
self.index = 0<|docstring|>:param graph_name: 所创建的图表名称<|endoftext|>
|
bb9dfc81635f01aeddb3c3ef8eae048e835ce3264153f2efc48d7de92f6a3309
|
def select_all(self):
'\n 全表查询\n :return:\n '
query_sql = '\n MATCH (c:company)-[:have]->(p:position)\n RETURN c,p\n '
self.get_graph_msg(query_sql)
|
全表查询
:return:
|
graph/graphSelect.py
|
select_all
|
Liangchengdeye/redisGraph
| 0
|
python
|
def select_all(self):
'\n 全表查询\n :return:\n '
query_sql = '\n MATCH (c:company)-[:have]->(p:position)\n RETURN c,p\n '
self.get_graph_msg(query_sql)
|
def select_all(self):
'\n 全表查询\n :return:\n '
query_sql = '\n MATCH (c:company)-[:have]->(p:position)\n RETURN c,p\n '
self.get_graph_msg(query_sql)<|docstring|>全表查询
:return:<|endoftext|>
|
335c7259051a1fc041cad14b84740c5f154b86a271040c9315c64c61353b0f23
|
def select_where(self):
'\n 查询招聘信息来源于58同城的\n :return:\n '
query_sql = '\n MATCH (c:company)-[:have]->(p:position)\n WHERE c.source="58同城招聘"\n RETURN c,p\n '
self.get_graph_msg(query_sql)
|
查询招聘信息来源于58同城的
:return:
|
graph/graphSelect.py
|
select_where
|
Liangchengdeye/redisGraph
| 0
|
python
|
def select_where(self):
'\n 查询招聘信息来源于58同城的\n :return:\n '
query_sql = '\n MATCH (c:company)-[:have]->(p:position)\n WHERE c.source="58同城招聘"\n RETURN c,p\n '
self.get_graph_msg(query_sql)
|
def select_where(self):
'\n 查询招聘信息来源于58同城的\n :return:\n '
query_sql = '\n MATCH (c:company)-[:have]->(p:position)\n WHERE c.source="58同城招聘"\n RETURN c,p\n '
self.get_graph_msg(query_sql)<|docstring|>查询招聘信息来源于58同城的
:return:<|endoftext|>
|
a094b3a6f09380e51b031cd71140dc367c460ce5894ea68c1255fd588185df5c
|
def select_work(self):
'\n 查询工作地点在北京的公司名称,职位名称\n :return:\n '
query_sql = '\n MATCH (c:company)-[:have]->(p:position)\n WHERE p.workAddress="北京"\n RETURN c.companyName,p.positionName\n '
self.get_graph_msg(query_sql)
|
查询工作地点在北京的公司名称,职位名称
:return:
|
graph/graphSelect.py
|
select_work
|
Liangchengdeye/redisGraph
| 0
|
python
|
def select_work(self):
'\n 查询工作地点在北京的公司名称,职位名称\n :return:\n '
query_sql = '\n MATCH (c:company)-[:have]->(p:position)\n WHERE p.workAddress="北京"\n RETURN c.companyName,p.positionName\n '
self.get_graph_msg(query_sql)
|
def select_work(self):
'\n 查询工作地点在北京的公司名称,职位名称\n :return:\n '
query_sql = '\n MATCH (c:company)-[:have]->(p:position)\n WHERE p.workAddress="北京"\n RETURN c.companyName,p.positionName\n '
self.get_graph_msg(query_sql)<|docstring|>查询工作地点在北京的公司名称,职位名称
:return:<|endoftext|>
|
0192bafa2dbd93d4c6e3dfcc783e4a2a657a5788681a8d15e798e37f44b0c805
|
@property
def id(self):
'\n Alias for exon_id necessary for backward compatibility.\n '
return self.exon_id
|
Alias for exon_id necessary for backward compatibility.
|
pyensembl/exon.py
|
id
|
scottdbrown/pyensembl
| 0
|
python
|
@property
def id(self):
'\n \n '
return self.exon_id
|
@property
def id(self):
'\n \n '
return self.exon_id<|docstring|>Alias for exon_id necessary for backward compatibility.<|endoftext|>
|
7a86a6a848900ff9e8cff62c9b34df674f2edb7ddae949057bdefaa798bdbabe
|
def plotDives(calc_file_path, new_file_path, is_export, min_length=60, required_depth=None, max_depth=None, interest_variables=[], shading='deep'):
'\n This function pulls individual dives from the data that meet defined criteria.\n It then plots these dives next to each other starting from a shared zeroed start time.\n The dives can be colorscaled based on "interest variables"\n\n Inputs:\n min_length : type int or float, sets the minimum length of a dive in seconds before a dive is recorded (Default is 60 seconds)\n required_depth : type int or float, a dive must reach this depth (same units as file) in order to be recorded (Defualt is None)\n max_depth : type int or float, dives over that reach a depth greater than this will not be recorded (Default is None)\n interest_variables : tpye list of string, each string is the name of a variable to coloscale dives, creates a subplot for each\n shading : type string, choose any PlotLy colorscale to set the color (Default is \'deep\')\n\n Tips:\n For cyclical data like Pitch, Roll, or Heading try setting \'shading\' to \'icefire\' one of PlotLy\'s cyclical colorscales\n Though not technically cyclical, \'balance\' provides a similar effect\n '
data = pd.read_csv(calc_file_path)
fs = data['fs'].tolist()[0]
depth = np.array(data['Depth'])
numData = len(depth)
t = np.array([((x / fs) / 3600) for x in range(numData)])
sigma = np.std(depth[0:(fs * 2)])
surface = (depth * [(depth < (6 * sigma))])[0]
diveIndexes = np.where((surface == 0))[0]
lstDives = np.split(diveIndexes, (np.where((np.diff(diveIndexes) != 1))[0] + 1))
dives = {}
for d in lstDives:
diveDepth = depth[d]
if ((len(d) >= (fs * min_length)) and (True if (required_depth == None) else (np.max(diveDepth) >= required_depth)) and (True if (max_depth == None) else (np.max(diveDepth) <= max_depth))):
num = (len(dives) + 1)
dive = {}
dive['name'] = ('Dive ' + str(num))
dive['depth'] = diveDepth
dive['time'] = t[:len(d)]
dive['idx'] = d
dives[(num - 1)] = dive
if (not interest_variables):
fig = go.Figure()
else:
fig = go.Figure(make_subplots(rows=len(interest_variables), cols=1, specs=([[{}]] * len(interest_variables)), subplot_titles=[('Colorscale based on ' + name) for name in interest_variables], shared_xaxes=True))
if (not interest_variables):
for i in range(len(dives)):
d = dives[i]
fig.add_trace(go.Scattergl(x=d['time'], y=d['depth'], name=d['name'], mode='markers'))
fig.update_xaxes(title='Time (hr)', rangeslider=dict(visible=True))
else:
numVars = len(interest_variables)
clocSpacing = ((1 / numVars) / 2)
cbarlocs = [(1 - (clocSpacing * (1 + (2 * i)))) for i in range(numVars)]
for k in range(len(interest_variables)):
var = interest_variables[k]
varData = np.array(data[var])
for i in range(len(dives)):
d = dives[i]
fig.add_trace(go.Scattergl(x=d['time'], y=d['depth'], name=d['name'], legendgroup=str(i), showlegend=(True if (k == 0) else False), mode='markers', marker=dict(color=varData[d['idx']], cmax=np.max(varData), cmin=np.min(varData), colorbar=dict(title=var, len=(clocSpacing * 2), x=(- 0.15), y=cbarlocs[k]), colorscale=shading)), row=(k + 1), col=1)
fig.update_xaxes(title='Time (hr)', rangeslider=dict(visible=True), col=1, row=numVars)
fig.update_yaxes(title='Depth (m)', autorange='reversed')
fig.update_layout(title='Dives')
if (len(dives) == 0):
raise Exception('No dives found.')
if is_export:
fig.write_html(new_file_path)
else:
fig.show()
|
This function pulls individual dives from the data that meet defined criteria.
It then plots these dives next to each other starting from a shared zeroed start time.
The dives can be colorscaled based on "interest variables"
Inputs:
min_length : type int or float, sets the minimum length of a dive in seconds before a dive is recorded (Default is 60 seconds)
required_depth : type int or float, a dive must reach this depth (same units as file) in order to be recorded (Defualt is None)
max_depth : type int or float, dives over that reach a depth greater than this will not be recorded (Default is None)
interest_variables : tpye list of string, each string is the name of a variable to coloscale dives, creates a subplot for each
shading : type string, choose any PlotLy colorscale to set the color (Default is 'deep')
Tips:
For cyclical data like Pitch, Roll, or Heading try setting 'shading' to 'icefire' one of PlotLy's cyclical colorscales
Though not technically cyclical, 'balance' provides a similar effect
|
src/app/scripts_dev/dives.py
|
plotDives
|
NowacekLab/Duke-Whale-TagDataVis
| 0
|
python
|
def plotDives(calc_file_path, new_file_path, is_export, min_length=60, required_depth=None, max_depth=None, interest_variables=[], shading='deep'):
'\n This function pulls individual dives from the data that meet defined criteria.\n It then plots these dives next to each other starting from a shared zeroed start time.\n The dives can be colorscaled based on "interest variables"\n\n Inputs:\n min_length : type int or float, sets the minimum length of a dive in seconds before a dive is recorded (Default is 60 seconds)\n required_depth : type int or float, a dive must reach this depth (same units as file) in order to be recorded (Defualt is None)\n max_depth : type int or float, dives over that reach a depth greater than this will not be recorded (Default is None)\n interest_variables : tpye list of string, each string is the name of a variable to coloscale dives, creates a subplot for each\n shading : type string, choose any PlotLy colorscale to set the color (Default is \'deep\')\n\n Tips:\n For cyclical data like Pitch, Roll, or Heading try setting \'shading\' to \'icefire\' one of PlotLy\'s cyclical colorscales\n Though not technically cyclical, \'balance\' provides a similar effect\n '
data = pd.read_csv(calc_file_path)
fs = data['fs'].tolist()[0]
depth = np.array(data['Depth'])
numData = len(depth)
t = np.array([((x / fs) / 3600) for x in range(numData)])
sigma = np.std(depth[0:(fs * 2)])
surface = (depth * [(depth < (6 * sigma))])[0]
diveIndexes = np.where((surface == 0))[0]
lstDives = np.split(diveIndexes, (np.where((np.diff(diveIndexes) != 1))[0] + 1))
dives = {}
for d in lstDives:
diveDepth = depth[d]
if ((len(d) >= (fs * min_length)) and (True if (required_depth == None) else (np.max(diveDepth) >= required_depth)) and (True if (max_depth == None) else (np.max(diveDepth) <= max_depth))):
num = (len(dives) + 1)
dive = {}
dive['name'] = ('Dive ' + str(num))
dive['depth'] = diveDepth
dive['time'] = t[:len(d)]
dive['idx'] = d
dives[(num - 1)] = dive
if (not interest_variables):
fig = go.Figure()
else:
fig = go.Figure(make_subplots(rows=len(interest_variables), cols=1, specs=([[{}]] * len(interest_variables)), subplot_titles=[('Colorscale based on ' + name) for name in interest_variables], shared_xaxes=True))
if (not interest_variables):
for i in range(len(dives)):
d = dives[i]
fig.add_trace(go.Scattergl(x=d['time'], y=d['depth'], name=d['name'], mode='markers'))
fig.update_xaxes(title='Time (hr)', rangeslider=dict(visible=True))
else:
numVars = len(interest_variables)
clocSpacing = ((1 / numVars) / 2)
cbarlocs = [(1 - (clocSpacing * (1 + (2 * i)))) for i in range(numVars)]
for k in range(len(interest_variables)):
var = interest_variables[k]
varData = np.array(data[var])
for i in range(len(dives)):
d = dives[i]
fig.add_trace(go.Scattergl(x=d['time'], y=d['depth'], name=d['name'], legendgroup=str(i), showlegend=(True if (k == 0) else False), mode='markers', marker=dict(color=varData[d['idx']], cmax=np.max(varData), cmin=np.min(varData), colorbar=dict(title=var, len=(clocSpacing * 2), x=(- 0.15), y=cbarlocs[k]), colorscale=shading)), row=(k + 1), col=1)
fig.update_xaxes(title='Time (hr)', rangeslider=dict(visible=True), col=1, row=numVars)
fig.update_yaxes(title='Depth (m)', autorange='reversed')
fig.update_layout(title='Dives')
if (len(dives) == 0):
raise Exception('No dives found.')
if is_export:
fig.write_html(new_file_path)
else:
fig.show()
|
def plotDives(calc_file_path, new_file_path, is_export, min_length=60, required_depth=None, max_depth=None, interest_variables=[], shading='deep'):
'\n This function pulls individual dives from the data that meet defined criteria.\n It then plots these dives next to each other starting from a shared zeroed start time.\n The dives can be colorscaled based on "interest variables"\n\n Inputs:\n min_length : type int or float, sets the minimum length of a dive in seconds before a dive is recorded (Default is 60 seconds)\n required_depth : type int or float, a dive must reach this depth (same units as file) in order to be recorded (Defualt is None)\n max_depth : type int or float, dives over that reach a depth greater than this will not be recorded (Default is None)\n interest_variables : tpye list of string, each string is the name of a variable to coloscale dives, creates a subplot for each\n shading : type string, choose any PlotLy colorscale to set the color (Default is \'deep\')\n\n Tips:\n For cyclical data like Pitch, Roll, or Heading try setting \'shading\' to \'icefire\' one of PlotLy\'s cyclical colorscales\n Though not technically cyclical, \'balance\' provides a similar effect\n '
data = pd.read_csv(calc_file_path)
fs = data['fs'].tolist()[0]
depth = np.array(data['Depth'])
numData = len(depth)
t = np.array([((x / fs) / 3600) for x in range(numData)])
sigma = np.std(depth[0:(fs * 2)])
surface = (depth * [(depth < (6 * sigma))])[0]
diveIndexes = np.where((surface == 0))[0]
lstDives = np.split(diveIndexes, (np.where((np.diff(diveIndexes) != 1))[0] + 1))
dives = {}
for d in lstDives:
diveDepth = depth[d]
if ((len(d) >= (fs * min_length)) and (True if (required_depth == None) else (np.max(diveDepth) >= required_depth)) and (True if (max_depth == None) else (np.max(diveDepth) <= max_depth))):
num = (len(dives) + 1)
dive = {}
dive['name'] = ('Dive ' + str(num))
dive['depth'] = diveDepth
dive['time'] = t[:len(d)]
dive['idx'] = d
dives[(num - 1)] = dive
if (not interest_variables):
fig = go.Figure()
else:
fig = go.Figure(make_subplots(rows=len(interest_variables), cols=1, specs=([[{}]] * len(interest_variables)), subplot_titles=[('Colorscale based on ' + name) for name in interest_variables], shared_xaxes=True))
if (not interest_variables):
for i in range(len(dives)):
d = dives[i]
fig.add_trace(go.Scattergl(x=d['time'], y=d['depth'], name=d['name'], mode='markers'))
fig.update_xaxes(title='Time (hr)', rangeslider=dict(visible=True))
else:
numVars = len(interest_variables)
clocSpacing = ((1 / numVars) / 2)
cbarlocs = [(1 - (clocSpacing * (1 + (2 * i)))) for i in range(numVars)]
for k in range(len(interest_variables)):
var = interest_variables[k]
varData = np.array(data[var])
for i in range(len(dives)):
d = dives[i]
fig.add_trace(go.Scattergl(x=d['time'], y=d['depth'], name=d['name'], legendgroup=str(i), showlegend=(True if (k == 0) else False), mode='markers', marker=dict(color=varData[d['idx']], cmax=np.max(varData), cmin=np.min(varData), colorbar=dict(title=var, len=(clocSpacing * 2), x=(- 0.15), y=cbarlocs[k]), colorscale=shading)), row=(k + 1), col=1)
fig.update_xaxes(title='Time (hr)', rangeslider=dict(visible=True), col=1, row=numVars)
fig.update_yaxes(title='Depth (m)', autorange='reversed')
fig.update_layout(title='Dives')
if (len(dives) == 0):
raise Exception('No dives found.')
if is_export:
fig.write_html(new_file_path)
else:
fig.show()<|docstring|>This function pulls individual dives from the data that meet defined criteria.
It then plots these dives next to each other starting from a shared zeroed start time.
The dives can be colorscaled based on "interest variables"
Inputs:
min_length : type int or float, sets the minimum length of a dive in seconds before a dive is recorded (Default is 60 seconds)
required_depth : type int or float, a dive must reach this depth (same units as file) in order to be recorded (Defualt is None)
max_depth : type int or float, dives over that reach a depth greater than this will not be recorded (Default is None)
interest_variables : tpye list of string, each string is the name of a variable to coloscale dives, creates a subplot for each
shading : type string, choose any PlotLy colorscale to set the color (Default is 'deep')
Tips:
For cyclical data like Pitch, Roll, or Heading try setting 'shading' to 'icefire' one of PlotLy's cyclical colorscales
Though not technically cyclical, 'balance' provides a similar effect<|endoftext|>
|
50f772f3b68ea4849374bc80b330d9edb19e56cf91b15a1f896fb6ee7793e451
|
def matrix_print(matrix, format='d'):
'\n for row in range(len(matrix)):\n for col in range(len(matrix[0])):\n print("{:3d}".format(matrix[row][col]), end="")\n print()\n print()\n '
for row in matrix:
for val in row:
if (format == 'd'):
print('{:3d}'.format(val), end='')
if (format == 's'):
print('{:3s}'.format(val), end='')
print()
'\n # another way of printing in programs\n\n row = len(matrix)\n col = len(matrix[0])\n\n for i in range(row):\n for j in range(col):\n\n print()\n '
|
for row in range(len(matrix)):
for col in range(len(matrix[0])):
print("{:3d}".format(matrix[row][col]), end="")
print()
print()
|
recursion/paths.py
|
matrix_print
|
santoshmano/pybricks
| 0
|
python
|
def matrix_print(matrix, format='d'):
'\n for row in range(len(matrix)):\n for col in range(len(matrix[0])):\n print("{:3d}".format(matrix[row][col]), end=)\n print()\n print()\n '
for row in matrix:
for val in row:
if (format == 'd'):
print('{:3d}'.format(val), end=)
if (format == 's'):
print('{:3s}'.format(val), end=)
print()
'\n # another way of printing in programs\n\n row = len(matrix)\n col = len(matrix[0])\n\n for i in range(row):\n for j in range(col):\n\n print()\n '
|
def matrix_print(matrix, format='d'):
'\n for row in range(len(matrix)):\n for col in range(len(matrix[0])):\n print("{:3d}".format(matrix[row][col]), end=)\n print()\n print()\n '
for row in matrix:
for val in row:
if (format == 'd'):
print('{:3d}'.format(val), end=)
if (format == 's'):
print('{:3s}'.format(val), end=)
print()
'\n # another way of printing in programs\n\n row = len(matrix)\n col = len(matrix[0])\n\n for i in range(row):\n for j in range(col):\n\n print()\n '<|docstring|>for row in range(len(matrix)):
for col in range(len(matrix[0])):
print("{:3d}".format(matrix[row][col]), end="")
print()
print()<|endoftext|>
|
a92f3860d5c9b6d00690985b4f32eba399a6139cf85b6339eb17302ba5da93c3
|
def is_registered(self):
'\n Determines if function is already registered in redis database.\n Makes a `RG.DUMPREGISTRATIONS` call. Seeks for match between self.command_name and RegistrationData arguments.\n Returns:\n is registered (boolean)\n '
dumped_registrations = execute('RG.DUMPREGISTRATIONS')
if (not dumped_registrations):
return False
for registration in dumped_registrations:
data = dict(zip(registration[0::2], registration[1::2]))
registration_data = dict(zip(data['RegistrationData'][0::2], data['RegistrationData'][1::2]))
if (self.command_name in registration_data['args']):
return True
return False
|
Determines if function is already registered in redis database.
Makes a `RG.DUMPREGISTRATIONS` call. Seeks for match between self.command_name and RegistrationData arguments.
Returns:
is registered (boolean)
|
redis/redis_functions/game_functions.py
|
is_registered
|
SomeKidXD/online_game
| 2
|
python
|
def is_registered(self):
'\n Determines if function is already registered in redis database.\n Makes a `RG.DUMPREGISTRATIONS` call. Seeks for match between self.command_name and RegistrationData arguments.\n Returns:\n is registered (boolean)\n '
dumped_registrations = execute('RG.DUMPREGISTRATIONS')
if (not dumped_registrations):
return False
for registration in dumped_registrations:
data = dict(zip(registration[0::2], registration[1::2]))
registration_data = dict(zip(data['RegistrationData'][0::2], data['RegistrationData'][1::2]))
if (self.command_name in registration_data['args']):
return True
return False
|
def is_registered(self):
'\n Determines if function is already registered in redis database.\n Makes a `RG.DUMPREGISTRATIONS` call. Seeks for match between self.command_name and RegistrationData arguments.\n Returns:\n is registered (boolean)\n '
dumped_registrations = execute('RG.DUMPREGISTRATIONS')
if (not dumped_registrations):
return False
for registration in dumped_registrations:
data = dict(zip(registration[0::2], registration[1::2]))
registration_data = dict(zip(data['RegistrationData'][0::2], data['RegistrationData'][1::2]))
if (self.command_name in registration_data['args']):
return True
return False<|docstring|>Determines if function is already registered in redis database.
Makes a `RG.DUMPREGISTRATIONS` call. Seeks for match between self.command_name and RegistrationData arguments.
Returns:
is registered (boolean)<|endoftext|>
|
00c07b91842bf206f34f32bac238047dcedd3a11abb9c49364ff047f33ae8aae
|
def register_command(self):
'\n Registers a redis gears function to redis.\n This is a super class placeholder function meant to be overridden.\n\n Raises:\n NotImplementedError()\n '
raise NotImplementedError(self.__class__.__name__)
|
Registers a redis gears function to redis.
This is a super class placeholder function meant to be overridden.
Raises:
NotImplementedError()
|
redis/redis_functions/game_functions.py
|
register_command
|
SomeKidXD/online_game
| 2
|
python
|
def register_command(self):
'\n Registers a redis gears function to redis.\n This is a super class placeholder function meant to be overridden.\n\n Raises:\n NotImplementedError()\n '
raise NotImplementedError(self.__class__.__name__)
|
def register_command(self):
'\n Registers a redis gears function to redis.\n This is a super class placeholder function meant to be overridden.\n\n Raises:\n NotImplementedError()\n '
raise NotImplementedError(self.__class__.__name__)<|docstring|>Registers a redis gears function to redis.
This is a super class placeholder function meant to be overridden.
Raises:
NotImplementedError()<|endoftext|>
|
903920471100b3fcfd59eda24fedc7fc1215c504c6a3fcd5d69f33711f713350
|
def register_command(self):
'\n Determines finds public server to join to.\n\n Arguments:\n user\n Returns:\n redis key [GAME:game_id]\n Trigger example:\n RG.TRIGGER find_game user1\n\n '
def query():
return execute('FT.SEARCH', 'GAME', f"'(@playercount:[0 {(MAX_PLAYERS_IN_GAME - 1)}])'", 'SORTBY', 'playercount', 'DESC', 'LIMIT', '0', '1')
def find_game(user_id):
game = query()
if ((game != [0]) and (type(game) == list)):
return game[1].split(':')[1]
game = execute('RG.TRIGGER', 'create_new_game', f'USER:{user_id}')
if game:
return game[0]
GB('CommandReader').map((lambda x: find_game(*x[1:]))).register(trigger=self.command_name)
|
Determines finds public server to join to.
Arguments:
user
Returns:
redis key [GAME:game_id]
Trigger example:
RG.TRIGGER find_game user1
|
redis/redis_functions/game_functions.py
|
register_command
|
SomeKidXD/online_game
| 2
|
python
|
def register_command(self):
'\n Determines finds public server to join to.\n\n Arguments:\n user\n Returns:\n redis key [GAME:game_id]\n Trigger example:\n RG.TRIGGER find_game user1\n\n '
def query():
return execute('FT.SEARCH', 'GAME', f"'(@playercount:[0 {(MAX_PLAYERS_IN_GAME - 1)}])'", 'SORTBY', 'playercount', 'DESC', 'LIMIT', '0', '1')
def find_game(user_id):
game = query()
if ((game != [0]) and (type(game) == list)):
return game[1].split(':')[1]
game = execute('RG.TRIGGER', 'create_new_game', f'USER:{user_id}')
if game:
return game[0]
GB('CommandReader').map((lambda x: find_game(*x[1:]))).register(trigger=self.command_name)
|
def register_command(self):
'\n Determines finds public server to join to.\n\n Arguments:\n user\n Returns:\n redis key [GAME:game_id]\n Trigger example:\n RG.TRIGGER find_game user1\n\n '
def query():
return execute('FT.SEARCH', 'GAME', f"'(@playercount:[0 {(MAX_PLAYERS_IN_GAME - 1)}])'", 'SORTBY', 'playercount', 'DESC', 'LIMIT', '0', '1')
def find_game(user_id):
game = query()
if ((game != [0]) and (type(game) == list)):
return game[1].split(':')[1]
game = execute('RG.TRIGGER', 'create_new_game', f'USER:{user_id}')
if game:
return game[0]
GB('CommandReader').map((lambda x: find_game(*x[1:]))).register(trigger=self.command_name)<|docstring|>Determines finds public server to join to.
Arguments:
user
Returns:
redis key [GAME:game_id]
Trigger example:
RG.TRIGGER find_game user1<|endoftext|>
|
3fb5890ba780eb0a025adac34276397780c210fa0f352b8d71b9488d6ced36ad
|
def register_command(self):
'\n Determines best public server to join to.\n - Assings User to the Game.\n - Increments playercount\n Arguments:\n user, game, secret (optional)\n Returns:\n redis key [GAME:game_id]\n Trigger example:\n RG.TRIGGER join_game user1 game1\n RG.TRIGGER join_game user1 game1 secret123\n\n '
def assign_to_game(user_id, game_id):
execute('HSET', f'GAME:{game_id}', f'USER:{user_id}', int(datetime.now().timestamp()))
execute('HINCRBY', f'GAME:{game_id}', 'playercount', 1)
def is_authorized(user_id, game_id, secret):
return execute('RG.TRIGGER', 'user_authorized', user_id, game_id, secret)
def subcall(user_id, game_id, secret=''):
if (not is_authorized(user_id, game_id, secret)):
return False
assign_to_game(user_id, game_id)
return game_id
GB('CommandReader').map((lambda x: subcall(*x[1:]))).register(trigger=self.command_name)
|
Determines best public server to join to.
- Assings User to the Game.
- Increments playercount
Arguments:
user, game, secret (optional)
Returns:
redis key [GAME:game_id]
Trigger example:
RG.TRIGGER join_game user1 game1
RG.TRIGGER join_game user1 game1 secret123
|
redis/redis_functions/game_functions.py
|
register_command
|
SomeKidXD/online_game
| 2
|
python
|
def register_command(self):
'\n Determines best public server to join to.\n - Assings User to the Game.\n - Increments playercount\n Arguments:\n user, game, secret (optional)\n Returns:\n redis key [GAME:game_id]\n Trigger example:\n RG.TRIGGER join_game user1 game1\n RG.TRIGGER join_game user1 game1 secret123\n\n '
def assign_to_game(user_id, game_id):
execute('HSET', f'GAME:{game_id}', f'USER:{user_id}', int(datetime.now().timestamp()))
execute('HINCRBY', f'GAME:{game_id}', 'playercount', 1)
def is_authorized(user_id, game_id, secret):
return execute('RG.TRIGGER', 'user_authorized', user_id, game_id, secret)
def subcall(user_id, game_id, secret=):
if (not is_authorized(user_id, game_id, secret)):
return False
assign_to_game(user_id, game_id)
return game_id
GB('CommandReader').map((lambda x: subcall(*x[1:]))).register(trigger=self.command_name)
|
def register_command(self):
'\n Determines best public server to join to.\n - Assings User to the Game.\n - Increments playercount\n Arguments:\n user, game, secret (optional)\n Returns:\n redis key [GAME:game_id]\n Trigger example:\n RG.TRIGGER join_game user1 game1\n RG.TRIGGER join_game user1 game1 secret123\n\n '
def assign_to_game(user_id, game_id):
execute('HSET', f'GAME:{game_id}', f'USER:{user_id}', int(datetime.now().timestamp()))
execute('HINCRBY', f'GAME:{game_id}', 'playercount', 1)
def is_authorized(user_id, game_id, secret):
return execute('RG.TRIGGER', 'user_authorized', user_id, game_id, secret)
def subcall(user_id, game_id, secret=):
if (not is_authorized(user_id, game_id, secret)):
return False
assign_to_game(user_id, game_id)
return game_id
GB('CommandReader').map((lambda x: subcall(*x[1:]))).register(trigger=self.command_name)<|docstring|>Determines best public server to join to.
- Assings User to the Game.
- Increments playercount
Arguments:
user, game, secret (optional)
Returns:
redis key [GAME:game_id]
Trigger example:
RG.TRIGGER join_game user1 game1
RG.TRIGGER join_game user1 game1 secret123<|endoftext|>
|
9c9b49c97c3ebbf30a30b02e0b1617f7bea1effc3378be976a088ea58c5fbcc0
|
def register_command(self):
'\n Determines best public server to join to.\n - Removes USER to the ROOM.\n - Decrements playercount\n - Publishes a notification\n Arguments:\n user, game\n Returns:\n None\n Trigger example:\n RG.TRIGGER leave_game user1 game1\n '
def subcall(user_id, game_id, secret=None):
execute('HDEL', f'GAME:{game_id}', f'USER:{user_id}')
execute('HINCRBY', f'GAME:{game_id}', 'playercount', (- 1))
GB('CommandReader').map((lambda x: subcall(*x[1:]))).register(trigger=self.command_name, mode='sync')
|
Determines best public server to join to.
- Removes USER to the ROOM.
- Decrements playercount
- Publishes a notification
Arguments:
user, game
Returns:
None
Trigger example:
RG.TRIGGER leave_game user1 game1
|
redis/redis_functions/game_functions.py
|
register_command
|
SomeKidXD/online_game
| 2
|
python
|
def register_command(self):
'\n Determines best public server to join to.\n - Removes USER to the ROOM.\n - Decrements playercount\n - Publishes a notification\n Arguments:\n user, game\n Returns:\n None\n Trigger example:\n RG.TRIGGER leave_game user1 game1\n '
def subcall(user_id, game_id, secret=None):
execute('HDEL', f'GAME:{game_id}', f'USER:{user_id}')
execute('HINCRBY', f'GAME:{game_id}', 'playercount', (- 1))
GB('CommandReader').map((lambda x: subcall(*x[1:]))).register(trigger=self.command_name, mode='sync')
|
def register_command(self):
'\n Determines best public server to join to.\n - Removes USER to the ROOM.\n - Decrements playercount\n - Publishes a notification\n Arguments:\n user, game\n Returns:\n None\n Trigger example:\n RG.TRIGGER leave_game user1 game1\n '
def subcall(user_id, game_id, secret=None):
execute('HDEL', f'GAME:{game_id}', f'USER:{user_id}')
execute('HINCRBY', f'GAME:{game_id}', 'playercount', (- 1))
GB('CommandReader').map((lambda x: subcall(*x[1:]))).register(trigger=self.command_name, mode='sync')<|docstring|>Determines best public server to join to.
- Removes USER to the ROOM.
- Decrements playercount
- Publishes a notification
Arguments:
user, game
Returns:
None
Trigger example:
RG.TRIGGER leave_game user1 game1<|endoftext|>
|
ca38749f3e3a1e6b243deca15af67634a65d18f59b96f8b8b5237674f4499e27
|
def register_command(self):
'\n Determines if user can join the room\n Arguments:\n user, game\n Returns:\n Boolean\n Trigger example:\n RG.TRIGGER user_authorized user1 game1\n '
def subcall(user_id, game_id, secret):
return ((execute('HGET', f'GAME:{game_id}', 'secret') == secret) or (execute('HGET', f'GAME:{game_id}', f'USER:{user_id}') != 'None') or (execute('HGET', f'GAME:{game_id}', 'owner') == f'USER:{user_id}'))
GB('CommandReader').map((lambda x: subcall(*x[1:]))).register(trigger=self.command_name, mode='sync')
|
Determines if user can join the room
Arguments:
user, game
Returns:
Boolean
Trigger example:
RG.TRIGGER user_authorized user1 game1
|
redis/redis_functions/game_functions.py
|
register_command
|
SomeKidXD/online_game
| 2
|
python
|
def register_command(self):
'\n Determines if user can join the room\n Arguments:\n user, game\n Returns:\n Boolean\n Trigger example:\n RG.TRIGGER user_authorized user1 game1\n '
def subcall(user_id, game_id, secret):
return ((execute('HGET', f'GAME:{game_id}', 'secret') == secret) or (execute('HGET', f'GAME:{game_id}', f'USER:{user_id}') != 'None') or (execute('HGET', f'GAME:{game_id}', 'owner') == f'USER:{user_id}'))
GB('CommandReader').map((lambda x: subcall(*x[1:]))).register(trigger=self.command_name, mode='sync')
|
def register_command(self):
'\n Determines if user can join the room\n Arguments:\n user, game\n Returns:\n Boolean\n Trigger example:\n RG.TRIGGER user_authorized user1 game1\n '
def subcall(user_id, game_id, secret):
return ((execute('HGET', f'GAME:{game_id}', 'secret') == secret) or (execute('HGET', f'GAME:{game_id}', f'USER:{user_id}') != 'None') or (execute('HGET', f'GAME:{game_id}', 'owner') == f'USER:{user_id}'))
GB('CommandReader').map((lambda x: subcall(*x[1:]))).register(trigger=self.command_name, mode='sync')<|docstring|>Determines if user can join the room
Arguments:
user, game
Returns:
Boolean
Trigger example:
RG.TRIGGER user_authorized user1 game1<|endoftext|>
|
44e13b9d201c5d8d1c26a32f6654eccc91329821147ed8c22cd6616b24e755de
|
def run_test(self):
'Main test logic'
self.log.info('Compare responses from getinfo RPC and `digibyte-cli getinfo`')
cli_get_info = self.nodes[0].cli.getinfo()
rpc_get_info = self.nodes[0].getinfo()
assert_equal(cli_get_info, rpc_get_info)
|
Main test logic
|
test/functional/digibyte_cli.py
|
run_test
|
argentum3/digibyte
| 431
|
python
|
def run_test(self):
self.log.info('Compare responses from getinfo RPC and `digibyte-cli getinfo`')
cli_get_info = self.nodes[0].cli.getinfo()
rpc_get_info = self.nodes[0].getinfo()
assert_equal(cli_get_info, rpc_get_info)
|
def run_test(self):
self.log.info('Compare responses from getinfo RPC and `digibyte-cli getinfo`')
cli_get_info = self.nodes[0].cli.getinfo()
rpc_get_info = self.nodes[0].getinfo()
assert_equal(cli_get_info, rpc_get_info)<|docstring|>Main test logic<|endoftext|>
|
7343b2c217f7cc4127ad2da789a7964daaa0b9be786463bb3c1cf40debf247bc
|
def test_init_default(self) -> None:
'Test init default.'
obj = RunwayStaticSiteCustomErrorResponseDataModel()
assert (not obj.ErrorCachingMinTTL)
assert (not obj.ErrorCode)
assert (not obj.ResponseCode)
assert (not obj.ResponsePagePath)
|
Test init default.
|
tests/unit/module/staticsite/parameters/test_models.py
|
test_init_default
|
blade2005/runway
| 134
|
python
|
def test_init_default(self) -> None:
obj = RunwayStaticSiteCustomErrorResponseDataModel()
assert (not obj.ErrorCachingMinTTL)
assert (not obj.ErrorCode)
assert (not obj.ResponseCode)
assert (not obj.ResponsePagePath)
|
def test_init_default(self) -> None:
obj = RunwayStaticSiteCustomErrorResponseDataModel()
assert (not obj.ErrorCachingMinTTL)
assert (not obj.ErrorCode)
assert (not obj.ResponseCode)
assert (not obj.ResponsePagePath)<|docstring|>Test init default.<|endoftext|>
|
3b63e3cc97db73023c5dd06658c4e9e30af7669fcc4fa1dc150b7c8950611e24
|
def test_init_extra(self) -> None:
'Test init extra.'
with pytest.raises(ValidationError):
RunwayStaticSiteCustomErrorResponseDataModel(invalid='val')
|
Test init extra.
|
tests/unit/module/staticsite/parameters/test_models.py
|
test_init_extra
|
blade2005/runway
| 134
|
python
|
def test_init_extra(self) -> None:
with pytest.raises(ValidationError):
RunwayStaticSiteCustomErrorResponseDataModel(invalid='val')
|
def test_init_extra(self) -> None:
with pytest.raises(ValidationError):
RunwayStaticSiteCustomErrorResponseDataModel(invalid='val')<|docstring|>Test init extra.<|endoftext|>
|
dac811fefe99af31ec080d80f03ad72f7c8aefeb4c20915683e53ba5a18023fe
|
def test_init(self) -> None:
'Test init.'
data = {'ErrorCachingMinTTL': 30, 'ErrorCode': 404, 'ResponseCode': 404, 'ResponsePagePath': './errors/404.html'}
obj = RunwayStaticSiteCustomErrorResponseDataModel(**data)
assert (obj.ErrorCachingMinTTL == data['ErrorCachingMinTTL'])
assert (obj.ErrorCode == data['ErrorCode'])
assert (obj.ResponseCode == data['ResponseCode'])
assert (obj.ResponsePagePath == data['ResponsePagePath'])
|
Test init.
|
tests/unit/module/staticsite/parameters/test_models.py
|
test_init
|
blade2005/runway
| 134
|
python
|
def test_init(self) -> None:
data = {'ErrorCachingMinTTL': 30, 'ErrorCode': 404, 'ResponseCode': 404, 'ResponsePagePath': './errors/404.html'}
obj = RunwayStaticSiteCustomErrorResponseDataModel(**data)
assert (obj.ErrorCachingMinTTL == data['ErrorCachingMinTTL'])
assert (obj.ErrorCode == data['ErrorCode'])
assert (obj.ResponseCode == data['ResponseCode'])
assert (obj.ResponsePagePath == data['ResponsePagePath'])
|
def test_init(self) -> None:
data = {'ErrorCachingMinTTL': 30, 'ErrorCode': 404, 'ResponseCode': 404, 'ResponsePagePath': './errors/404.html'}
obj = RunwayStaticSiteCustomErrorResponseDataModel(**data)
assert (obj.ErrorCachingMinTTL == data['ErrorCachingMinTTL'])
assert (obj.ErrorCode == data['ErrorCode'])
assert (obj.ResponseCode == data['ResponseCode'])
assert (obj.ResponsePagePath == data['ResponsePagePath'])<|docstring|>Test init.<|endoftext|>
|
6147a5319b42d6e5f2be2545ffbe895399a2559ae91e3bb369509dc12ea84e8d
|
def test_init_extra(self) -> None:
'Test init extra.'
with pytest.raises(ValidationError):
RunwayStaticSiteLambdaFunctionAssociationDataModel(invalid='val')
|
Test init extra.
|
tests/unit/module/staticsite/parameters/test_models.py
|
test_init_extra
|
blade2005/runway
| 134
|
python
|
def test_init_extra(self) -> None:
with pytest.raises(ValidationError):
RunwayStaticSiteLambdaFunctionAssociationDataModel(invalid='val')
|
def test_init_extra(self) -> None:
with pytest.raises(ValidationError):
RunwayStaticSiteLambdaFunctionAssociationDataModel(invalid='val')<|docstring|>Test init extra.<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.