Column schema (⌀ marks a nullable column):

| column | dtype | stats | nullable |
|---|---|---|---|
| hexsha | stringlengths | 40-40 | |
| size | int64 | 4-996k | |
| ext | stringclasses | 8 values | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 4-245 | |
| max_stars_repo_name | stringlengths | 6-130 | |
| max_stars_repo_head_hexsha | stringlengths | 40-40 | |
| max_stars_repo_licenses | listlengths | 1-10 | |
| max_stars_count | int64 | 1-191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24-24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24-24 | ⌀ |
| max_issues_repo_path | stringlengths | 4-245 | |
| max_issues_repo_name | stringlengths | 6-130 | |
| max_issues_repo_head_hexsha | stringlengths | 40-40 | |
| max_issues_repo_licenses | listlengths | 1-10 | |
| max_issues_count | int64 | 1-67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24-24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24-24 | ⌀ |
| max_forks_repo_path | stringlengths | 4-245 | |
| max_forks_repo_name | stringlengths | 6-130 | |
| max_forks_repo_head_hexsha | stringlengths | 40-40 | |
| max_forks_repo_licenses | listlengths | 1-10 | |
| max_forks_count | int64 | 1-105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24-24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24-24 | ⌀ |
| content | stringlengths | 4-996k | |
| avg_line_length | float64 | 1.33-58.2k | |
| max_line_length | int64 | 2-323k | |
| alphanum_fraction | float64 | 0-0.97 | |
| content_no_comment | stringlengths | 0-946k | |
| is_comment_constant_removed | bool | 2 classes | |
| is_sharp_comment_removed | bool | 1 class | |
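The records below are far easier to handle programmatically than to read flattened. As a quick orientation, here is a minimal sketch of filtering rows against this schema; the parquet file name is a placeholder and the use of pandas is an assumption for illustration, not part of the dataset:

import pandas as pd

# Hypothetical shard name; substitute a real parquet file of this dataset.
df = pd.read_parquet("shard-00000.parquet")

# Keep permissively licensed Python files that have at least one star.
# max_stars_count is nullable (marked ⌀ above), so drop nulls first.
mask = (
    (df["lang"] == "Python")
    & df["max_stars_count"].notna()
    & df["max_stars_repo_licenses"].apply(
        lambda licenses: any(l in ("MIT", "Apache-2.0") for l in licenses)
    )
)
for _, row in df[mask].iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"],
          int(row["max_stars_count"]))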
7902085efe7cf765f12737207cfe1799a58bdf06
| 191
|
py
|
Python
|
ml-agents/mlagents/trainers/__init__.py
|
kazukiave/ml-agents
|
d3157808e757999596f3d514406b38307e441925
|
[
"Apache-2.0"
] | 13,653
|
2017-09-19T15:56:02.000Z
|
2022-03-31T18:55:07.000Z
|
ml-agents/mlagents/trainers/__init__.py
|
yuuharuka/ml-agents
|
8963f72703eb2fd08eed2e4338c07e5b5af1376c
|
[
"Apache-2.0"
] | 3,623
|
2017-09-20T02:50:20.000Z
|
2022-03-31T06:37:25.000Z
|
ml-agents/mlagents/trainers/__init__.py
|
yuuharuka/ml-agents
|
8963f72703eb2fd08eed2e4338c07e5b5af1376c
|
[
"Apache-2.0"
] | 4,130
|
2017-09-19T17:36:34.000Z
|
2022-03-31T12:54:55.000Z
|
# Version of the library that will be used to upload to pypi
__version__ = "0.28.0.dev0"
# Git tag that will be checked to determine whether to trigger upload to pypi
__release_tag__ = None
| 31.833333
| 77
| 0.769634
|
__version__ = "0.28.0.dev0"
__release_tag__ = None
| true
| true
|
790208fca1faeed36c22af8e234c55ca9fe70341
| 4,739
|
py
|
Python
|
openslides_backend/main.py
|
peb-adr/openslides-backend
|
f885105446760413f1bce3bde33241f7cb1205ac
|
[
"MIT"
] | null | null | null |
openslides_backend/main.py
|
peb-adr/openslides-backend
|
f885105446760413f1bce3bde33241f7cb1205ac
|
[
"MIT"
] | null | null | null |
openslides_backend/main.py
|
peb-adr/openslides-backend
|
f885105446760413f1bce3bde33241f7cb1205ac
|
[
"MIT"
] | null | null | null |
import logging
import multiprocessing
import os
import signal
import sys
import time
from typing import Any
from datastore.reader.app import register_services
from gunicorn.app.base import BaseApplication
from .shared.env import is_dev_mode
from .shared.interfaces.logging import LoggingModule
from .shared.interfaces.wsgi import WSGIApplication
register_services()
# ATTENTION: We use the Python builtin logging module. To change this use
# something like "import custom_logging as logging".
DEFAULT_ADDRESSES = {
"ActionView": "0.0.0.0:9002",
"PresenterView": "0.0.0.0:9003",
}
class OpenSlidesBackendGunicornApplication(BaseApplication): # pragma: no cover
"""
Standalone application class for Gunicorn. It prepares Gunicorn for using
OpenSlidesBackendWSGIApplication via OpenSlidesBackendWSGIContainer either
with action component or with presenter component.
"""
def __init__(self, view_name: str, *args: Any, **kwargs: Any) -> None:
# Setup global loglevel.
if is_dev_mode():
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
self.view_name = view_name
if self.view_name not in ("ActionView", "PresenterView"):
raise ValueError(
f"View name has to be ActionView or PresenterView, not {self.view_name}."
)
logger.debug(f"Create gunicorn application for {self.view_name}.")
super().__init__(*args, **kwargs)
def load_config(self) -> None:
dev_mode = is_dev_mode()
options = {
"bind": DEFAULT_ADDRESSES[self.view_name],
"worker_tmp_dir": "/dev/shm", # See https://pythonspeed.com/articles/gunicorn-in-docker/
"timeout": int(os.environ.get("OPENSLIDES_BACKEND_WORKER_TIMEOUT", "30")),
"loglevel": "debug" if dev_mode else "info",
"reload": dev_mode,
"reload_engine": "auto", # This is the default however.
}
for key, value in options.items():
self.cfg.set(key, value)
def load(self) -> WSGIApplication:
# We import this here so Gunicorn can use its reload feature properly.
from .wsgi import create_wsgi_application
# TODO: Fix this typing problem.
logging_module: LoggingModule = logging # type: ignore
return create_wsgi_application(logging_module, self.view_name)
def start_action_server() -> None: # pragma: no cover
OpenSlidesBackendGunicornApplication(view_name="ActionView").run()
def start_presenter_server() -> None: # pragma: no cover
OpenSlidesBackendGunicornApplication(view_name="PresenterView").run()
def start_them_all() -> None: # pragma: no cover
print(
f"Start all components in child processes. Parent process id is {os.getpid()}."
)
processes = {
"action": multiprocessing.Process(target=start_action_server),
"presenter": multiprocessing.Process(target=start_presenter_server),
}
for process in processes.values():
process.start()
def sigterm_handler(signalnum: int, current_stack_frame: Any) -> None:
strsignal = signal.strsignal # type: ignore
print(
f"Parent process {os.getpid()} received {strsignal(signalnum)} "
"signal. Terminate all child processes first."
)
for child in multiprocessing.active_children():
child.terminate()
child.join()
print(f"Parent process {os.getpid()} terminated successfully.")
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
while True:
for name, process in processes.items():
if not process.is_alive():
process.join()
print(
f"Component {name} terminated. Terminate all other components now."
)
for other_name, other_process in processes.items():
if name != other_name:
other_process.terminate()
other_process.join()
print("Parent process terminated.")
sys.exit(1)
time.sleep(0.1)
def main() -> None: # pragma: no cover
component = os.environ.get("OPENSLIDES_BACKEND_COMPONENT", "all")
if component == "action":
start_action_server()
elif component == "presenter":
start_presenter_server()
elif component == "all":
start_them_all()
else:
print(
f"Error: OPENSLIDES_BACKEND_COMPONENT must not be {component}.",
file=sys.stderr,
)
sys.stderr.flush()
sys.exit(1)
sys.exit(0)
| 34.591241
| 101
| 0.643807
|
import logging
import multiprocessing
import os
import signal
import sys
import time
from typing import Any
from datastore.reader.app import register_services
from gunicorn.app.base import BaseApplication
from .shared.env import is_dev_mode
from .shared.interfaces.logging import LoggingModule
from .shared.interfaces.wsgi import WSGIApplication
register_services()
DEFAULT_ADDRESSES = {
"ActionView": "0.0.0.0:9002",
"PresenterView": "0.0.0.0:9003",
}
class OpenSlidesBackendGunicornApplication(BaseApplication):
def __init__(self, view_name: str, *args: Any, **kwargs: Any) -> None:
if is_dev_mode():
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
self.view_name = view_name
if self.view_name not in ("ActionView", "PresenterView"):
raise ValueError(
f"View name has to be ActionView or PresenterView, not {self.view_name}."
)
logger.debug(f"Create gunicorn application for {self.view_name}.")
super().__init__(*args, **kwargs)
def load_config(self) -> None:
dev_mode = is_dev_mode()
options = {
"bind": DEFAULT_ADDRESSES[self.view_name],
"worker_tmp_dir": "/dev/shm",
"timeout": int(os.environ.get("OPENSLIDES_BACKEND_WORKER_TIMEOUT", "30")),
"loglevel": "debug" if dev_mode else "info",
"reload": dev_mode,
"reload_engine": "auto",
}
for key, value in options.items():
self.cfg.set(key, value)
def load(self) -> WSGIApplication:
from .wsgi import create_wsgi_application
logging_module: LoggingModule = logging
return create_wsgi_application(logging_module, self.view_name)
def start_action_server() -> None:
OpenSlidesBackendGunicornApplication(view_name="ActionView").run()
def start_presenter_server() -> None:
OpenSlidesBackendGunicornApplication(view_name="PresenterView").run()
def start_them_all() -> None:
print(
f"Start all components in child processes. Parent process id is {os.getpid()}."
)
processes = {
"action": multiprocessing.Process(target=start_action_server),
"presenter": multiprocessing.Process(target=start_presenter_server),
}
for process in processes.values():
process.start()
def sigterm_handler(signalnum: int, current_stack_frame: Any) -> None:
strsignal = signal.strsignal
print(
f"Parent process {os.getpid()} received {strsignal(signalnum)} "
"signal. Terminate all child processes first."
)
for child in multiprocessing.active_children():
child.terminate()
child.join()
print(f"Parent process {os.getpid()} terminated successfully.")
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
while True:
for name, process in processes.items():
if not process.is_alive():
process.join()
print(
f"Component {name} terminated. Terminate all other components now."
)
for other_name, other_process in processes.items():
if name != other_name:
other_process.terminate()
other_process.join()
print("Parent process terminated.")
sys.exit(1)
time.sleep(0.1)
def main() -> None:
component = os.environ.get("OPENSLIDES_BACKEND_COMPONENT", "all")
if component == "action":
start_action_server()
elif component == "presenter":
start_presenter_server()
elif component == "all":
start_them_all()
else:
print(
f"Error: OPENSLIDES_BACKEND_COMPONENT must not be {component}.",
file=sys.stderr,
)
sys.stderr.flush()
sys.exit(1)
sys.exit(0)
| true
| true
|
7902091b0d4200ad38e755435462aa3350904014
| 6,457
|
py
|
Python
|
tests/demo.py
|
FangTang999/Path4GMNS
|
d319bb4b97a51055c1917820d1f5eaf7b8032a51
|
[
"Apache-2.0"
] | 2
|
2021-06-09T09:41:31.000Z
|
2021-07-21T14:09:34.000Z
|
tests/demo.py
|
zqNiu/Path4GMNS
|
666cb425206d6bfc26135c649253e931bfc28003
|
[
"Apache-2.0"
] | null | null | null |
tests/demo.py
|
zqNiu/Path4GMNS
|
666cb425206d6bfc26135c649253e931bfc28003
|
[
"Apache-2.0"
] | null | null | null |
import path4gmns as pg
from time import time
def test_download_sample_data_sets():
pg.download_sample_data_sets()
def test_find_shortest_path():
load_demand = False
network = pg.read_network(load_demand)
print('\nshortest path (node id) from node 1 to node 2, '
+network.find_shortest_path(1, 2))
print('\nshortest path (link id) from node 1 to node 2, '
+network.find_shortest_path(1, 2, seq_type='link'))
# retrieve the shortest path under a specific mode (which must be defined
# in settings.yaml)
print('\nshortest path (node id) from node 1 to node 2, '
+network.find_shortest_path(1, 2, mode='w'))
print('\nshortest path (link id) from node 1 to node 2, '
+network.find_shortest_path(1, 2, mode='w', seq_type='link'))
def test_find_shortest_path_for_agents():
network = pg.read_network()
st = time()
# find agent paths under a specific mode defined in settings.yaml,
# say, w (i.e., walk)
# network.find_path_for_agents('w') or network.find_path_for_agents('walk')
network.find_path_for_agents()
print('\nprocessing time of finding shortest paths for all agents: '
f'{time()-st:.2f} s')
agent_id = 300
print('\norigin node id of agent is '
f'{network.get_agent_orig_node_id(agent_id)}')
print('destination node id of agent is '
f'{network.get_agent_dest_node_id(agent_id)}')
print('shortest path (node id) of agent, '
f'{network.get_agent_node_path(agent_id)}')
print('shortest path (link id) of agent, '
f'{network.get_agent_link_path(agent_id)}')
agent_id = 1000
print('\norigin node id of agent is '
f'{network.get_agent_orig_node_id(agent_id)}')
print('destination node id of agent is '
f'{network.get_agent_dest_node_id(agent_id)}')
print('shortest path (node id) of agent, '
f'{network.get_agent_node_path(agent_id)}')
print('shortest path (link id) of agent, '
f'{network.get_agent_link_path(agent_id)}')
# output unique agent paths to a csv file
# if you do not want to include geometry info in the output file,
# you can do pg.output_agent_paths(network, False)
pg.output_agent_paths(network)
def test_column_generation_py():
network = pg.read_network()
print('\nstart column generation\n')
st = time()
iter_num = 20
column_update_num = 20
# pg.perform_network_assignment(assignment_mode=1, assignment_num,
# column_update_num, network)
# has been deprecated starting from v0.7.2, and will be removed later.
pg.perform_column_generation(iter_num, column_update_num, network)
print(f'processing time of column generation: {time()-st:.2f} s'
f' for {iter_num} assignment iterations and '
f'{column_update_num} iterations in column generation')
# if you do not want to include geometry info in the output file,
# use pg.output_columns(network, False)
pg.output_columns(network)
pg.output_link_performance(network)
def test_column_generation_dtalite():
""" validation using DTALite """
print('start column generation using DTALite')
st = time()
mode = 1
iter_num = 20
column_update_num = 20
pg.perform_network_assignment_DTALite(mode, iter_num, column_update_num)
print(f'processing time of column generation: {time()-st:.2f} s'
f' for {iter_num} assignment iterations and '
f'{column_update_num} iterations in column generation')
print('\npath finding results can be found in agent.csv')
def test_loading_columns():
network = pg.read_network()
print('\nstart loading columns\n')
st = time()
pg.load_columns(network)
print(f'processing time of loading columns: {time()-st:.2f} s')
print('\nstart column generation\n')
st = time()
iter_num = 0
column_update_num = 10
# pg.perform_network_assignment(assignment_mode=1, assignment_num,
# column_update_num, network)
    # has been deprecated starting from v0.7.2, and will be removed later.
pg.perform_column_generation(iter_num, column_update_num, network)
print(f'processing time of column generation: {time()-st:.2f} s'
f' for {iter_num} assignment iterations and '
f'{column_update_num} iterations in column generation')
pg.output_columns(network)
pg.output_link_performance(network)
def test_accessibility():
load_demand = False
network = pg.read_network(load_demand)
print('\nstart accessibility evaluation\n')
st = time()
# multimodal accessibility evaluation
pg.evaluate_accessibility(network)
    # accessibility evaluation for a target mode
# pg.evaluate_accessibility(network, multimodal=False, mode='p')
print('complete accessibility evaluation.\n')
print(f'processing time of accessibility evaluation: {time()-st:.2f} s')
    # get accessible nodes and links starting from node 1 with a 5-minute
# time window for the default mode auto (i.e., 'p')
network.get_accessible_nodes(1, 5)
network.get_accessible_links(1, 5)
    # get accessible nodes and links starting from node 1 with a 15-minute
# time window for mode walk (i.e., 'w')
network.get_accessible_nodes(1, 15, 'w')
network.get_accessible_links(1, 15, 'w')
def demo_mode(mode):
print(f'the selected mode is {mode}\n')
if mode == 0:
# option 0: download the sample data set from GitHub
test_download_sample_data_sets()
elif mode == 1:
# option 1: find shortest path between O and D on Chicago network
test_find_shortest_path()
elif mode == 2:
# option 2: find shortest paths for all agents on Chicago network
test_find_shortest_path_for_agents()
elif mode == 3:
# option 3: perform column generation using Python engine
# on Chicago network
test_column_generation_py()
elif mode == 4:
# option 4: perform column generation using DTALite on Chicago network
test_column_generation_dtalite()
elif mode == 5:
# option 5: load columns generated from option 3 or 4
# on Chicago network
test_loading_columns()
else:
# option 6: evaluate multimodal accessibility on Chicago network
test_accessibility()
if __name__=="__main__":
demo_mode(6)
| 34.529412
| 79
| 0.678179
|
import path4gmns as pg
from time import time
def test_download_sample_data_sets():
pg.download_sample_data_sets()
def test_find_shortest_path():
load_demand = False
network = pg.read_network(load_demand)
print('\nshortest path (node id) from node 1 to node 2, '
+network.find_shortest_path(1, 2))
print('\nshortest path (link id) from node 1 to node 2, '
+network.find_shortest_path(1, 2, seq_type='link'))
print('\nshortest path (node id) from node 1 to node 2, '
+network.find_shortest_path(1, 2, mode='w'))
print('\nshortest path (link id) from node 1 to node 2, '
+network.find_shortest_path(1, 2, mode='w', seq_type='link'))
def test_find_shortest_path_for_agents():
network = pg.read_network()
st = time()
network.find_path_for_agents()
print('\nprocessing time of finding shortest paths for all agents: '
f'{time()-st:.2f} s')
agent_id = 300
print('\norigin node id of agent is '
f'{network.get_agent_orig_node_id(agent_id)}')
print('destination node id of agent is '
f'{network.get_agent_dest_node_id(agent_id)}')
print('shortest path (node id) of agent, '
f'{network.get_agent_node_path(agent_id)}')
print('shortest path (link id) of agent, '
f'{network.get_agent_link_path(agent_id)}')
agent_id = 1000
print('\norigin node id of agent is '
f'{network.get_agent_orig_node_id(agent_id)}')
print('destination node id of agent is '
f'{network.get_agent_dest_node_id(agent_id)}')
print('shortest path (node id) of agent, '
f'{network.get_agent_node_path(agent_id)}')
print('shortest path (link id) of agent, '
f'{network.get_agent_link_path(agent_id)}')
pg.output_agent_paths(network)
def test_column_generation_py():
network = pg.read_network()
print('\nstart column generation\n')
st = time()
iter_num = 20
column_update_num = 20
pg.perform_column_generation(iter_num, column_update_num, network)
print(f'processing time of column generation: {time()-st:.2f} s'
f' for {iter_num} assignment iterations and '
f'{column_update_num} iterations in column generation')
pg.output_columns(network)
pg.output_link_performance(network)
def test_column_generation_dtalite():
print('start column generation using DTALite')
st = time()
mode = 1
iter_num = 20
column_update_num = 20
pg.perform_network_assignment_DTALite(mode, iter_num, column_update_num)
print(f'processing time of column generation: {time()-st:.2f} s'
f' for {iter_num} assignment iterations and '
f'{column_update_num} iterations in column generation')
print('\npath finding results can be found in agent.csv')
def test_loading_columns():
network = pg.read_network()
print('\nstart loading columns\n')
st = time()
pg.load_columns(network)
print(f'processing time of loading columns: {time()-st:.2f} s')
print('\nstart column generation\n')
st = time()
iter_num = 0
column_update_num = 10
pg.perform_column_generation(iter_num, column_update_num, network)
print(f'processing time of column generation: {time()-st:.2f} s'
f' for {iter_num} assignment iterations and '
f'{column_update_num} iterations in column generation')
pg.output_columns(network)
pg.output_link_performance(network)
def test_accessibility():
load_demand = False
network = pg.read_network(load_demand)
print('\nstart accessibility evaluation\n')
st = time()
pg.evaluate_accessibility(network)
print('complete accessibility evaluation.\n')
print(f'processing time of accessibility evaluation: {time()-st:.2f} s')
network.get_accessible_nodes(1, 5)
network.get_accessible_links(1, 5)
network.get_accessible_nodes(1, 15, 'w')
network.get_accessible_links(1, 15, 'w')
def demo_mode(mode):
print(f'the selected mode is {mode}\n')
if mode == 0:
test_download_sample_data_sets()
elif mode == 1:
test_find_shortest_path()
elif mode == 2:
test_find_shortest_path_for_agents()
elif mode == 3:
test_column_generation_py()
elif mode == 4:
test_column_generation_dtalite()
elif mode == 5:
test_loading_columns()
else:
test_accessibility()
if __name__=="__main__":
demo_mode(6)
| true
| true
|
79020949305c537ba2dc8a445188490094b20f15
| 2,225
|
py
|
Python
|
src/compas_fab/backends/ros/backend_features/move_it_add_collision_mesh.py
|
gramaziokohler/compas_fab
|
85ec40887004c33ac9764ba73c1b66c6de154457
|
[
"MIT"
] | 8
|
2018-09-09T07:29:03.000Z
|
2019-05-14T18:03:20.000Z
|
src/compas_fab/backends/ros/backend_features/move_it_add_collision_mesh.py
|
gramaziokohler/compas_fab
|
85ec40887004c33ac9764ba73c1b66c6de154457
|
[
"MIT"
] | 39
|
2018-09-18T14:16:39.000Z
|
2019-07-01T08:07:10.000Z
|
src/compas_fab/backends/ros/backend_features/move_it_add_collision_mesh.py
|
gramaziokohler/compas_fab
|
85ec40887004c33ac9764ba73c1b66c6de154457
|
[
"MIT"
] | 7
|
2019-01-20T22:04:49.000Z
|
2019-06-10T16:07:26.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.utilities import await_callback
from compas_fab.backends.interfaces import AddCollisionMesh
from compas_fab.backends.ros.messages import ApplyPlanningSceneRequest
from compas_fab.backends.ros.messages import ApplyPlanningSceneResponse
from compas_fab.backends.ros.messages import CollisionObject
from compas_fab.backends.ros.messages import PlanningScene
from compas_fab.backends.ros.messages import PlanningSceneWorld
from compas_fab.backends.ros.service_description import ServiceDescription
__all__ = [
'MoveItAddCollisionMesh',
]
class MoveItAddCollisionMesh(AddCollisionMesh):
"""Callable to add a collision mesh to the planning scene."""
APPLY_PLANNING_SCENE = ServiceDescription('/apply_planning_scene',
'ApplyPlanningScene',
ApplyPlanningSceneRequest,
ApplyPlanningSceneResponse,
)
def __init__(self, ros_client):
self.ros_client = ros_client
def add_collision_mesh(self, collision_mesh, options=None):
"""Add a collision mesh to the planning scene.
Parameters
----------
collision_mesh : :class:`compas_fab.robots.CollisionMesh`
Object containing the collision mesh to be added.
options : dict, optional
Unused parameter.
Returns
-------
``None``
"""
kwargs = {}
kwargs['collision_mesh'] = collision_mesh
kwargs['errback_name'] = 'errback'
return await_callback(self.add_collision_mesh_async, **kwargs)
def add_collision_mesh_async(self, callback, errback, collision_mesh):
co = CollisionObject.from_collision_mesh(collision_mesh)
co.operation = CollisionObject.ADD
world = PlanningSceneWorld(collision_objects=[co])
scene = PlanningScene(world=world, is_diff=True)
request = scene.to_request(self.ros_client.ros_distro)
self.APPLY_PLANNING_SCENE(self.ros_client, request, callback, errback)
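This feature is normally reached through compas_fab's PlanningScene rather than instantiated directly. A minimal usage sketch following the pattern in the compas_fab documentation; it assumes a reachable ROS/MoveIt backend, and the STL file is bundled sample data:

import compas_fab
from compas.datastructures import Mesh
from compas_fab.backends import RosClient
from compas_fab.robots import CollisionMesh, PlanningScene

with RosClient() as client:
    robot = client.load_robot()
    scene = PlanningScene(robot)
    # Load a mesh and register it under the id 'floor'; on a ROS
    # backend this dispatches to MoveItAddCollisionMesh above.
    mesh = Mesh.from_stl(compas_fab.get('planning_scene/floor.stl'))
    cm = CollisionMesh(mesh, 'floor')
    scene.add_collision_mesh(cm)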
| 38.362069
| 78
| 0.681798
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.utilities import await_callback
from compas_fab.backends.interfaces import AddCollisionMesh
from compas_fab.backends.ros.messages import ApplyPlanningSceneRequest
from compas_fab.backends.ros.messages import ApplyPlanningSceneResponse
from compas_fab.backends.ros.messages import CollisionObject
from compas_fab.backends.ros.messages import PlanningScene
from compas_fab.backends.ros.messages import PlanningSceneWorld
from compas_fab.backends.ros.service_description import ServiceDescription
__all__ = [
'MoveItAddCollisionMesh',
]
class MoveItAddCollisionMesh(AddCollisionMesh):
APPLY_PLANNING_SCENE = ServiceDescription('/apply_planning_scene',
'ApplyPlanningScene',
ApplyPlanningSceneRequest,
ApplyPlanningSceneResponse,
)
def __init__(self, ros_client):
self.ros_client = ros_client
def add_collision_mesh(self, collision_mesh, options=None):
kwargs = {}
kwargs['collision_mesh'] = collision_mesh
kwargs['errback_name'] = 'errback'
return await_callback(self.add_collision_mesh_async, **kwargs)
def add_collision_mesh_async(self, callback, errback, collision_mesh):
co = CollisionObject.from_collision_mesh(collision_mesh)
co.operation = CollisionObject.ADD
world = PlanningSceneWorld(collision_objects=[co])
scene = PlanningScene(world=world, is_diff=True)
request = scene.to_request(self.ros_client.ros_distro)
self.APPLY_PLANNING_SCENE(self.ros_client, request, callback, errback)
| true
| true
|
7902099685f65f73855df2c06e5d7f3c20b72cbd
| 393
|
py
|
Python
|
leetcode/python/easy/p1047_removeDuplicates.py
|
kefirzhang/algorithms
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
[
"Apache-2.0"
] | null | null | null |
leetcode/python/easy/p1047_removeDuplicates.py
|
kefirzhang/algorithms
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
[
"Apache-2.0"
] | null | null | null |
leetcode/python/easy/p1047_removeDuplicates.py
|
kefirzhang/algorithms
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
def removeDuplicates(self, S: str) -> str:
i = 1
while i < len(S):
if i <= 0:
i += 1
continue
if S[i] == S[i - 1]:
S = S[:i - 1] + S[i + 1:]
i = i - 1
else:
i = i + 1
return S
slu = Solution()
print(slu.removeDuplicates("abbaca"))
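Note that the slicing approach above copies the whole string on every removal, which is quadratic in the worst case. A common linear-time alternative (not part of the original file) keeps a stack of surviving characters:

class StackSolution:
    def removeDuplicates(self, S: str) -> str:
        stack = []
        for ch in S:
            if stack and stack[-1] == ch:
                stack.pop()  # the new character cancels the previous one
            else:
                stack.append(ch)
        return ''.join(stack)

print(StackSolution().removeDuplicates("abbaca"))  # prints "ca"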
| 21.833333
| 46
| 0.358779
|
class Solution:
def removeDuplicates(self, S: str) -> str:
i = 1
while i < len(S):
if i <= 0:
i += 1
continue
if S[i] == S[i - 1]:
S = S[:i - 1] + S[i + 1:]
i = i - 1
else:
i = i + 1
return S
slu = Solution()
print(slu.removeDuplicates("abbaca"))
| true
| true
|
79020b75eb3cfa7527e60cb4bf3ab7450ecee76f
| 2,766
|
py
|
Python
|
copasi/bindings/python/unittests/Test_CFunctionParameter.py
|
MedAnisse/COPASI
|
561f591f8231b1c4880ce554d0197ff21ef4734c
|
[
"Artistic-2.0"
] | 64
|
2015-03-14T14:06:18.000Z
|
2022-02-04T23:19:08.000Z
|
copasi/bindings/python/unittests/Test_CFunctionParameter.py
|
MedAnisse/COPASI
|
561f591f8231b1c4880ce554d0197ff21ef4734c
|
[
"Artistic-2.0"
] | 4
|
2017-08-16T10:26:46.000Z
|
2020-01-08T12:05:54.000Z
|
copasi/bindings/python/unittests/Test_CFunctionParameter.py
|
MedAnisse/COPASI
|
561f591f8231b1c4880ce554d0197ff21ef4734c
|
[
"Artistic-2.0"
] | 28
|
2015-04-16T14:14:59.000Z
|
2022-03-28T12:04:14.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 - 2020 by Pedro Mendes, Rector and Visitors of the
# University of Virginia, University of Heidelberg, and University
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2017 - 2018 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University of
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Copyright (C) 2006 - 2007 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc. and EML Research, gGmbH.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CFunctionParameter(unittest.TestCase):
def setUp(self):
self.functions=COPASI.CRootContainer.getFunctionList()
self.function=self.functions.findFunction("Iso Uni Uni")
self.assert_(self.function!=None)
self.assert_(self.function.__class__==COPASI.CFunction)
self.parameters=self.function.getVariables()
self.assert_(self.parameters!=None)
self.assert_(self.parameters.__class__==COPASI.CFunctionParameters)
index=self.parameters.findParameterByName("Keq",COPASI.CFunctionParameter.DataType_FLOAT64)
self.parameter=self.parameters.getParameter(index)
self.assert_(self.parameter!=None)
self.assert_(self.parameter.__class__==COPASI.CFunctionParameter)
def test_getKey(self):
key=self.parameter.getKey()
self.assert_(type(key)==str)
def test_getType(self):
b=self.parameter.getType()
self.assert_(type(b)==int)
self.assert_(b==COPASI.CFunctionParameter.DataType_FLOAT64)
def test_setType(self):
t=COPASI.CFunctionParameter.DataType_INT32
self.parameter.setType(t)
self.assert_(self.parameter.getType()==t)
def test_getUsage(self):
b=self.parameter.getUsage()
self.assert_(type(b)==int)
self.assert_(b==COPASI.CFunctionParameter.Role_PARAMETER)
def test_setUsage(self):
t=COPASI.CFunctionParameter.Role_VOLUME
self.parameter.setUsage(t)
self.assert_(self.parameter.getUsage()==t)
def suite():
tests=[
"test_getKey"
,"test_getType"
,"test_setType"
,"test_getUsage"
,"test_setUsage"
]
return unittest.TestSuite(map(Test_CFunctionParameter,tests))
if(__name__ == '__main__'):
unittest.TextTestRunner(verbosity=2).run(suite())
| 32.162791
| 95
| 0.721981
|
import COPASI
import unittest
from types import *
class Test_CFunctionParameter(unittest.TestCase):
def setUp(self):
self.functions=COPASI.CRootContainer.getFunctionList()
self.function=self.functions.findFunction("Iso Uni Uni")
self.assert_(self.function!=None)
self.assert_(self.function.__class__==COPASI.CFunction)
self.parameters=self.function.getVariables()
self.assert_(self.parameters!=None)
self.assert_(self.parameters.__class__==COPASI.CFunctionParameters)
index=self.parameters.findParameterByName("Keq",COPASI.CFunctionParameter.DataType_FLOAT64)
self.parameter=self.parameters.getParameter(index)
self.assert_(self.parameter!=None)
self.assert_(self.parameter.__class__==COPASI.CFunctionParameter)
def test_getKey(self):
key=self.parameter.getKey()
self.assert_(type(key)==str)
def test_getType(self):
b=self.parameter.getType()
self.assert_(type(b)==int)
self.assert_(b==COPASI.CFunctionParameter.DataType_FLOAT64)
def test_setType(self):
t=COPASI.CFunctionParameter.DataType_INT32
self.parameter.setType(t)
self.assert_(self.parameter.getType()==t)
def test_getUsage(self):
b=self.parameter.getUsage()
self.assert_(type(b)==int)
self.assert_(b==COPASI.CFunctionParameter.Role_PARAMETER)
def test_setUsage(self):
t=COPASI.CFunctionParameter.Role_VOLUME
self.parameter.setUsage(t)
self.assert_(self.parameter.getUsage()==t)
def suite():
tests=[
"test_getKey"
,"test_getType"
,"test_setType"
,"test_getUsage"
,"test_setUsage"
]
return unittest.TestSuite(map(Test_CFunctionParameter,tests))
if(__name__ == '__main__'):
unittest.TextTestRunner(verbosity=2).run(suite())
| true
| true
|
79020c5a6767fbe0484883366125346815d1d434
| 3,438
|
py
|
Python
|
atm/config.py
|
moeyensj/atm
|
0523600cf44423a1ef72ca40fff29bbfbe1281a8
|
[
"BSD-3-Clause"
] | 10
|
2019-05-04T01:02:16.000Z
|
2021-12-29T11:20:23.000Z
|
atm/config.py
|
moeyensj/atm
|
0523600cf44423a1ef72ca40fff29bbfbe1281a8
|
[
"BSD-3-Clause"
] | 22
|
2019-04-26T03:17:24.000Z
|
2021-03-03T23:38:02.000Z
|
atm/config.py
|
moeyensj/atm
|
0523600cf44423a1ef72ca40fff29bbfbe1281a8
|
[
"BSD-3-Clause"
] | 2
|
2019-09-23T05:52:18.000Z
|
2021-12-29T11:20:21.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
__all__ = ["Config"]
class Config(object):
"""
Config: Holds configuration settings.
Parameters
----------
fitParameters : list
Parameters to fit.
parameterPriors : dict
Dictionary with parameters as keys, and a dictionary
        as the value for each key. This dictionary is used
        to set up the pymc3 priors for each parameter not in
        fitParameters.
columnMapping : dict
This dictionary should define the
column names of the user's data relative to the
internally used names.
    tableParameterLimits : dict
        This dictionary is used
        when building model tables to set the grid in subsolar
        temperature and phase angle. It should have 'T_ss' and
        'alpha' as keys. Values should be a list:
        the first element should be another list
        with the lower and upper bounds, the second element
        should be the step size.
    threads : int
        The number of threads to use when building model tables
        and running the multi-fit script.
    samples : int
        Number of samples to draw from the posterior distribution.
    burnInSamples : int
        Number of the drawn samples to discard from summary statistics
        and plotting.
    chains : int
        Number of Markov chains to sample the posterior distribution.
phaseAngleFluxCorrection : float
The default value to correct for phase-angle effects in the
Standard Thermal Model. The canonical value is 0.01.
verbose : bool
Print progress statements?
"""
fitParameters = ["logT1", "logD", "eps"]
parameterPriors = {
"logD": {
"lower": 1,
"upper": 8,
},
"eps": {
"lower": 0.0,
"upper": 1.0},
"logT1": {
"lower": 0.01,
"upper": 5,
},
"T_ss": {
"lower": 10,
"upper": 1200.0
},
"alpha_rad": {
"lower": 0,
"upper": np.pi
},
"r_au": {
"lower": 0,
"upper": 10
},
"delta_au": {
"lower": 0,
"upper": 10
},
"G": {
"lower": 0,
"upper": 1},
"p": {
"lower": 0,
"upper": 5
},
"eta": {
"lower": 0,
"upper": 10
}
}
columnMapping = {
"designation" : "designation",
"obs_id": "obs_id",
"exp_mjd": "mjd",
"r_au": "r_au",
"delta_au": "delta_au",
"alpha_rad": "alpha_rad",
"G": "G",
"logD": "logD",
"logT1" : "logT1",
"eta": "eta",
"eps": "eps",
"flux_si": ["flux_W1_si", "flux_W2_si", "flux_W3_si", "flux_W4_si"],
"fluxErr_si": ["fluxErr_W1_si", "fluxErr_W2_si", "fluxErr_W3_si", "fluxErr_W4_si"],
"mag" : ["mag_W1", "mag_W2", "mag_W3", "mag_W4"],
"magErr" : ["magErr_W1", "magErr_W2", "magErr_W3", "magErr_W4"]
}
tableParameterLimits = {
"T_ss": [[100.0, 1200.0], 0.5],
"alpha": [[0.0, np.pi], np.pi/360]
}
threads = 10
samples = 2500
burnInSamples = 500
chains = 20
phaseAngleFluxCorrection = 0.01
verbose = True
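Because every setting is a plain class attribute, a run-specific configuration can be expressed by subclassing and overriding only what differs. A hedged sketch; MyConfig and the values below are illustrative, and it assumes this module is importable as atm.config:

from atm.config import Config

class MyConfig(Config):
    # Inherit all defaults above; override only what this run needs.
    fitParameters = ["logT1", "logD"]
    threads = 4
    samples = 5000
    verbose = False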
| 28.180328
| 91
| 0.513962
|
import numpy as np
__all__ = ["Config"]
class Config(object):
fitParameters = ["logT1", "logD", "eps"]
parameterPriors = {
"logD": {
"lower": 1,
"upper": 8,
},
"eps": {
"lower": 0.0,
"upper": 1.0},
"logT1": {
"lower": 0.01,
"upper": 5,
},
"T_ss": {
"lower": 10,
"upper": 1200.0
},
"alpha_rad": {
"lower": 0,
"upper": np.pi
},
"r_au": {
"lower": 0,
"upper": 10
},
"delta_au": {
"lower": 0,
"upper": 10
},
"G": {
"lower": 0,
"upper": 1},
"p": {
"lower": 0,
"upper": 5
},
"eta": {
"lower": 0,
"upper": 10
}
}
columnMapping = {
"designation" : "designation",
"obs_id": "obs_id",
"exp_mjd": "mjd",
"r_au": "r_au",
"delta_au": "delta_au",
"alpha_rad": "alpha_rad",
"G": "G",
"logD": "logD",
"logT1" : "logT1",
"eta": "eta",
"eps": "eps",
"flux_si": ["flux_W1_si", "flux_W2_si", "flux_W3_si", "flux_W4_si"],
"fluxErr_si": ["fluxErr_W1_si", "fluxErr_W2_si", "fluxErr_W3_si", "fluxErr_W4_si"],
"mag" : ["mag_W1", "mag_W2", "mag_W3", "mag_W4"],
"magErr" : ["magErr_W1", "magErr_W2", "magErr_W3", "magErr_W4"]
}
tableParameterLimits = {
"T_ss": [[100.0, 1200.0], 0.5],
"alpha": [[0.0, np.pi], np.pi/360]
}
threads = 10
samples = 2500
burnInSamples = 500
chains = 20
phaseAngleFluxCorrection = 0.01
verbose = True
| true
| true
|
79020c73a919eecb49bc05ec9523af8e6add3076
| 1,457
|
py
|
Python
|
recipe_scrapers/simplyquinoa.py
|
hotfix/recipe-scrapers
|
0dd87366f137c32f348d14695af8cc4c20d455a7
|
[
"MIT"
] | 1
|
2020-08-18T16:45:17.000Z
|
2020-08-18T16:45:17.000Z
|
recipe_scrapers/simplyquinoa.py
|
hotfix/recipe-scrapers
|
0dd87366f137c32f348d14695af8cc4c20d455a7
|
[
"MIT"
] | null | null | null |
recipe_scrapers/simplyquinoa.py
|
hotfix/recipe-scrapers
|
0dd87366f137c32f348d14695af8cc4c20d455a7
|
[
"MIT"
] | null | null | null |
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string, get_yields
class SimplyQuinoa(AbstractScraper):
@classmethod
def host(self):
return 'simplyquinoa.com'
def title(self):
return self.soup.find(
'h2',
{'class': 'wprm-recipe-name'}
).get_text()
def total_time(self):
return get_minutes(self.soup.find(
'span',
{'class': 'wprm-recipe-total_time'}).parent
)
def yields(self):
yields = self.soup.find(
'span',
{'class': 'wprm-recipe-servings'}
).get_text()
return get_yields("{} servings".format(yields))
def ingredients(self):
ingredients = self.soup.findAll(
'li',
{'class': 'wprm-recipe-ingredient'}
)
return [
normalize_string(ingredient.get_text())
for ingredient in ingredients
]
def instructions(self):
instructions = self.soup.findAll(
'div',
{'class': 'wprm-recipe-instruction-text'}
)
return '\n'.join([
normalize_string(instruction.get_text())
for instruction in instructions
])
def ratings(self):
return round(float(
self.soup.find(
"span",
{"class": "wprm-recipe-rating-average"}
).get_text()), 2
)
| 24.694915
| 61
| 0.533288
|
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string, get_yields
class SimplyQuinoa(AbstractScraper):
@classmethod
def host(self):
return 'simplyquinoa.com'
def title(self):
return self.soup.find(
'h2',
{'class': 'wprm-recipe-name'}
).get_text()
def total_time(self):
return get_minutes(self.soup.find(
'span',
{'class': 'wprm-recipe-total_time'}).parent
)
def yields(self):
yields = self.soup.find(
'span',
{'class': 'wprm-recipe-servings'}
).get_text()
return get_yields("{} servings".format(yields))
def ingredients(self):
ingredients = self.soup.findAll(
'li',
{'class': 'wprm-recipe-ingredient'}
)
return [
normalize_string(ingredient.get_text())
for ingredient in ingredients
]
def instructions(self):
instructions = self.soup.findAll(
'div',
{'class': 'wprm-recipe-instruction-text'}
)
return '\n'.join([
normalize_string(instruction.get_text())
for instruction in instructions
])
def ratings(self):
return round(float(
self.soup.find(
"span",
{"class": "wprm-recipe-rating-average"}
).get_text()), 2
)
| true
| true
|
79020d407f7d2dd513661ed3a9e770c03db64696
| 7,196
|
py
|
Python
|
Functions/visibility_functions.py
|
LorenzWieseke/GLBTextureTools
|
88bfa0e8eef7308573cd43d02b1ddc4dc89be8f7
|
[
"MIT"
] | 9
|
2020-03-13T21:39:27.000Z
|
2022-03-30T20:48:46.000Z
|
Functions/visibility_functions.py
|
LorenzWieseke/GLBTextureTools
|
88bfa0e8eef7308573cd43d02b1ddc4dc89be8f7
|
[
"MIT"
] | 1
|
2021-04-09T12:55:27.000Z
|
2021-04-09T12:55:27.000Z
|
Functions/visibility_functions.py
|
LorenzWieseke/GLBTextureTools
|
88bfa0e8eef7308573cd43d02b1ddc4dc89be8f7
|
[
"MIT"
] | null | null | null |
import bpy
from bpy import context
from . import node_functions
from . import material_functions
from . import constants
import mathutils
def update_selected_image(self, context):
sel_texture = bpy.data.images[self.texture_index]
show_image_in_image_editor(sel_texture)
def show_image_in_image_editor(image):
for area in bpy.context.screen.areas:
if area.type == 'IMAGE_EDITOR':
area.spaces.active.image = image
def switch_baked_material(show_bake_material,affect):
current_bake_type = bpy.context.scene.bake_settings.get_current_bake_type()
material_name_suffix = constants.Material_Suffix.bake_type_mat_suffix[current_bake_type]
# on what object to work
if affect == 'active':
objects = [bpy.context.active_object]
elif affect == 'selected':
objects = bpy.context.selected_editable_objects
elif affect == 'visible':
objects = [ob for ob in bpy.context.view_layer.objects if ob.visible_get()]
elif affect == 'scene':
objects = bpy.context.scene.objects
all_mats = bpy.data.materials
baked_mats = [mat for mat in all_mats if material_name_suffix in mat.name]
for obj in objects:
if current_bake_type != "pbr":
baked_ao_flag = getattr(obj,"ao_map_name") != '' or getattr(obj,"lightmap_name") != ''
if not baked_ao_flag:
continue
for slot in obj.material_slots:
if show_bake_material:
for baked_mat in baked_mats:
if baked_mat.name == slot.material.name + material_name_suffix + obj.bake_version:
slot.material = baked_mat
else:
if (material_name_suffix in slot.material.name):
bake_material = slot.material
index = bake_material.name.find(material_name_suffix)
org_mat = all_mats.get(bake_material.name[0:index])
if org_mat is not None:
slot.material = org_mat
def preview_bake_texture(self,context):
context = bpy.context
bake_settings = context.scene.bake_settings
preview_bake_texture = context.scene.texture_settings.preview_bake_texture
vis_mats = material_functions.get_all_visible_materials()
for mat in vis_mats:
if not mat.node_tree:
continue
nodes = mat.node_tree.nodes
bake_texture_node = None
if bake_settings.lightmap_bake:
bake_texture_node = nodes.get(bake_settings.texture_node_lightmap)
elif bake_settings.ao_bake:
bake_texture_node = nodes.get(bake_settings.texture_node_ao)
if bake_texture_node is not None:
if preview_bake_texture:
node_functions.emission_setup(mat, bake_texture_node.outputs["Color"])
else:
pbr_node = node_functions.get_nodes_by_type(nodes, constants.Node_Types.pbr_node)
if len(pbr_node) == 0:
return
pbr_node = pbr_node[0]
node_functions.remove_node(mat, "Emission Bake")
node_functions.reconnect_PBR(mat, pbr_node)
def preview_lightmap(self, context):
preview_lightmap = context.scene.texture_settings.preview_lightmap
vis_mats = material_functions.get_all_visible_materials()
for material in vis_mats:
if not material.node_tree:
continue
nodes = material.node_tree.nodes
lightmap_node = nodes.get("Lightmap")
if lightmap_node is None:
continue
pbr_node = node_functions.get_pbr_node(material)
if pbr_node is None:
print("\n " + material.name + " has no PBR Node \n")
continue
base_color_input = node_functions.get_pbr_inputs(pbr_node)["base_color_input"]
emission_input = node_functions.get_pbr_inputs(pbr_node)["emission_input"]
lightmap_output = lightmap_node.outputs["Color"]
if preview_lightmap:
# add mix node
            mix_node_name = "Multiply Lightmap"
mix_node = node_functions.add_node(material,constants.Shader_Node_Types.mix, mix_node_name)
mix_node.blend_type = 'MULTIPLY'
mix_node.inputs[0].default_value = 1 # set factor to 1
pos_offset = mathutils.Vector((-200, 200))
mix_node.location = pbr_node.location + pos_offset
mix_node_input1 = mix_node.inputs["Color1"]
mix_node_input2 = mix_node.inputs["Color2"]
mix_node_output = mix_node.outputs["Color"]
# image texture in base color
if base_color_input.is_linked:
node_before_base_color = base_color_input.links[0].from_node
if not node_before_base_color.name == mix_node_name:
node_functions.make_link(material, node_before_base_color.outputs["Color"], mix_node_input1)
node_functions.make_link(material, lightmap_output, mix_node_input2)
node_functions.make_link(material, mix_node_output, base_color_input)
else :
mix_node_input1.default_value = base_color_input.default_value
node_functions.make_link(material, lightmap_output, mix_node_input2)
node_functions.make_link(material, mix_node_output, base_color_input)
node_functions.remove_link(material,lightmap_output,emission_input)
if not preview_lightmap:
# remove mix and reconnect base color
            mix_node = nodes.get("Multiply Lightmap")
if mix_node is not None:
color_input_connections = len(mix_node.inputs["Color1"].links)
if (color_input_connections == 0):
node_functions.remove_node(material,mix_node.name)
else:
node_functions.remove_reconnect_node(material,mix_node.name)
node_functions.link_pbr_to_output(material,pbr_node)
def lightmap_to_emission(self, context, connect):
vis_mats = material_functions.get_all_visible_materials()
for material in vis_mats:
if not material.node_tree:
continue
nodes = material.node_tree.nodes
pbr_node = node_functions.get_pbr_node(material)
lightmap_node = nodes.get("Lightmap")
if lightmap_node is None:
continue
emission_input = node_functions.get_pbr_inputs(pbr_node)["emission_input"]
lightmap_output = lightmap_node.outputs["Color"]
if connect:
node_functions.make_link(material, lightmap_output, emission_input)
else:
node_functions.remove_link(material,lightmap_output,emission_input)
| 38.481283
| 116
| 0.618677
|
import bpy
from bpy import context
from . import node_functions
from . import material_functions
from . import constants
import mathutils
def update_selected_image(self, context):
sel_texture = bpy.data.images[self.texture_index]
show_image_in_image_editor(sel_texture)
def show_image_in_image_editor(image):
for area in bpy.context.screen.areas:
if area.type == 'IMAGE_EDITOR':
area.spaces.active.image = image
def switch_baked_material(show_bake_material,affect):
current_bake_type = bpy.context.scene.bake_settings.get_current_bake_type()
material_name_suffix = constants.Material_Suffix.bake_type_mat_suffix[current_bake_type]
if affect == 'active':
objects = [bpy.context.active_object]
elif affect == 'selected':
objects = bpy.context.selected_editable_objects
elif affect == 'visible':
objects = [ob for ob in bpy.context.view_layer.objects if ob.visible_get()]
elif affect == 'scene':
objects = bpy.context.scene.objects
all_mats = bpy.data.materials
baked_mats = [mat for mat in all_mats if material_name_suffix in mat.name]
for obj in objects:
if current_bake_type != "pbr":
baked_ao_flag = getattr(obj,"ao_map_name") != '' or getattr(obj,"lightmap_name") != ''
if not baked_ao_flag:
continue
for slot in obj.material_slots:
if show_bake_material:
for baked_mat in baked_mats:
if baked_mat.name == slot.material.name + material_name_suffix + obj.bake_version:
slot.material = baked_mat
else:
if (material_name_suffix in slot.material.name):
bake_material = slot.material
index = bake_material.name.find(material_name_suffix)
org_mat = all_mats.get(bake_material.name[0:index])
if org_mat is not None:
slot.material = org_mat
def preview_bake_texture(self,context):
context = bpy.context
bake_settings = context.scene.bake_settings
preview_bake_texture = context.scene.texture_settings.preview_bake_texture
vis_mats = material_functions.get_all_visible_materials()
for mat in vis_mats:
if not mat.node_tree:
continue
nodes = mat.node_tree.nodes
bake_texture_node = None
if bake_settings.lightmap_bake:
bake_texture_node = nodes.get(bake_settings.texture_node_lightmap)
elif bake_settings.ao_bake:
bake_texture_node = nodes.get(bake_settings.texture_node_ao)
if bake_texture_node is not None:
if preview_bake_texture:
node_functions.emission_setup(mat, bake_texture_node.outputs["Color"])
else:
pbr_node = node_functions.get_nodes_by_type(nodes, constants.Node_Types.pbr_node)
if len(pbr_node) == 0:
return
pbr_node = pbr_node[0]
node_functions.remove_node(mat, "Emission Bake")
node_functions.reconnect_PBR(mat, pbr_node)
def preview_lightmap(self, context):
preview_lightmap = context.scene.texture_settings.preview_lightmap
vis_mats = material_functions.get_all_visible_materials()
for material in vis_mats:
if not material.node_tree:
continue
nodes = material.node_tree.nodes
lightmap_node = nodes.get("Lightmap")
if lightmap_node is None:
continue
pbr_node = node_functions.get_pbr_node(material)
if pbr_node is None:
print("\n " + material.name + " has no PBR Node \n")
continue
base_color_input = node_functions.get_pbr_inputs(pbr_node)["base_color_input"]
emission_input = node_functions.get_pbr_inputs(pbr_node)["emission_input"]
lightmap_output = lightmap_node.outputs["Color"]
if preview_lightmap:
            mix_node_name = "Multiply Lightmap"
mix_node = node_functions.add_node(material,constants.Shader_Node_Types.mix, mix_node_name)
mix_node.blend_type = 'MULTIPLY'
mix_node.inputs[0].default_value = 1
pos_offset = mathutils.Vector((-200, 200))
mix_node.location = pbr_node.location + pos_offset
mix_node_input1 = mix_node.inputs["Color1"]
mix_node_input2 = mix_node.inputs["Color2"]
mix_node_output = mix_node.outputs["Color"]
if base_color_input.is_linked:
node_before_base_color = base_color_input.links[0].from_node
if not node_before_base_color.name == mix_node_name:
node_functions.make_link(material, node_before_base_color.outputs["Color"], mix_node_input1)
node_functions.make_link(material, lightmap_output, mix_node_input2)
node_functions.make_link(material, mix_node_output, base_color_input)
else :
mix_node_input1.default_value = base_color_input.default_value
node_functions.make_link(material, lightmap_output, mix_node_input2)
node_functions.make_link(material, mix_node_output, base_color_input)
node_functions.remove_link(material,lightmap_output,emission_input)
if not preview_lightmap:
            mix_node = nodes.get("Multiply Lightmap")
if mix_node is not None:
color_input_connections = len(mix_node.inputs["Color1"].links)
if (color_input_connections == 0):
node_functions.remove_node(material,mix_node.name)
else:
node_functions.remove_reconnect_node(material,mix_node.name)
node_functions.link_pbr_to_output(material,pbr_node)
def lightmap_to_emission(self, context, connect):
vis_mats = material_functions.get_all_visible_materials()
for material in vis_mats:
if not material.node_tree:
continue
nodes = material.node_tree.nodes
pbr_node = node_functions.get_pbr_node(material)
lightmap_node = nodes.get("Lightmap")
if lightmap_node is None:
continue
emission_input = node_functions.get_pbr_inputs(pbr_node)["emission_input"]
lightmap_output = lightmap_node.outputs["Color"]
if connect:
node_functions.make_link(material, lightmap_output, emission_input)
else:
node_functions.remove_link(material,lightmap_output,emission_input)
| true
| true
|
79020e3b97923b38276d349876f359df32550754
| 1,623
|
py
|
Python
|
classes/jogwidget.py
|
comgram/gerbil_gui
|
bacec0047bc2de6bf95b1734af6845896a04aeff
|
[
"MIT"
] | 21
|
2017-03-17T16:34:33.000Z
|
2022-03-12T14:52:40.000Z
|
classes/jogwidget.py
|
comgram/gerbil_gui
|
bacec0047bc2de6bf95b1734af6845896a04aeff
|
[
"MIT"
] | null | null | null |
classes/jogwidget.py
|
comgram/gerbil_gui
|
bacec0047bc2de6bf95b1734af6845896a04aeff
|
[
"MIT"
] | 7
|
2019-06-08T19:45:23.000Z
|
2022-01-04T02:44:41.000Z
|
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget
class JogWidget(QWidget):
def __init__(self, parent, callback):
super(JogWidget, self).__init__(parent)
self.parent = parent
self.callback = callback
self.wx_current = 0
self.wy_current = 0
self.wz_current = 0
self._x_start_screen = 0
self._y_start_screen = 0
self._z_accumulator = 0
def onIdle(self):
self._z_accumulator = 0
def mousePressEvent(self, event):
pos = event.pos()
self._x_start_screen = pos.x()
self._y_start_screen = pos.y()
self._relative_origin_x = self.wx_current
self._relative_origin_y = self.wy_current
def mouseReleaseEvent(self, event):
"""
Safe Feed
"""
pass
#self.callback("F111")
def wheelEvent(self, event):
delta = event.angleDelta().y()
self._z_accumulator += delta
z_goto = self.wz_current + self._z_accumulator / 1000
self.callback("G1 Z{:0.2f} F100".format(z_goto))
def mouseMoveEvent(self, event):
pos = event.pos()
x_current_screen = pos.x()
y_current_screen = pos.y()
x_goto = self._relative_origin_x + (x_current_screen - self._x_start_screen) / 20
y_goto = self._relative_origin_y + (self._y_start_screen - y_current_screen) / 20
self.callback("G1 X{:0.2f} Y{:0.2f} F400".format(x_goto, y_goto))
#print("G1 X{:0.2f} Y{:0.2f} F400".format(x_goto, y_goto))
| 30.622642
| 89
| 0.585952
|
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget
class JogWidget(QWidget):
def __init__(self, parent, callback):
super(JogWidget, self).__init__(parent)
self.parent = parent
self.callback = callback
self.wx_current = 0
self.wy_current = 0
self.wz_current = 0
self._x_start_screen = 0
self._y_start_screen = 0
self._z_accumulator = 0
def onIdle(self):
self._z_accumulator = 0
def mousePressEvent(self, event):
pos = event.pos()
self._x_start_screen = pos.x()
self._y_start_screen = pos.y()
self._relative_origin_x = self.wx_current
self._relative_origin_y = self.wy_current
def mouseReleaseEvent(self, event):
pass
def wheelEvent(self, event):
delta = event.angleDelta().y()
self._z_accumulator += delta
z_goto = self.wz_current + self._z_accumulator / 1000
self.callback("G1 Z{:0.2f} F100".format(z_goto))
def mouseMoveEvent(self, event):
pos = event.pos()
x_current_screen = pos.x()
y_current_screen = pos.y()
x_goto = self._relative_origin_x + (x_current_screen - self._x_start_screen) / 20
y_goto = self._relative_origin_y + (self._y_start_screen - y_current_screen) / 20
self.callback("G1 X{:0.2f} Y{:0.2f} F400".format(x_goto, y_goto))
| true
| true
|
79020f1257c17ad8e9a1448327db4009b9a53be4
| 967
|
py
|
Python
|
HLTrigger/Configuration/python/HLT_75e33/modules/hltEgammaHcalPFClusterIsoUnseeded_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1
|
2021-11-30T16:24:46.000Z
|
2021-11-30T16:24:46.000Z
|
HLTrigger/Configuration/python/HLT_75e33/modules/hltEgammaHcalPFClusterIsoUnseeded_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 4
|
2021-11-29T13:57:56.000Z
|
2022-03-29T06:28:36.000Z
|
HLTrigger/Configuration/python/HLT_75e33/modules/hltEgammaHcalPFClusterIsoUnseeded_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1
|
2021-11-30T16:16:05.000Z
|
2021-11-30T16:16:05.000Z
|
import FWCore.ParameterSet.Config as cms
hltEgammaHcalPFClusterIsoUnseeded = cms.EDProducer("EgammaHLTHcalPFClusterIsolationProducer",
absEtaLowEdges = cms.vdouble(0.0, 1.479),
doRhoCorrection = cms.bool(False),
drMax = cms.double(0.3),
drVetoBarrel = cms.double(0.0),
drVetoEndcap = cms.double(0.0),
effectiveAreas = cms.vdouble(0.2, 0.25),
energyBarrel = cms.double(0.0),
energyEndcap = cms.double(0.0),
etaStripBarrel = cms.double(0.0),
etaStripEndcap = cms.double(0.0),
pfClusterProducerHCAL = cms.InputTag("hltParticleFlowClusterHCALForEgamma"),
pfClusterProducerHFEM = cms.InputTag(""),
pfClusterProducerHFHAD = cms.InputTag(""),
recoEcalCandidateProducer = cms.InputTag("hltEgammaCandidatesUnseeded"),
rhoMax = cms.double(99999999.0),
rhoProducer = cms.InputTag("hltFixedGridRhoFastjetAllCaloForEGamma"),
rhoScale = cms.double(1.0),
useEt = cms.bool(True),
useHF = cms.bool(False)
)
| 40.291667
| 93
| 0.718718
|
import FWCore.ParameterSet.Config as cms
hltEgammaHcalPFClusterIsoUnseeded = cms.EDProducer("EgammaHLTHcalPFClusterIsolationProducer",
absEtaLowEdges = cms.vdouble(0.0, 1.479),
doRhoCorrection = cms.bool(False),
drMax = cms.double(0.3),
drVetoBarrel = cms.double(0.0),
drVetoEndcap = cms.double(0.0),
effectiveAreas = cms.vdouble(0.2, 0.25),
energyBarrel = cms.double(0.0),
energyEndcap = cms.double(0.0),
etaStripBarrel = cms.double(0.0),
etaStripEndcap = cms.double(0.0),
pfClusterProducerHCAL = cms.InputTag("hltParticleFlowClusterHCALForEgamma"),
pfClusterProducerHFEM = cms.InputTag(""),
pfClusterProducerHFHAD = cms.InputTag(""),
recoEcalCandidateProducer = cms.InputTag("hltEgammaCandidatesUnseeded"),
rhoMax = cms.double(99999999.0),
rhoProducer = cms.InputTag("hltFixedGridRhoFastjetAllCaloForEGamma"),
rhoScale = cms.double(1.0),
useEt = cms.bool(True),
useHF = cms.bool(False)
)
| true
| true
|
79020f30ce61b16c57dac6e6685ac888f80c5f71
| 91,488
|
py
|
Python
|
Compiler/instructions_tmp.py
|
nec-mpc/SPDZ-2-Float
|
70ef8bb00cd356c5cb91c1754637559a3e3dd60a
|
[
"BSD-4-Clause-UC"
] | 4
|
2021-02-18T07:52:01.000Z
|
2022-02-18T06:28:51.000Z
|
Compiler/instructions_tmp.py
|
nec-mpc/SPDZ-2-Float
|
70ef8bb00cd356c5cb91c1754637559a3e3dd60a
|
[
"BSD-4-Clause-UC"
] | null | null | null |
Compiler/instructions_tmp.py
|
nec-mpc/SPDZ-2-Float
|
70ef8bb00cd356c5cb91c1754637559a3e3dd60a
|
[
"BSD-4-Clause-UC"
] | 1
|
2021-08-04T07:56:03.000Z
|
2021-08-04T07:56:03.000Z
|
# Confidential:
# (C) 2017 University of Bristol. See License.txt
""" This module is for classes of actual assembly instructions.
All base classes, utility functions etc. should go in
instructions_base.py instead. This is for two reasons:
1) Easier generation of documentation
2) Ensures that 'from instructions import *' will only import assembly
instructions and nothing else.
Note: every instruction should have a suitable docstring for auto-generation of
documentation
"""
import itertools
import tools
from random import randint
from Compiler.config import *
from Compiler.exceptions import *
import Compiler.instructions_base as base
import math
#import ConfigParser
# avoid naming collision with input instruction
_python_input = input
###
### Load and store instructions
###
@base.gf2n
@base.vectorize
class ldi(base.Instruction):
r""" Assigns register $c_i$ the value $n$. """
__slots__ = []
code = base.opcodes['LDI']
arg_format = ['cw','i']
def execute(self):
self.args[0].value = self.args[1]
@base.gf2n
@base.vectorize
class ldsi(base.Instruction):
r""" Assigns register $s_i$ a share of the value $n$. """
__slots__ = []
code = base.opcodes['LDSI']
arg_format = ['sw','i']
def execute(self):
self.args[0].value = self.args[1]
@base.gf2n
@base.vectorize
class ldmc(base.DirectMemoryInstruction, base.ReadMemoryInstruction):
r""" Assigns register $c_i$ the value in memory \verb+C[n]+. """
__slots__ = ["code"]
code = base.opcodes['LDMC']
arg_format = ['cw','int']
def execute(self):
self.args[0].value = program.mem_c[self.args[1]]
@base.gf2n
@base.vectorize
class ldms(base.DirectMemoryInstruction, base.ReadMemoryInstruction):
r""" Assigns register $s_i$ the value in memory \verb+S[n]+. """
__slots__ = ["code"]
code = base.opcodes['LDMS']
arg_format = ['sw','int']
def execute(self):
self.args[0].value = program.mem_s[self.args[1]]
@base.gf2n
@base.vectorize
class stmc(base.DirectMemoryWriteInstruction):
r""" Sets \verb+C[n]+ to be the value $c_i$. """
__slots__ = ["code"]
code = base.opcodes['STMC']
arg_format = ['c','int']
def execute(self):
program.mem_c[self.args[1]] = self.args[0].value
@base.gf2n
@base.vectorize
class stms(base.DirectMemoryWriteInstruction):
r""" Sets \verb+S[n]+ to be the value $s_i$. """
__slots__ = ["code"]
code = base.opcodes['STMS']
arg_format = ['s','int']
def execute(self):
program.mem_s[self.args[1]] = self.args[0].value
@base.vectorize
class ldmint(base.DirectMemoryInstruction, base.ReadMemoryInstruction):
r""" Assigns register $ci_i$ the value in memory \verb+Ci[n]+. """
__slots__ = ["code"]
code = base.opcodes['LDMINT']
arg_format = ['ciw','int']
def execute(self):
self.args[0].value = program.mem_i[self.args[1]]
@base.vectorize
class stmint(base.DirectMemoryWriteInstruction):
r""" Sets \verb+Ci[n]+ to be the value $ci_i$. """
__slots__ = ["code"]
code = base.opcodes['STMINT']
arg_format = ['ci','int']
def execute(self):
program.mem_i[self.args[1]] = self.args[0].value
# must have separate instructions because address is always modp
@base.vectorize
class ldmci(base.ReadMemoryInstruction):
r""" Assigns register $c_i$ the value in memory \verb+C[cj]+. """
code = base.opcodes['LDMCI']
arg_format = ['cw','ci']
def execute(self):
self.args[0].value = program.mem_c[self.args[1].value]
@base.vectorize
class ldmsi(base.ReadMemoryInstruction):
r""" Assigns register $s_i$ the value in memory \verb+S[cj]+. """
code = base.opcodes['LDMSI']
arg_format = ['sw','ci']
def execute(self):
self.args[0].value = program.mem_s[self.args[1].value]
@base.vectorize
class stmci(base.WriteMemoryInstruction):
r""" Sets \verb+C[cj]+ to be the value $c_i$. """
code = base.opcodes['STMCI']
arg_format = ['c','ci']
def execute(self):
program.mem_c[self.args[1].value] = self.args[0].value
@base.vectorize
class stmsi(base.WriteMemoryInstruction):
r""" Sets \verb+S[cj]+ to be the value $s_i$. """
code = base.opcodes['STMSI']
arg_format = ['s','ci']
def execute(self):
program.mem_s[self.args[1].value] = self.args[0].value
@base.vectorize
class ldminti(base.ReadMemoryInstruction):
r""" Assigns register $ci_i$ the value in memory \verb+Ci[cj]+. """
code = base.opcodes['LDMINTI']
arg_format = ['ciw','ci']
def execute(self):
self.args[0].value = program.mem_i[self.args[1].value]
@base.vectorize
class stminti(base.WriteMemoryInstruction):
r""" Sets \verb+Ci[cj]+ to be the value $ci_i$. """
code = base.opcodes['STMINTI']
arg_format = ['ci','ci']
def execute(self):
program.mem_i[self.args[1].value] = self.args[0].value
@base.vectorize
class gldmci(base.ReadMemoryInstruction):
r""" Assigns register $c_i$ the value in memory \verb+C[cj]+. """
code = base.opcodes['LDMCI'] + 0x100
arg_format = ['cgw','ci']
def execute(self):
self.args[0].value = program.mem_c[self.args[1].value]
@base.vectorize
class gldmsi(base.ReadMemoryInstruction):
r""" Assigns register $s_i$ the value in memory \verb+S[cj]+. """
code = base.opcodes['LDMSI'] + 0x100
arg_format = ['sgw','ci']
def execute(self):
self.args[0].value = program.mem_s[self.args[1].value]
@base.vectorize
class gstmci(base.WriteMemoryInstruction):
r""" Sets \verb+C[cj]+ to be the value $c_i$. """
code = base.opcodes['STMCI'] + 0x100
arg_format = ['cg','ci']
def execute(self):
program.mem_c[self.args[1].value] = self.args[0].value
@base.vectorize
class gstmsi(base.WriteMemoryInstruction):
r""" Sets \verb+S[cj]+ to be the value $s_i$. """
code = base.opcodes['STMSI'] + 0x100
arg_format = ['sg','ci']
def execute(self):
program.mem_s[self.args[1].value] = self.args[0].value
@base.gf2n
@base.vectorize
class protectmems(base.Instruction):
r""" Protects secret memory range $[ci_i,ci_j)$. """
code = base.opcodes['PROTECTMEMS']
arg_format = ['ci','ci']
@base.gf2n
@base.vectorize
class protectmemc(base.Instruction):
r""" Protects clear memory range $[ci_i,ci_j)$. """
code = base.opcodes['PROTECTMEMC']
arg_format = ['ci','ci']
@base.gf2n
@base.vectorize
class protectmemint(base.Instruction):
r""" Protects integer memory range $[ci_i,ci_j)$. """
code = base.opcodes['PROTECTMEMINT']
arg_format = ['ci','ci']
@base.gf2n
@base.vectorize
class movc(base.Instruction):
r""" Assigns register $c_i$ the value in the register $c_j$. """
__slots__ = ["code"]
code = base.opcodes['MOVC']
arg_format = ['cw','c']
def execute(self):
self.args[0].value = self.args[1].value
@base.gf2n
@base.vectorize
class movs(base.Instruction):
r""" Assigns register $s_i$ the value in the register $s_j$. """
__slots__ = ["code"]
code = base.opcodes['MOVS']
arg_format = ['sw','s']
def execute(self):
self.args[0].value = self.args[1].value
@base.vectorize
class movint(base.Instruction):
r""" Assigns register $ci_i$ the value in the register $ci_j$. """
__slots__ = ["code"]
code = base.opcodes['MOVINT']
arg_format = ['ciw','ci']
@base.vectorize
class pushint(base.StackInstruction):
r""" Pushes register $ci_i$ to the thread-local stack. """
code = base.opcodes['PUSHINT']
arg_format = ['ci']
@base.vectorize
class popint(base.StackInstruction):
r""" Pops from the thread-local stack to register $ci_i$. """
code = base.opcodes['POPINT']
arg_format = ['ciw']
###
### Machine
###
@base.vectorize
class ldtn(base.Instruction):
r""" Assigns register $c_i$ the number of the current thread. """
code = base.opcodes['LDTN']
arg_format = ['ciw']
@base.vectorize
class ldarg(base.Instruction):
r""" Assigns register $c_i$ the argument passed to the current thread. """
code = base.opcodes['LDARG']
arg_format = ['ciw']
@base.vectorize
class starg(base.Instruction):
r""" Assigns register $c_i$ to the argument. """
code = base.opcodes['STARG']
arg_format = ['ci']
@base.gf2n
class reqbl(base.Instruction):
r""" Require bit length $n". """
code = base.opcodes['REQBL']
arg_format = ['int']
class time(base.Instruction):
r""" Output epoch time. """
code = base.opcodes['TIME']
arg_format = []
class start(base.Instruction):
r""" Start timer. """
code = base.opcodes['START']
arg_format = ['i']
class stop(base.Instruction):
r""" Stop timer. """
code = base.opcodes['STOP']
arg_format = ['i']
class use(base.Instruction):
r""" Offline data usage. """
code = base.opcodes['USE']
arg_format = ['int','int','int']
class use_inp(base.Instruction):
r""" Input usage. """
code = base.opcodes['USE_INP']
arg_format = ['int','int','int']
class run_tape(base.Instruction):
r""" Start tape $n$ in thread $c_i$ with argument $c_j$. """
code = base.opcodes['RUN_TAPE']
arg_format = ['int','int','int']
class join_tape(base.Instruction):
r""" Join thread $c_i$. """
code = base.opcodes['JOIN_TAPE']
arg_format = ['int']
class crash(base.IOInstruction):
r""" Crash runtime. """
code = base.opcodes['CRASH']
arg_format = []
@base.gf2n
class use_prep(base.Instruction):
r""" Input usage. """
code = base.opcodes['USE_PREP']
arg_format = ['str','int']
###
### Basic arithmetic
###
@base.gf2n
@base.vectorize
class addc(base.AddBase):
r""" Clear addition $c_i=c_j+c_k$. """
__slots__ = []
code = base.opcodes['ADDC']
arg_format = ['cw','c','c']
@base.gf2n
@base.vectorize
class adds(base.AddBase):
r""" Secret addition $s_i=s_j+s_k$. """
__slots__ = []
code = base.opcodes['ADDS']
arg_format = ['sw','s','s']
#@base.gf2n
#@base.vectorize
#class eadds(base.AddBase):
r""" Secret addition $s_i=s_j+s_k$. """
# __slots__ = []
# code = base.opcodes['EADDS']
# arg_format = ['sw','s','s']
@base.gf2n
@base.vectorize
class addm(base.AddBase):
r""" Mixed addition $s_i=s_j+c_k$. """
__slots__ = []
code = base.opcodes['ADDM']
arg_format = ['sw','s','c']
#@base.gf2n
#@base.vectorize
#class eaddm(base.AddBase):
r""" Mixed addition $s_i=s_j+c_k$. """
# __slots__ = []
# code = base.opcodes['EADDM']
# arg_format = ['sw','s','c']
@base.gf2n
@base.vectorize
class subc(base.SubBase):
r""" Clear subtraction $c_i=c_j-c_k$. """
__slots__ = []
code = base.opcodes['SUBC']
arg_format = ['cw','c','c']
@base.gf2n
@base.vectorize
class subs(base.SubBase):
r""" Secret subtraction $s_i=s_j-s_k$. """
__slots__ = []
code = base.opcodes['SUBS']
arg_format = ['sw','s','s']
#@base.gf2n
#@base.vectorize
#class esubs(base.SubBase):
r""" Secret subtraction $s_i=s_j-s_k$. """
# __slots__ = []
# code = base.opcodes['ESUBS']
# arg_format = ['sw','s','s']
@base.gf2n
@base.vectorize
class subml(base.SubBase):
r""" Mixed subtraction $s_i=s_j-c_k$. """
__slots__ = []
code = base.opcodes['SUBML']
arg_format = ['sw','s','c']
@base.gf2n
@base.vectorize
class submr(base.SubBase):
r""" Mixed subtraction $s_i=c_j-s_k$. """
__slots__ = []
code = base.opcodes['SUBMR']
arg_format = ['sw','c','s']
@base.gf2n
@base.vectorize
class mulc(base.MulBase):
r""" Clear multiplication $c_i=c_j \cdot c_k$. """
__slots__ = []
code = base.opcodes['MULC']
arg_format = ['cw','c','c']
@base.gf2n
@base.vectorize
class mulm(base.MulBase):
r""" Mixed multiplication $s_i=c_j \cdot s_k$. """
__slots__ = []
code = base.opcodes['MULM']
arg_format = ['sw','s','c']
#@base.gf2n
#@base.vectorize
#class emulm(base.MulBase):
r""" Mixed multiplication $s_i=c_j \cdot s_k$. """
# __slots__ = []
# code = base.opcodes['EMULM']
# arg_format = ['sw','s','c']
@base.gf2n
@base.vectorize
class divc(base.Instruction):
r""" Clear division $c_i=c_j/c_k$. """
__slots__ = []
code = base.opcodes['DIVC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = self.args[1].value * pow(self.args[2].value, program.P-2, program.P) % program.P
@base.gf2n
@base.vectorize
class modc(base.Instruction):
r""" Clear modular reduction $c_i=c_j/c_k$. """
__slots__ = []
code = base.opcodes['MODC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = self.args[1].value % self.args[2].value
@base.vectorize
class legendrec(base.Instruction):
r""" Clear Legendre symbol computation, $c_i = (c_j / p)$. """
__slots__ = []
code = base.opcodes['LEGENDREC']
arg_format = ['cw','c']
@base.vectorize
class digestc(base.Instruction):
r""" Clear truncated hash computation, $c_i = H(c_j)[bytes]$. """
__slots__ = []
code = base.opcodes['DIGESTC']
arg_format = ['cw','c','int']
###
### Bitwise operations
###
@base.gf2n
@base.vectorize
class andc(base.Instruction):
r""" Clear logical AND $c_i = c_j \land c_k$ """
__slots__ = []
code = base.opcodes['ANDC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = (self.args[1].value & self.args[2].value) % program.P
@base.gf2n
@base.vectorize
class orc(base.Instruction):
r""" Clear logical OR $c_i = c_j \lor c_k$ """
__slots__ = []
code = base.opcodes['ORC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = (self.args[1].value | self.args[2].value) % program.P
@base.gf2n
@base.vectorize
class xorc(base.Instruction):
r""" Clear logical XOR $c_i = c_j \oplus c_k$ """
__slots__ = []
code = base.opcodes['XORC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = (self.args[1].value ^ self.args[2].value) % program.P
@base.vectorize
class notc(base.Instruction):
r""" Clear logical NOT $c_i = \lnot c_j$ """
__slots__ = []
code = base.opcodes['NOTC']
arg_format = ['cw','c', 'int']
def execute(self):
self.args[0].value = (~self.args[1].value + 2 ** self.args[2]) % program.P
@base.vectorize
class gnotc(base.Instruction):
r""" Clear logical NOT $cg_i = \lnot cg_j$ """
__slots__ = []
code = (1 << 8) + base.opcodes['NOTC']
arg_format = ['cgw','cg']
def is_gf2n(self):
return True
def execute(self):
self.args[0].value = ~self.args[1].value
@base.vectorize
class gbitdec(base.Instruction):
r""" Store every $n$-th bit of $cg_i$ in $cg_j, \dots$. """
__slots__ = []
code = base.opcodes['GBITDEC']
arg_format = tools.chain(['cg', 'int'], itertools.repeat('cgw'))
    def is_gf2n(self):
return True
def has_var_args(self):
return True
# ADDED
#@base.vectorize
#class e_skew_dec(base.Instruction):
#r""" Pre-computation for bit-decomposition """
#__slots__ = []
#code = base.opcodes['E_SKEW_DEC']
#arg_format = tools.chain(['s', 'int'], itertools.repeat('sgw'))
@base.vectorize
class e_skew_bit_dec(base.Instruction):
r""" Pre-computation for bit-decomposition """
__slots__ = []
code = base.opcodes['E_SKEW_BIT_DEC']
arg_format = tools.chain(['s', 'int'], itertools.repeat('sgw'))
class e_skew_bit_rec(base.Instruction):
r""" Pre-computation for ring-composition """
__slots__ = []
code = base.opcodes['E_SKEW_BIT_REC']
arg_format = ['sg', 'sgw', 'sgw', 'sgw']
@base.vectorize
class e_skew_bit_inj(base.Instruction):
r""" Pre-computation for bit-injection """
__slots__ = []
code = base.opcodes['E_SKEW_BIT_INJ']
arg_format = ['sg', 'sw', 'sw', 'sw']
#class e_post_rec(base.Instruction):
#r""" Post-computation for ring-composition """
#__slots__ = []
#code = base.opcodes['E_POST_REC']
#arg_format = tools.chain(['sw', 'int'], itertools.repeat('sg'))
class e_skew_ring_rec(base.Instruction):
r""" Post-computation for ring-composition """
__slots__ = []
code = base.opcodes['E_SKEW_RING_REC']
arg_format = tools.chain(['sw', 'int'], itertools.repeat('sg'))
# END ADDED
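# Note on the e_skew_* instructions above (a reading of how they are used further below,
# not a separate specification): e_skew_bit_dec appears to emit three GF(2) shares
# (x1, x2, x3) per bit of its mod-2^n input; e_bitdec consumes them in groups of three
# and recombines each bit as x1 XOR x2 XOR x3 plus a carry chain, while e_skew_bit_inj
# and e_skew_ring_rec provide the matching conversions back towards mod-2^n sharing.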
@base.vectorize
class gbitcom(base.Instruction):
r""" Store the bits $cg_j, \dots$ as every $n$-th bit of $cg_i$. """
__slots__ = []
code = base.opcodes['GBITCOM']
arg_format = tools.chain(['cgw', 'int'], itertools.repeat('cg'))
    def is_gf2n(self):
return True
def has_var_args(self):
return True
###
### Special GF(2) arithmetic instructions
###
@base.vectorize
class gmulbitc(base.MulBase):
r""" Clear GF(2^n) by clear GF(2) multiplication """
__slots__ = []
code = base.opcodes['GMULBITC']
arg_format = ['cgw','cg','cg']
def is_gf2n(self):
return True
@base.vectorize
class gmulbitm(base.MulBase):
r""" Secret GF(2^n) by clear GF(2) multiplication """
__slots__ = []
code = base.opcodes['GMULBITM']
arg_format = ['sgw','sg','cg']
def is_gf2n(self):
return True
###
### Arithmetic with immediate values
###
@base.gf2n
@base.vectorize
class addci(base.ClearImmediate):
""" Clear addition of immediate value $c_i=c_j+n$. """
__slots__ = []
code = base.opcodes['ADDCI']
op = '__add__'
@base.gf2n
@base.vectorize
class addsi(base.SharedImmediate):
""" Secret addition of immediate value $s_i=s_j+n$. """
__slots__ = []
code = base.opcodes['ADDSI']
op = '__add__'
@base.gf2n
@base.vectorize
class subci(base.ClearImmediate):
r""" Clear subtraction of immediate value $c_i=c_j-n$. """
__slots__ = []
code = base.opcodes['SUBCI']
op = '__sub__'
@base.gf2n
@base.vectorize
class subsi(base.SharedImmediate):
r""" Secret subtraction of immediate value $s_i=s_j-n$. """
__slots__ = []
code = base.opcodes['SUBSI']
op = '__sub__'
@base.gf2n
@base.vectorize
class subcfi(base.ClearImmediate):
r""" Clear subtraction from immediate value $c_i=n-c_j$. """
__slots__ = []
code = base.opcodes['SUBCFI']
op = '__rsub__'
@base.gf2n
@base.vectorize
class subsfi(base.SharedImmediate):
r""" Secret subtraction from immediate value $s_i=n-s_j$. """
__slots__ = []
code = base.opcodes['SUBSFI']
op = '__rsub__'
@base.gf2n
@base.vectorize
class mulci(base.ClearImmediate):
r""" Clear multiplication by immediate value $c_i=c_j \cdot n$. """
__slots__ = []
code = base.opcodes['MULCI']
op = '__mul__'
@base.gf2n
@base.vectorize
class mulsi(base.SharedImmediate):
r""" Secret multiplication by immediate value $s_i=s_j \cdot n$. """
__slots__ = []
code = base.opcodes['MULSI']
op = '__mul__'
@base.gf2n
@base.vectorize
class divci(base.ClearImmediate):
r""" Clear division by immediate value $c_i=c_j/n$. """
__slots__ = []
code = base.opcodes['DIVCI']
def execute(self):
self.args[0].value = self.args[1].value * pow(self.args[2], program.P-2, program.P) % program.P
@base.gf2n
@base.vectorize
class modci(base.ClearImmediate):
r""" Clear modular reduction by immediate value $c_i=c_j \mod{n}$. """
__slots__ = []
code = base.opcodes['MODCI']
op = '__mod__'
@base.gf2n
@base.vectorize
class andci(base.ClearImmediate):
r""" Clear logical AND with immediate value $c_i = c_j \land c_k$ """
__slots__ = []
code = base.opcodes['ANDCI']
op = '__and__'
@base.gf2n
@base.vectorize
class xorci(base.ClearImmediate):
r""" Clear logical XOR with immediate value $c_i = c_j \oplus c_k$ """
__slots__ = []
code = base.opcodes['XORCI']
op = '__xor__'
@base.gf2n
@base.vectorize
class orci(base.ClearImmediate):
r""" Clear logical OR with immediate value $c_i = c_j \vee c_k$ """
__slots__ = []
code = base.opcodes['ORCI']
op = '__or__'
###
### Shift instructions
###
@base.gf2n
@base.vectorize
class shlc(base.Instruction):
r""" Clear bitwise shift left $c_i = c_j << c_k$ """
__slots__ = []
code = base.opcodes['SHLC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = (self.args[1].value << self.args[2].value) % program.P
@base.gf2n
@base.vectorize
class shrc(base.Instruction):
r""" Clear bitwise shift right $c_i = c_j >> c_k$ """
__slots__ = []
code = base.opcodes['SHRC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = (self.args[1].value >> self.args[2].value) % program.P
@base.gf2n
@base.vectorize
class shlci(base.ClearShiftInstruction):
r""" Clear bitwise shift left by immediate value $c_i = c_j << n$ """
__slots__ = []
code = base.opcodes['SHLCI']
op = '__lshift__'
@base.gf2n
@base.vectorize
class shrci(base.ClearShiftInstruction):
r""" Clear bitwise shift right by immediate value $c_i = c_j >> n$ """
__slots__ = []
code = base.opcodes['SHRCI']
op = '__rshift__'
###
### Data access instructions
###
@base.gf2n
@base.vectorize
class triple(base.DataInstruction):
r""" Load secret variables $s_i$, $s_j$ and $s_k$
with the next multiplication triple. """
__slots__ = ['data_type']
code = base.opcodes['TRIPLE']
arg_format = ['sw','sw','sw']
data_type = 'triple'
def execute(self):
self.args[0].value = randint(0,program.P)
self.args[1].value = randint(0,program.P)
self.args[2].value = (self.args[0].value * self.args[1].value) % program.P
@base.vectorize
class gbittriple(base.DataInstruction):
r""" Load secret variables $s_i$, $s_j$ and $s_k$
with the next GF(2) multiplication triple. """
__slots__ = ['data_type']
code = base.opcodes['GBITTRIPLE']
arg_format = ['sgw','sgw','sgw']
data_type = 'bittriple'
field_type = 'gf2n'
def is_gf2n(self):
return True
@base.vectorize
class gbitgf2ntriple(base.DataInstruction):
r""" Load secret variables $s_i$, $s_j$ and $s_k$
with the next GF(2) and GF(2^n) multiplication triple. """
code = base.opcodes['GBITGF2NTRIPLE']
arg_format = ['sgw','sgw','sgw']
data_type = 'bitgf2ntriple'
field_type = 'gf2n'
def is_gf2n(self):
return True
@base.gf2n
@base.vectorize
class bit(base.DataInstruction):
r""" Load secret variable $s_i$
with the next secret bit. """
__slots__ = []
code = base.opcodes['BIT']
arg_format = ['sw']
data_type = 'bit'
def execute(self):
self.args[0].value = randint(0,1)
@base.gf2n
@base.vectorize
class square(base.DataInstruction):
r""" Load secret variables $s_i$ and $s_j$
with the next squaring tuple. """
__slots__ = []
code = base.opcodes['SQUARE']
arg_format = ['sw','sw']
data_type = 'square'
def execute(self):
self.args[0].value = randint(0,program.P)
self.args[1].value = (self.args[0].value * self.args[0].value) % program.P
@base.gf2n
@base.vectorize
class inverse(base.DataInstruction):
r""" Load secret variables $s_i$, $s_j$ and $s_k$
with the next inverse triple. """
__slots__ = []
code = base.opcodes['INV']
arg_format = ['sw','sw']
data_type = 'inverse'
def execute(self):
self.args[0].value = randint(0,program.P)
import gmpy
self.args[1].value = int(gmpy.invert(self.args[0].value, program.P))
@base.gf2n
@base.vectorize
class inputmask(base.Instruction):
r""" Load secret $s_i$ with the next input mask for player $p$ and
write the mask on player $p$'s private output. """
__slots__ = []
code = base.opcodes['INPUTMASK']
arg_format = ['sw', 'p']
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, 'input', self.args[1]), \
self.get_size())
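    # add_usage tallies the offline/preprocessed data this instruction consumes (here an
    # input mask for player p), so the compiler can report per-field-type requirements.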
@base.gf2n
@base.vectorize
class prep(base.Instruction):
r""" Custom preprocessed data """
__slots__ = []
code = base.opcodes['PREP']
arg_format = tools.chain(['str'], itertools.repeat('sw'))
gf2n_arg_format = tools.chain(['str'], itertools.repeat('sgw'))
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, self.args[0]), 1)
def has_var_args(self):
return True
###
### I/O
###
@base.gf2n
@base.vectorize
class asm_input(base.IOInstruction):
r""" Receive input from player $p$ and put in register $s_i$. """
__slots__ = []
code = base.opcodes['INPUT']
arg_format = ['sw', 'p']
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, 'input', self.args[1]), \
self.get_size())
def execute(self):
self.args[0].value = _python_input("Enter player %d's input:" % self.args[1]) % program.P
@base.gf2n
class startinput(base.RawInputInstruction):
r""" Receive inputs from player $p$. """
__slots__ = []
code = base.opcodes['STARTINPUT']
arg_format = ['p', 'int']
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, 'input', self.args[0]), \
self.args[1])
class stopinput(base.RawInputInstruction):
r""" Receive inputs from player $p$ and put in registers. """
__slots__ = []
code = base.opcodes['STOPINPUT']
arg_format = tools.chain(['p'], itertools.repeat('sw'))
def has_var_args(self):
return True
class gstopinput(base.RawInputInstruction):
r""" Receive inputs from player $p$ and put in registers. """
__slots__ = []
code = 0x100 + base.opcodes['STOPINPUT']
arg_format = tools.chain(['p'], itertools.repeat('sgw'))
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class print_mem(base.IOInstruction):
r""" Print value in clear memory \verb|C[ci]| to stdout. """
__slots__ = []
code = base.opcodes['PRINTMEM']
arg_format = ['c']
def execute(self):
pass
@base.gf2n
@base.vectorize
class print_reg(base.IOInstruction):
r""" Print value of register \verb|ci| to stdout and optional 4-char comment. """
__slots__ = []
code = base.opcodes['PRINTREG']
arg_format = ['c','i']
def __init__(self, reg, comment=''):
super(print_reg_class, self).__init__(reg, self.str_to_int(comment))
def execute(self):
pass
@base.gf2n
@base.vectorize
class print_reg_plain(base.IOInstruction):
r""" Print only the value of register \verb|ci| to stdout. """
__slots__ = []
code = base.opcodes['PRINTREGPLAIN']
arg_format = ['c']
#@base.gf2n
@base.vectorize
class e_print_fixed_plain(base.IOInstruction):
r""" Print only the fixed value of register \verb|ci| to stdout. """
__slots__ = []
code = base.opcodes['E_PRINTFIXEDPLAIN']
arg_format = ['c', 'int']
@base.vectorize
class print_float_plain(base.IOInstruction):
__slots__ = []
code = base.opcodes['PRINTFLOATPLAIN']
arg_format = ['c', 'c', 'c', 'c']
class print_int(base.IOInstruction):
r""" Print only the value of register \verb|ci| to stdout. """
__slots__ = []
code = base.opcodes['PRINTINT']
arg_format = ['ci']
class print_char(base.IOInstruction):
r""" Print a single character to stdout. """
code = base.opcodes['PRINTCHR']
arg_format = ['int']
def __init__(self, ch):
super(print_char, self).__init__(ord(ch))
class print_char4(base.IOInstruction):
r""" Print a 4 character string. """
code = base.opcodes['PRINTSTR']
arg_format = ['int']
def __init__(self, val):
super(print_char4, self).__init__(self.str_to_int(val))
@base.vectorize
class print_char_regint(base.IOInstruction):
r""" Print register $ci_i$ as a single character to stdout. """
code = base.opcodes['PRINTCHRINT']
arg_format = ['ci']
@base.vectorize
class print_char4_regint(base.IOInstruction):
r""" Print register $ci_i$ as a four character string to stdout. """
code = base.opcodes['PRINTSTRINT']
arg_format = ['ci']
@base.vectorize
class pubinput(base.PublicFileIOInstruction):
__slots__ = []
code = base.opcodes['PUBINPUT']
arg_format = ['ciw']
@base.vectorize
class readsocketc(base.IOInstruction):
"""Read a variable number of clear GF(p) values from socket for a specified client id and store in registers"""
__slots__ = []
code = base.opcodes['READSOCKETC']
arg_format = tools.chain(['ci'], itertools.repeat('cw'))
def has_var_args(self):
return True
@base.vectorize
class readsockets(base.IOInstruction):
"""Read a variable number of secret shares + MACs from socket for a client id and store in registers"""
__slots__ = []
code = base.opcodes['READSOCKETS']
arg_format = tools.chain(['ci'], itertools.repeat('sw'))
def has_var_args(self):
return True
@base.vectorize
class readsocketint(base.IOInstruction):
"""Read variable number of 32-bit int from socket for a client id and store in registers"""
__slots__ = []
code = base.opcodes['READSOCKETINT']
arg_format = tools.chain(['ci'], itertools.repeat('ciw'))
def has_var_args(self):
return True
@base.vectorize
class writesocketc(base.IOInstruction):
"""
Write a variable number of clear GF(p) values from registers into socket
for a specified client id, message_type
"""
__slots__ = []
code = base.opcodes['WRITESOCKETC']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('c'))
def has_var_args(self):
return True
@base.vectorize
class writesockets(base.IOInstruction):
"""
Write a variable number of secret shares + MACs from registers into a socket
for a specified client id, message_type
"""
__slots__ = []
code = base.opcodes['WRITESOCKETS']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('s'))
def has_var_args(self):
return True
@base.vectorize
class writesocketshare(base.IOInstruction):
"""
Write a variable number of secret shares (without MACs) from registers into socket
for a specified client id, message_type
"""
__slots__ = []
code = base.opcodes['WRITESOCKETSHARE']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('s'))
def has_var_args(self):
return True
@base.vectorize
class writesocketint(base.IOInstruction):
"""
Write a variable number of 32-bit ints from registers into socket
for a specified client id, message_type
"""
__slots__ = []
code = base.opcodes['WRITESOCKETINT']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('ci'))
def has_var_args(self):
return True
class listen(base.IOInstruction):
"""Open a server socket on a party specific port number and listen for client connections (non-blocking)"""
__slots__ = []
code = base.opcodes['LISTEN']
arg_format = ['int']
class acceptclientconnection(base.IOInstruction):
"""Wait for a connection at the given port and write socket handle to register """
__slots__ = []
code = base.opcodes['ACCEPTCLIENTCONNECTION']
arg_format = ['ciw', 'int']
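# A typical client-interface flow with the socket instructions in this block (a sketch
# based only on the signatures here, not a canonical example):
#
#   listen(port)                                    # open server socket (non-blocking)
#   acceptclientconnection(client_id, port)         # wait for a client, handle -> client_id
#   readsocketint(client_id, reg1, reg2, ...)       # read 32-bit ints from the client
#   writesocketc(client_id, message_type, c1, ...)  # send clear values back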
class connectipv4(base.IOInstruction):
"""Connect to server at IPv4 address in register \verb|cj| at given port. Write socket handle to register \verb|ci|"""
__slots__ = []
code = base.opcodes['CONNECTIPV4']
arg_format = ['ciw', 'ci', 'int']
class readclientpublickey(base.IOInstruction):
"""Read a client public key as 8 32-bit ints for a specified client id"""
__slots__ = []
code = base.opcodes['READCLIENTPUBLICKEY']
arg_format = tools.chain(['ci'], itertools.repeat('ci'))
def has_var_args(self):
return True
class initsecuresocket(base.IOInstruction):
"""Read a client public key as 8 32-bit ints for a specified client id,
negotiate a shared key via STS and use it for replay resistant comms"""
__slots__ = []
code = base.opcodes['INITSECURESOCKET']
arg_format = tools.chain(['ci'], itertools.repeat('ci'))
def has_var_args(self):
return True
class respsecuresocket(base.IOInstruction):
"""Read a client public key as 8 32-bit ints for a specified client id,
negotiate a shared key via STS and use it for replay resistant comms"""
__slots__ = []
code = base.opcodes['RESPSECURESOCKET']
arg_format = tools.chain(['ci'], itertools.repeat('ci'))
def has_var_args(self):
return True
class writesharestofile(base.IOInstruction):
"""Write shares to a file"""
__slots__ = []
code = base.opcodes['WRITEFILESHARE']
arg_format = itertools.repeat('s')
def has_var_args(self):
return True
class readsharesfromfile(base.IOInstruction):
"""
Read shares from a file. Pass in start posn, return finish posn, shares.
Finish posn will return:
-2 file not found
-1 eof reached
position in file after read finished
"""
__slots__ = []
code = base.opcodes['READFILESHARE']
arg_format = tools.chain(['ci', 'ciw'], itertools.repeat('sw'))
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class raw_output(base.PublicFileIOInstruction):
r""" Raw output of register \verb|ci| to file. """
__slots__ = []
code = base.opcodes['RAWOUTPUT']
arg_format = ['c']
@base.gf2n
@base.vectorize
class startprivateoutput(base.Instruction):
r""" Initiate private output to $n$ of $s_j$ via $s_i$. """
__slots__ = []
code = base.opcodes['STARTPRIVATEOUTPUT']
arg_format = ['sw','s','p']
@base.gf2n
@base.vectorize
class stopprivateoutput(base.Instruction):
r""" Previously iniated private output to $n$ via $c_i$. """
__slots__ = []
code = base.opcodes['STOPPRIVATEOUTPUT']
arg_format = ['c','p']
@base.vectorize
class rand(base.Instruction):
__slots__ = []
code = base.opcodes['RAND']
arg_format = ['ciw','ci']
###
### Integer operations
###
@base.vectorize
class ldint(base.Instruction):
__slots__ = []
code = base.opcodes['LDINT']
arg_format = ['ciw', 'i']
@base.vectorize
class addint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['ADDINT']
@base.vectorize
class subint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['SUBINT']
@base.vectorize
class mulint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['MULINT']
@base.vectorize
class divint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['DIVINT']
###
### Clear comparison instructions
###
@base.vectorize
class eqzc(base.UnaryComparisonInstruction):
r""" Clear comparison $c_i = (c_j \stackrel{?}{==} 0)$. """
__slots__ = []
code = base.opcodes['EQZC']
def execute(self):
if self.args[1].value == 0:
self.args[0].value = 1
else:
self.args[0].value = 0
@base.vectorize
class ltzc(base.UnaryComparisonInstruction):
r""" Clear comparison $c_i = (c_j \stackrel{?}{<} 0)$. """
__slots__ = []
code = base.opcodes['LTZC']
@base.vectorize
class ltc(base.IntegerInstruction):
r""" Clear comparison $c_i = (c_j \stackrel{?}{<} c_k)$. """
__slots__ = []
code = base.opcodes['LTC']
@base.vectorize
class gtc(base.IntegerInstruction):
r""" Clear comparison $c_i = (c_j \stackrel{?}{>} c_k)$. """
__slots__ = []
code = base.opcodes['GTC']
@base.vectorize
class eqc(base.IntegerInstruction):
r""" Clear comparison $c_i = (c_j \stackrel{?}{==} c_k)$. """
__slots__ = []
code = base.opcodes['EQC']
###
### Jumps etc
###
class jmp(base.JumpInstruction):
""" Unconditional relative jump of $n+1$ instructions. """
__slots__ = []
code = base.opcodes['JMP']
arg_format = ['int']
jump_arg = 0
def execute(self):
pass
class jmpi(base.JumpInstruction):
""" Unconditional relative jump of $c_i+1$ instructions. """
__slots__ = []
code = base.opcodes['JMPI']
arg_format = ['ci']
jump_arg = 0
class jmpnz(base.JumpInstruction):
r""" Jump $n+1$ instructions if $c_i \neq 0$.
e.g.
jmpnz(c, n) : advance n+1 instructions if c is non-zero
jmpnz(c, 0) : do nothing
jmpnz(c, -1): infinite loop if c is non-zero
"""
__slots__ = []
code = base.opcodes['JMPNZ']
arg_format = ['ci', 'int']
jump_arg = 1
def execute(self):
pass
class jmpeqz(base.JumpInstruction):
r""" Jump $n+1$ instructions if $c_i == 0$. """
__slots__ = []
code = base.opcodes['JMPEQZ']
arg_format = ['ci', 'int']
jump_arg = 1
def execute(self):
pass
###
### Conversions
###
@base.gf2n
@base.vectorize
class convint(base.Instruction):
""" Convert from integer register $ci_j$ to clear modp register $c_i$. """
__slots__ = []
code = base.opcodes['CONVINT']
arg_format = ['cw', 'ci']
@base.vectorize
class convmodp(base.Instruction):
""" Convert from clear modp register $c_j$ to integer register $ci_i$. """
__slots__ = []
code = base.opcodes['CONVMODP']
arg_format = ['ciw', 'c', 'int']
def __init__(self, *args, **kwargs):
bitlength = kwargs.get('bitlength', program.bit_length)
super(convmodp_class, self).__init__(*(args + (bitlength,)))
@base.vectorize
class gconvgf2n(base.Instruction):
""" Convert from clear modp register $c_j$ to integer register $ci_i$. """
__slots__ = []
code = base.opcodes['GCONVGF2N']
arg_format = ['ciw', 'cg']
###
### Other instructions
###
@base.gf2n
@base.vectorize
class startopen(base.VarArgsInstruction):
""" Start opening secret register $s_i$. """
__slots__ = []
code = base.opcodes['STARTOPEN']
arg_format = itertools.repeat('s')
def execute(self):
for arg in self.args[::-1]:
program.curr_block.open_queue.append(arg.value)
@base.gf2n
@base.vectorize
class e_startopen(startopen_class):
""" Start opening secret register $s_i$. """
__slots__ = []
code = base.opcodes['E_STARTOPEN']
arg_format = itertools.repeat('s')
def execute(self):
for arg in self.args[::-1]:
program.curr_block.open_queue.append(arg.value)
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class stopopen(base.VarArgsInstruction):
""" Store previous opened value in $c_i$. """
__slots__ = []
code = base.opcodes['STOPOPEN']
arg_format = itertools.repeat('cw')
def execute(self):
for arg in self.args:
arg.value = program.curr_block.open_queue.pop()
@base.gf2n
@base.vectorize
class e_stopopen(stopopen_class):
""" Store previous opened value in $c_i$. """
__slots__ = []
code = base.opcodes['E_STOPOPEN']
arg_format = itertools.repeat('cw')
def execute(self):
for arg in self.args:
arg.value = program.curr_block.open_queue.pop()
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class e_mult(base.VarArgsInstruction):
""" Start mult secret register $s_i$. """
__slots__ = []
code = base.opcodes['E_MULT']
arg_format = tools.cycle(['sw', 's', 's'])
# rename 'open' to avoid conflict with built-in open function
@base.gf2n
@base.vectorize
class asm_open(base.VarArgsInstruction):
""" Open the value in $s_j$ and assign it to $c_i$. """
__slots__ = []
code = base.opcodes['OPEN']
arg_format = tools.cycle(['cw','s'])
###
### CISC-style instructions
###
# rename 'open' to avoid conflict with built-in open function
# @base.gf2n
# @base.vectorize
# class asm_open(base.CISC):
# """ Open the value in $s_j$ and assign it to $c_i$. """
# __slots__ = []
# arg_format = ['cw','s']
#
# def expand(self):
#
# startopen(self.args[1])
# stopopen(self.args[0])
#
#
# """ Extended (NEC) open the value in $s_j$ and assign it to $c_i$. """
# #estartopen(self.args[1])
# #estopopen(self.args[0])
@base.gf2n
@base.vectorize
class e_lessthan(base.CISC):
""" less than function . """
__slots__ = []
arg_format = ['s','s','int','sgw']
def expand(self):
step = self.args[2]
tmp = program.curr_block.new_reg('s')
bit_array_sub = [program.curr_block.new_reg('sg') for _ in range(step)]
# signed ver. (start)
prod_left = program.curr_block.new_reg('sg')
prod_right = program.curr_block.new_reg('sg')
prod = program.curr_block.new_reg('sg')
ans = program.curr_block.new_reg('sg')
bit_array_self = [program.curr_block.new_reg('sg') for _ in range(step)]
bit_array_other = [program.curr_block.new_reg('sg') for _ in range(step)]
# signed ver. (end)
subs(tmp, self.args[0], self.args[1])
e_bitdec(tmp, step, *bit_array_sub)
# signed ver. (start)
e_bitdec(self.args[0], step, *bit_array_self)
e_bitdec(self.args[1], step, *bit_array_other)
gadds(prod_left, bit_array_self[step - 1], bit_array_other[step - 1])
gadds(prod_right, bit_array_sub[step - 1], bit_array_self[step - 1])
gmuls(prod, prod_left, prod_right)
# ge_startmult(prod_left, prod_right)
# ge_stopmult(prod)
gadds(self.args[3], prod, bit_array_sub[step - 1])
# signed ver. (end)
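        # What the lines above compute (a reading of the code, not a separate spec):
        # with s = a - b, the result is
        #   lt(a, b) = msb(s) XOR ((msb(a) XOR msb(b)) AND (msb(s) XOR msb(a)))
        # i.e. the MSB of the difference, corrected for the overflow case where the
        # signs of a and b differ.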
# DEBUG (start)
"""
c_bit_array = [cgf2n() for _ in range(step)]
for i in range(step):
print_char4("i=")
print_char4(str(i))
print_char('\n')
gstartopen(bit_array[i])
gstopopen(c_bit_array[i])
gprint_reg_plain(c_bit_array[i])
print_char('\n')
"""
# DEBUG (end)
# result = bit_array_sub[step - 1].e_bit_inject()
@base.gf2n
@base.vectorize
class e_trunc(base.CISC):
""" Truncate . """
__slots__ = []
arg_format = ['s','int','sw']
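    # Sketch of the expansion below: bit-decompose the 64-bit share, shift the bit
    # vector down by args[1] positions (filling with zero shares), and recompose,
    # i.e. a logical right shift by a public constant. The 64-bit width is hard-coded
    # here; e_bitrec is expected to be the recomposition counterpart of e_bitdec.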
def expand(self):
a = [program.curr_block.new_reg('sg') for _ in range(64)]
b = [program.curr_block.new_reg('sg') for _ in range(64)]
e_bitdec(self.args[0], 64, *a)
for i in range(64):
if i + self.args[1] >= 64 :
gldsi(b[i],0)
else :
b[i] = a[i + self.args[1]]
e_bitrec(self.args[2], 64, *b)
# return a
@base.gf2n
@base.vectorize
class e_pow2(base.CISC):
"""calculate 2^{a} by squaring (not optimized)"""
__slots__ = []
arg_format = ['s', 'int', 'sw']
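    # Idea of the expansion below: with a = sum_i a_i * 2^i and pow2k[i] = 2^(2^i),
    # each factor tmp3_x[i] equals a_i * 2^(2^i) + (1 - a_i), i.e. 2^(2^i) if a_i = 1
    # and 1 otherwise, so the running product x[m-1] is 2^a.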
def expand(self):
m = int(math.ceil(math.log(self.args[1],2)))
ai = [program.curr_block.new_reg('sg') for _ in range(m)]
a = [program.curr_block.new_reg('s') for _ in range(m)]
pow2k = [program.curr_block.new_reg('c') for _ in range(m)]
tmp_x = [program.curr_block.new_reg('s') for _ in range(m)]
tmp2_x = [program.curr_block.new_reg('s') for _ in range(m)]
tmp3_x = [program.curr_block.new_reg('s') for _ in range(m)]
x = [program.curr_block.new_reg('s') for _ in range(m)]
e_bitdec(self.args[0], m ,*ai)
for i in range(m):
e_bitinj(ai[i], a[i])
ldi(pow2k[0], 2)
for i in range(0,m-1):
mulc(pow2k[i+1], pow2k[i], pow2k[i])
mulm(tmp_x[0], a[0], pow2k[0])
addsi(tmp2_x[0], tmp_x[0], 1)
subs(tmp3_x[0], tmp2_x[0], a[0])
for i in range(1,m):
mulm(tmp_x[i], a[i], pow2k[i])
addsi(tmp2_x[i], tmp_x[i], 1)
subs(tmp3_x[i], tmp2_x[i], a[i])
x[0] = tmp3_x[0]
for i in range(0,m-1):
muls(x[i+1], tmp3_x[i+1], x[i])
addsi(self.args[2], x[m-1], 0)
#addm(self.args[2],tmp, pow2k[3])
#@base.gf2n
@base.vectorize
class e_prefixor(base.CISC):
"""n-rounds prefixOR operation including bit decomposition"""
__slots__ = []
arg_format = tools.chain(['s', 'int'], itertools.repeat('sw'))
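    # Sketch of the expansion below: bit-decompose the input, then compute the
    # prefix-OR from the most significant bit downwards, y_0 = x_{n-1},
    # y_i = y_{i-1} OR x_{n-1-i}, using OR(a, b) = ((a + 1) * (b + 1)) + 1 over GF(2);
    # each prefix bit is injected back into a mod-2^n share via e_bitinj.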
def expand(self):
array1 = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
array2 = [program.curr_block.new_reg('s') for _ in range(self.args[1])]
garray = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
tmp1 = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
tmp2 = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
tmp3 = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
tmp4 = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
n = self.args[1]
e_bitdec(self.args[0], n, *array1)
garray[0] = array1[n -1]
e_bitinj(array1[n-1], self.args[2])
for i in range(1, n):
gaddsi(tmp1[i], array1[n - (i + 1)], 1)
gaddsi(tmp2[i], garray[i - 1], 1)
gmuls(tmp3[i], tmp1[i], tmp2[i])
gaddsi(garray[i], tmp3[i], 1)
e_bitinj(garray[i], self.args[2 + i])
#OR(a,b)=((1+a)*(1+b))+1
#@base.gf2n
@base.vectorize
class e_bitdec(base.CISC):
r""" Convert a share mod 2^n to n-array of shares mod 2. """
__slots__ = []
code = base.opcodes['E_BITDEC']
arg_format = tools.chain(['s', 'int'], itertools.repeat('sgw'))
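    # Overview of the expansion below (derived from the code itself): e_skew_bit_dec
    # yields three GF(2) shares (x1, x2, x3) per bit; the sum bit is z_j = x1^x2^x3 and
    # the carry is c_{j+1} = x3 ^ ((1 ^ x1 ^ x2) & (x1 ^ x3)), i.e. maj(x1, x2, x3).
    # Three carry-propagation variants are provided: an O(sqrt(n))-round carry-select
    # version ('round_sqrt'), an O(log n)-round version ('round_log'), and an
    # (n-1)-round ripple-carry version (the default branch).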
def expand(self):
#conf = ConfigParser.ConfigParser()
#print conf
#conf.read('config.ini')
#print conf.get('DEFAULT', 'DEBUG')
#print inifile.get('default', 'type_of_decomposition')
#print conf.get('conversion', 'type_of_decomposition')
type_of_decomposition = "round_n"
if type_of_decomposition == 'round_sqrt':
#decomposition : square_root(n) round ver. (start)
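            # Carry-select structure: the 64 bits are split into blocks of sizes
            # 4, 4, 5, 6, 7, 8, 9, 10, 11 (summing to 64); each block after the first
            # is evaluated twice, once per assumed incoming carry (the
            # d_*bit_block[0]/[1] pairs), and the MUX steps below select the right
            # copy once the real carry-in is known.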
skew_res = [program.curr_block.new_reg('sg') for i in range(3 * 64)]
x1_xor_x2 = [program.curr_block.new_reg('sg') for i in range(64)]
z = [program.curr_block.new_reg('sg') for i in range(64)]
in_c_left = [program.curr_block.new_reg('sg') for i in range(64)]
x1_xor_x3 = [program.curr_block.new_reg('sg') for i in range(64)]
in_c_prod = [program.curr_block.new_reg('sg') for i in range(64)]
c = [program.curr_block.new_reg('sg') for i in range(64 + 1)]
c_xor_d = [[program.curr_block.new_reg('sg') for i in range(64)] for j in range(2)]
in_d_left = [[program.curr_block.new_reg('sg') for i in range(64)] for j in range(2)]
in_d_prod = [[program.curr_block.new_reg('sg') for i in range(64)] for j in range(2)]
c_xor_z = [program.curr_block.new_reg('sg') for i in range(64)]
first_4bit_d = [program.curr_block.new_reg('sg') for i in range(5)]
d_4bit_block = [[program.curr_block.new_reg('sg') for i in range(5)] for j in range(2)]
d_5bit_block = [[program.curr_block.new_reg('sg') for i in range(6)] for j in range(2)]
d_6bit_block = [[program.curr_block.new_reg('sg') for i in range(7)] for j in range(2)]
d_7bit_block = [[program.curr_block.new_reg('sg') for i in range(8)] for j in range(2)]
d_8bit_block = [[program.curr_block.new_reg('sg') for i in range(9)] for j in range(2)]
d_9bit_block = [[program.curr_block.new_reg('sg') for i in range(10)] for j in range(2)]
d_10bit_block = [[program.curr_block.new_reg('sg') for i in range(11)] for j in range(2)]
d_11bit_block = [[program.curr_block.new_reg('sg') for i in range(12)] for j in range(2)]
in_mux_right_4 = [program.curr_block.new_reg('sg') for i in range(5)]
in_mux_prod_4 = [program.curr_block.new_reg('sg') for i in range(5)]
in_mux_right_5 = [program.curr_block.new_reg('sg') for i in range(6)]
in_mux_prod_5 = [program.curr_block.new_reg('sg') for i in range(6)]
in_mux_right_6 = [program.curr_block.new_reg('sg') for i in range(7)]
in_mux_prod_6 = [program.curr_block.new_reg('sg') for i in range(7)]
in_mux_right_7 = [program.curr_block.new_reg('sg') for i in range(8)]
in_mux_prod_7 = [program.curr_block.new_reg('sg') for i in range(8)]
in_mux_right_8 = [program.curr_block.new_reg('sg') for i in range(9)]
in_mux_prod_8 = [program.curr_block.new_reg('sg') for i in range(9)]
in_mux_right_9 = [program.curr_block.new_reg('sg') for i in range(10)]
in_mux_prod_9 = [program.curr_block.new_reg('sg') for i in range(10)]
in_mux_right_10 = [program.curr_block.new_reg('sg') for i in range(11)]
in_mux_prod_10 = [program.curr_block.new_reg('sg') for i in range(11)]
in_mux_right_11 = [program.curr_block.new_reg('sg') for i in range(12)]
in_mux_prod_11 = [program.curr_block.new_reg('sg') for i in range(12)]
e_skew_bit_dec(self.args[0], 64, *skew_res)
gldsi(c[0], 0)
gldsi(first_4bit_d[0], 0)
gldsi(d_4bit_block[0][0], 0)
gldsi(d_4bit_block[1][0], 1)
gldsi(d_5bit_block[0][0], 0)
gldsi(d_5bit_block[1][0], 1)
gldsi(d_6bit_block[0][0], 0)
gldsi(d_6bit_block[1][0], 1)
gldsi(d_7bit_block[0][0], 0)
gldsi(d_7bit_block[1][0], 1)
gldsi(d_8bit_block[0][0], 0)
gldsi(d_8bit_block[1][0], 1)
gldsi(d_9bit_block[0][0], 0)
gldsi(d_9bit_block[1][0], 1)
gldsi(d_10bit_block[0][0], 0)
gldsi(d_10bit_block[1][0], 1)
gldsi(d_11bit_block[0][0], 0)
gldsi(d_11bit_block[1][0], 1)
# compute all [z] and [c]
for j in range(64):
# compute [z]
gadds(x1_xor_x2[j], skew_res[3 * j], skew_res[3 * j + 1])
gadds(z[j], skew_res[3 * j + 2], x1_xor_x2[j])
# compute [c]
gaddsi(in_c_left[j], x1_xor_x2[j], 1)
gadds(x1_xor_x3[j], skew_res[3 * j], skew_res[3 * j + 2])
gmuls(in_c_prod[j], in_c_left[j], x1_xor_x3[j])
# ge_startmult(in_c_left[j], x1_xor_x3[j])
# ge_stopmult(in_c_prod[j])
gadds(c[j + 1], in_c_prod[j], skew_res[3 * j + 2])
# compute c_xor_z
gadds(c_xor_z[j], c[j], z[j])
# compute for first 4 bit and next 4bit
for j in range(4):
                # for first_4bit_d
gadds(c_xor_d[0][j], c[j], first_4bit_d[j])
gaddsi(in_d_left[0][j], c_xor_d[0][j], 1)
gmuls(in_d_prod[0][j], in_d_left[0][j], c_xor_z[j])
# ge_startmult(in_d_left[0][j], c_xor_z[j])
# ge_stopmult(in_d_prod[0][j])
gadds(first_4bit_d[j + 1], in_d_prod[0][j], z[j])
# compute [x|j]
gadds(self.args[2 + j], c_xor_z[j], first_4bit_d[j])
for i in range(2):
# for other block
# first bit of 4bit_block = 4th bit
gadds(c_xor_d[i][4+j], c[4+j], d_4bit_block[i][j])
gaddsi(in_d_left[i][4+j], c_xor_d[i][4+j], 1)
gmuls(in_d_prod[i][4+j], in_d_left[i][4+j], c_xor_z[4+j])
# ge_startmult(in_d_left[i][4+j], c_xor_z[4+j])
# ge_stopmult(in_d_prod[i][4+j])
gadds(d_4bit_block[i][j+1], in_d_prod[i][4+j], z[4+j])
# compute for next 5bit
for j in range(5):
for i in range(2):
# first bit of 5bit_block = 8th bit
gadds(c_xor_d[i][8+j], c[8+j], d_5bit_block[i][j])
gaddsi(in_d_left[i][8+j], c_xor_d[i][8+j], 1)
gmuls(in_d_prod[i][8+j], in_d_left[i][8+j], c_xor_z[8+j])
# ge_startmult(in_d_left[i][8+j], c_xor_z[8+j])
# ge_stopmult(in_d_prod[i][8+j])
gadds(d_5bit_block[i][j+1], in_d_prod[i][8+j], z[8+j])
# compute for next 6bit
for j in range(6):
for i in range(2):
# first bit of 6bit_block = 13th bit
gadds(c_xor_d[i][13+j], c[13+j], d_6bit_block[i][j])
gaddsi(in_d_left[i][13+j], c_xor_d[i][13+j], 1)
gmuls(in_d_prod[i][13+j], in_d_left[i][13+j], c_xor_z[13+j])
# ge_startmult(in_d_left[i][13+j], c_xor_z[13+j])
# ge_stopmult(in_d_prod[i][13+j])
gadds(d_6bit_block[i][j+1], in_d_prod[i][13+j], z[13+j])
# compute for next 7bit
for j in range(7):
for i in range(2):
# first bit of 7bit_block = 19th bit
gadds(c_xor_d[i][19+j], c[19+j], d_7bit_block[i][j])
gaddsi(in_d_left[i][19+j], c_xor_d[i][19+j], 1)
gmuls(in_d_prod[i][19+j], in_d_left[i][19+j], c_xor_z[19+j])
# ge_startmult(in_d_left[i][19+j], c_xor_z[19+j])
# ge_stopmult(in_d_prod[i][19+j])
gadds(d_7bit_block[i][j+1], in_d_prod[i][19+j], z[19+j])
# compute for next 8bit
for j in range(8):
for i in range(2):
# first bit of 8bit_block = 26th bit
gadds(c_xor_d[i][26 + j], c[26 + j], d_8bit_block[i][j])
gaddsi(in_d_left[i][26 + j], c_xor_d[i][26 + j], 1)
gmuls(in_d_prod[i][26 + j], in_d_left[i][26 + j], c_xor_z[26 + j])
# ge_startmult(in_d_left[i][26 + j], c_xor_z[26 + j])
# ge_stopmult(in_d_prod[i][26 + j])
gadds(d_8bit_block[i][j + 1], in_d_prod[i][26 + j], z[26 + j])
# compute for next 9bit
for j in range(9):
for i in range(2):
# first bit of 9bit_block = 34th bit
gadds(c_xor_d[i][34 + j], c[34 + j], d_9bit_block[i][j])
gaddsi(in_d_left[i][34 + j], c_xor_d[i][34 + j], 1)
gmuls(in_d_prod[i][34 + j], in_d_left[i][34 + j], c_xor_z[34 + j])
# ge_startmult(in_d_left[i][34 + j], c_xor_z[34 + j])
# ge_stopmult(in_d_prod[i][34 + j])
gadds(d_9bit_block[i][j + 1], in_d_prod[i][34 + j], z[34 + j])
# compute for next 10bit
for j in range(10):
for i in range(2):
                    # first bit of 10bit_block = 43rd bit
gadds(c_xor_d[i][43 + j], c[43 + j], d_10bit_block[i][j])
gaddsi(in_d_left[i][43 + j], c_xor_d[i][43 + j], 1)
gmuls(in_d_prod[i][43 + j], in_d_left[i][43 + j], c_xor_z[43 + j])
# ge_startmult(in_d_left[i][43 + j], c_xor_z[43 + j])
# ge_stopmult(in_d_prod[i][43 + j])
gadds(d_10bit_block[i][j + 1], in_d_prod[i][43 + j], z[43 + j])
# compute for next 11bit
for j in range(11):
for i in range(2):
                    # first bit of 11bit_block = 53rd bit
gadds(c_xor_d[i][53 + j], c[53 + j], d_11bit_block[i][j])
gaddsi(in_d_left[i][53 + j], c_xor_d[i][53 + j], 1)
gmuls(in_d_prod[i][53 + j], in_d_left[i][53 + j], c_xor_z[53 + j])
# ge_startmult(in_d_left[i][53 + j], c_xor_z[53 + j])
# ge_stopmult(in_d_prod[i][53 + j])
gadds(d_11bit_block[i][j + 1], in_d_prod[i][53 + j], z[53 + j])
# connect first 4bit and next 4bit block
selected_d_4bit_block = [program.curr_block.new_reg('sg') for i in range(5)]
for j in range(5):
# compute MUX
gadds(in_mux_right_4[j], d_4bit_block[0][j], d_4bit_block[1][j])
gmuls(in_mux_prod_4[j], in_mux_right_4[j], first_4bit_d[4])
# ge_startmult(in_mux_right_4[j], first_4bit_d[4])
# ge_stopmult(in_mux_prod_4[j])
gadds(selected_d_4bit_block[j], in_mux_prod_4[j], d_4bit_block[0][j])
if j < 4:
# compute [x|j]
gadds(self.args[2 + (4 + j)], c_xor_z[4 + j], selected_d_4bit_block[j])
# connect 4bit block and next 5bit block
selected_d_5bit_block = [program.curr_block.new_reg('sg') for i in range(6)]
for j in range(6):
# compute MUX
gadds(in_mux_right_5[j], d_5bit_block[0][j], d_5bit_block[1][j])
gmuls(in_mux_prod_5[j], in_mux_right_5[j], selected_d_4bit_block[4])
# ge_startmult(in_mux_right_5[j], selected_d_4bit_block[4])
# ge_stopmult(in_mux_prod_5[j])
gadds(selected_d_5bit_block[j], in_mux_prod_5[j], d_5bit_block[0][j])
if j < 5:
# compute [x|j]
gadds(self.args[2 + (8 + j)], c_xor_z[8 + j], selected_d_5bit_block[j])
# connect 5bit block and next 6bit block
selected_d_6bit_block = [program.curr_block.new_reg('sg') for i in range(7)]
for j in range(7):
# compute MUX
gadds(in_mux_right_6[j], d_6bit_block[0][j], d_6bit_block[1][j])
gmuls(in_mux_prod_6[j], in_mux_right_6[j], selected_d_5bit_block[5])
# ge_startmult(in_mux_right_6[j], selected_d_5bit_block[5])
# ge_stopmult(in_mux_prod_6[j])
gadds(selected_d_6bit_block[j], in_mux_prod_6[j], d_6bit_block[0][j])
if j < 6:
# compute [x|j]
gadds(self.args[2 + (13 + j)], c_xor_z[13 + j], selected_d_6bit_block[j])
# connect 6bit block and next 7bit block
selected_d_7bit_block = [program.curr_block.new_reg('sg') for i in range(8)]
for j in range(8):
# compute MUX
gadds(in_mux_right_7[j], d_7bit_block[0][j], d_7bit_block[1][j])
gmuls(in_mux_prod_7[j], in_mux_right_7[j], selected_d_6bit_block[6])
# ge_startmult(in_mux_right_7[j], selected_d_6bit_block[6])
# ge_stopmult(in_mux_prod_7[j])
gadds(selected_d_7bit_block[j], in_mux_prod_7[j], d_7bit_block[0][j])
if j < 7:
# compute [x|j]
gadds(self.args[2 + (19 + j)], c_xor_z[19 + j], selected_d_7bit_block[j])
# connect 7bit block and next 8bit block
selected_d_8bit_block = [program.curr_block.new_reg('sg') for i in range(9)]
for j in range(9):
# compute MUX
gadds(in_mux_right_8[j], d_8bit_block[0][j], d_8bit_block[1][j])
gmuls(in_mux_prod_8[j], in_mux_right_8[j], selected_d_7bit_block[7])
# ge_startmult(in_mux_right_8[j], selected_d_7bit_block[7])
# ge_stopmult(in_mux_prod_8[j])
gadds(selected_d_8bit_block[j], in_mux_prod_8[j], d_8bit_block[0][j])
if j < 8:
# compute [x|j]
gadds(self.args[2 + (26 + j)], c_xor_z[26 + j], selected_d_8bit_block[j])
# connect 8bit block and next 9bit block
selected_d_9bit_block = [program.curr_block.new_reg('sg') for i in range(10)]
for j in range(10):
# compute MUX
gadds(in_mux_right_9[j], d_9bit_block[0][j], d_9bit_block[1][j])
gmuls(in_mux_prod_9[j], in_mux_right_9[j], selected_d_8bit_block[8])
# ge_startmult(in_mux_right_9[j], selected_d_8bit_block[8])
# ge_stopmult(in_mux_prod_9[j])
gadds(selected_d_9bit_block[j], in_mux_prod_9[j], d_9bit_block[0][j])
if j < 9:
# compute [x|j]
gadds(self.args[2 + (34 + j)], c_xor_z[34 + j], selected_d_9bit_block[j])
# connect 9bit block and next 10bit block
selected_d_10bit_block = [program.curr_block.new_reg('sg') for i in range(11)]
for j in range(11):
# compute MUX
gadds(in_mux_right_10[j], d_10bit_block[0][j], d_10bit_block[1][j])
gmuls(in_mux_prod_10[j], in_mux_right_10[j], selected_d_9bit_block[9])
# ge_startmult(in_mux_right_10[j], selected_d_9bit_block[9])
# ge_stopmult(in_mux_prod_10[j])
gadds(selected_d_10bit_block[j], in_mux_prod_10[j], d_10bit_block[0][j])
if j < 10:
# compute [x|j]
gadds(self.args[2 + (43 + j)], c_xor_z[43 + j], selected_d_10bit_block[j])
# connect 10bit block and next 11bit block
selected_d_11bit_block = [program.curr_block.new_reg('sg') for i in range(12)]
for j in range(11):
# compute MUX
gadds(in_mux_right_11[j], d_11bit_block[0][j], d_11bit_block[1][j])
gmuls(in_mux_prod_11[j], in_mux_right_11[j], selected_d_10bit_block[10])
# ge_startmult(in_mux_right_11[j], selected_d_10bit_block[10])
# ge_stopmult(in_mux_prod_11[j])
gadds(selected_d_11bit_block[j], in_mux_prod_11[j], d_11bit_block[0][j])
# compute [x|j]
gadds(self.args[2 + (53 + j)], c_xor_z[53 + j], selected_d_11bit_block[j])
#decomposition : square_root(n) round ver. (end)
elif type_of_decomposition == 'round_log':
#decomposition : log(n) round ver. (start)
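            # Conditional-sum style carry propagation: candidate carries are computed
            # for both possible carry-ins (the d[k][0]/d[k][1] pairs) and blocks are
            # merged pairwise, with the resolved block size doubling in each of the
            # log(n) rounds via the MUX selections below.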
log_val = int(math.ceil(math.log(self.args[1], 2)))
skew_res = [program.curr_block.new_reg('sg') for i in range(3 * self.args[1])]
x1_xor_x2 = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
z = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_c_left = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
x1_xor_x3 = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_c_prod = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
c = [program.curr_block.new_reg('sg') for i in range(self.args[1] + 1)]
c_xor_z = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
c_xor_d = [[[program.curr_block.new_reg('sg') for i in range(self.args[1])] for j in range(2)] for k in range(log_val)]
in_d_left = [[[program.curr_block.new_reg('sg') for i in range(self.args[1])] for j in range(2)] for k in range(log_val)]
in_d_prod = [[[program.curr_block.new_reg('sg') for i in range(self.args[1])] for j in range(2)] for k in range(log_val)]
d = [[[program.curr_block.new_reg('sg') for i in range(self.args[1] + 1)] for j in range(2)] for k in range(log_val)]
in_mux_right = [[[program.curr_block.new_reg('sg') for i in range(self.args[1] + 1)] for j in range(2)] for k in range(log_val)]
in_mux_prod = [[[program.curr_block.new_reg('sg') for i in range(self.args[1] + 1)] for j in range(2)] for k in range(log_val)]
gldsi(c[0],0)
gldsi(d[log_val - 1][0][0], 0)
e_skew_bit_dec(self.args[0], self.args[1], *skew_res)
# compute all [z] and [c]
for j in range(self.args[1]):
# compute [z]
gadds(x1_xor_x2[j], skew_res[3 * j], skew_res[3 * j + 1])
gadds(z[j], skew_res[3 * j + 2], x1_xor_x2[j])
# compute [c]
gaddsi(in_c_left[j], x1_xor_x2[j], 1)
gadds(x1_xor_x3[j], skew_res[3 * j], skew_res[3 * j + 2])
gmuls(in_c_prod[j], in_c_left[j], x1_xor_x3[j])
# ge_startmult(in_c_left[j], x1_xor_x3[j])
# ge_stopmult(in_c_prod[j])
gadds(c[j + 1], in_c_prod[j], skew_res[3 * j + 2])
# compute c_xor_z
gadds(c_xor_z[j], c[j], z[j])
# compute all [d] -- assume that self.args[1] >= 8
for k in range(log_val - 1):
valid_carry_idx = 2 ** (k + 1)
# print("valid_carry_idx = {0}".format(valid_carry_idx))
if k == 0:
# compute candidate of [d]
for j in range(2):
for i in range(self.args[1]):
if (j == 0) and (i == 0):
gadds(c_xor_d[k][0][i], c[i], d[log_val - 1][0][i])
gaddsi(in_d_left[k][0][i], c_xor_d[k][0][i], 1)
gmuls(in_d_prod[k][0][i], in_d_left[k][0][i], c_xor_z[i])
# ge_startmult(in_d_left[k][0][i], c_xor_z[i])
# ge_stopmult(in_d_prod[k][0][i])
gadds(d[log_val - 1][0][i+1], in_d_prod[k][0][i], z[i])
elif (j == 0) and (i == 1):
gadds(c_xor_d[k][0][i], c[i], d[log_val - 1][0][i])
gaddsi(in_d_left[k][0][i], c_xor_d[k][0][i], 1)
gmuls(in_d_prod[k][0][i], in_d_left[k][0][i], c_xor_z[i])
# ge_startmult(in_d_left[k][0][i], c_xor_z[i])
# ge_stopmult(in_d_prod[k][0][i])
gadds(d[log_val - 1][0][i+1], in_d_prod[k][0][i], z[i])
elif (i >= 2) and (i % 2 == 0):
gaddsi(c_xor_d[k][j][i], c[i], j)
gaddsi(in_d_left[k][j][i], c_xor_d[k][j][i], 1)
gmuls(in_d_prod[k][j][i], in_d_left[k][j][i], c_xor_z[i])
# ge_startmult(in_d_left[k][j][i], c_xor_z[i])
# ge_stopmult(in_d_prod[k][j][i])
gadds(d[k][j][i+1], in_d_prod[k][j][i], z[i])
elif (i >= 2) and (i % 2 == 1):
gadds(c_xor_d[k][j][i], c[i], d[k][j][i])
gaddsi(in_d_left[k][j][i], c_xor_d[k][j][i], 1)
gmuls(in_d_prod[k][j][i], in_d_left[k][j][i], c_xor_z[i])
# ge_startmult(in_d_left[k][j][i], c_xor_z[i])
# ge_stopmult(in_d_prod[k][j][i])
gadds(d[k][j][i+1], in_d_prod[k][j][i], z[i])
# select and connect blocks of [d]
for j in range(2):
for i in range(1, self.args[1]):
if (j == 0) and (i == valid_carry_idx):
for connect_idx in range(valid_carry_idx, 2 * valid_carry_idx):
# compute MUX
gadds(in_mux_right[k][j][connect_idx + 1], d[k][0][connect_idx + 1], d[k][1][connect_idx + 1])
gmuls(in_mux_prod[k][j][connect_idx + 1], in_mux_right[k][j][connect_idx + 1], d[log_val - 1][0][i])
# ge_startmult(in_mux_right[k][j][connect_idx + 1], d[log_val - 1][0][i])
# ge_stopmult(in_mux_prod[k][j][connect_idx + 1])
gadds(d[log_val - 1][0][connect_idx + 1], in_mux_prod[k][j][connect_idx + 1], d[k][0][connect_idx + 1])
elif (i >= 2 * valid_carry_idx) and (i % (2 * valid_carry_idx) == valid_carry_idx -1):
d[k + 1][j][i] = d[k][j][i]
elif (i >= 2 * valid_carry_idx) and (i % (2 * valid_carry_idx) == valid_carry_idx):
for connect_idx in range(i, i + valid_carry_idx):
# compute MUX
gadds(in_mux_right[k][j][connect_idx + 1], d[k][0][connect_idx + 1], d[k][1][connect_idx + 1])
gmuls(in_mux_prod[k][j][connect_idx + 1], in_mux_right[k][j][connect_idx + 1], d[k][j][i])
# ge_startmult(in_mux_right[k][j][connect_idx + 1], d[k][j][i])
# ge_stopmult(in_mux_prod[k][j][connect_idx + 1])
gadds(d[k + 1][j][connect_idx + 1], in_mux_prod[k][j][connect_idx + 1], d[k][0][connect_idx + 1])
if connect_idx == i:
d[k+1][j][i] = d[k][j][i]
else:
# select and connect blocks of [d]
for j in range(2):
count = 1
for i in range(1, self.args[1]):
finished_block = 2 * count
if (j == 0) and (i == valid_carry_idx):
for connect_idx in range(valid_carry_idx, 2 * valid_carry_idx):
# compute MUX
gadds(in_mux_right[k][j][connect_idx + 1], d[k][0][connect_idx + 1], d[k][1][connect_idx + 1])
gmuls(in_mux_prod[k][j][connect_idx + 1], in_mux_right[k][j][connect_idx + 1], d[log_val - 1][0][i])
# ge_startmult(in_mux_right[k][j][connect_idx + 1], d[log_val - 1][0][i])
# ge_stopmult(in_mux_prod[k][j][connect_idx + 1])
gadds(d[log_val - 1][0][connect_idx + 1], in_mux_prod[k][j][connect_idx + 1], d[k][0][connect_idx + 1])
elif (i >= finished_block * valid_carry_idx) and (i % (2 * valid_carry_idx) > 0) and (i % (2 * valid_carry_idx) <= valid_carry_idx - 1) and (k <= (log_val - 2)):
d[k + 1][j][i] = d[k][j][i]
elif (i >= finished_block * valid_carry_idx) and (i % (2 * valid_carry_idx) >= valid_carry_idx) and (k <= (log_val - 2)):
for connect_idx in range(i, i + valid_carry_idx):
# compute MUX
gadds(in_mux_right[k][j][connect_idx + 1], d[k][0][connect_idx + 1], d[k][1][connect_idx + 1])
gmuls(in_mux_prod[k][j][connect_idx + 1], in_mux_right[k][j][connect_idx + 1], d[k][j][i])
# ge_startmult(in_mux_right[k][j][connect_idx + 1], d[k][j][i])
# ge_stopmult(in_mux_prod[k][j][connect_idx + 1])
gadds(d[k + 1][j][connect_idx + 1], in_mux_prod[k][j][connect_idx + 1], d[k][0][connect_idx + 1])
if connect_idx == i:
d[k + 1][j][i] = d[k][j][i]
if connect_idx == i + valid_carry_idx - 1:
count += 1
# compute [x|j]
for i in range(self.args[1]):
gadds(self.args[2 + i], c_xor_z[i], d[log_val - 1][0][i])
# decomposition : log(n) round ver. (end)
else:
# decomposition : n-1 round ver. (start)
skew_res = [program.curr_block.new_reg('sg') for i in range(3 * self.args[1])]
x1_xor_x2 = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
z = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_c_left = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
x1_xor_x3 = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_c_prod = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
c = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
c_xor_d = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_d_left = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_d_prod = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
c_xor_z = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
d = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
e_skew_bit_dec(self.args[0], self.args[1], *skew_res)
gldsi(c[0], 0)
gldsi(d[0], 0)
for j in range(self.args[1]):
if self.args[1] == 1:
gadds(x1_xor_x2[j], skew_res[3 * j], skew_res[3 * j + 1])
gadds(self.args[2 + j], skew_res[3 * j + 2], x1_xor_x2[j])
else:
if j == self.args[1] - 1:
# compute [z]
gadds(x1_xor_x2[j], skew_res[3 * j], skew_res[3 * j + 1])
gadds(z[j], skew_res[3 * j + 2], x1_xor_x2[j])
# compute c_xor_d[j]
gadds(c_xor_d[j], c[j], d[j])
# compute [x|j]
gadds(self.args[2 + j], z[j], c_xor_d[j])
else:
# compute [z]
gadds(x1_xor_x2[j], skew_res[3 * j], skew_res[3 * j + 1])
gadds(z[j], skew_res[3 * j + 2], x1_xor_x2[j])
# compute [c]
gaddsi(in_c_left[j], x1_xor_x2[j], 1)
gadds(x1_xor_x3[j], skew_res[3 * j], skew_res[3 * j + 2])
gmuls(in_c_prod[j], in_c_left[j], x1_xor_x3[j])
# ge_startmult(in_c_left[j], x1_xor_x3[j])
# ge_stopmult(in_c_prod[j])
gadds(c[j+1], in_c_prod[j], skew_res[3 * j + 2])
# compute [d]
gadds(c_xor_d[j], c[j], d[j])
gaddsi(in_d_left[j], c_xor_d[j], 1)
gadds(c_xor_z[j], c[j], z[j])
gmuls(in_d_prod[j], in_d_left[j], c_xor_z[j])
# ge_startmult(in_d_left[j], c_xor_z[j])
# ge_stopmult(in_d_prod[j])
gadds(d[j + 1], in_d_prod[j], z[j])
# compute [x|j]
gadds(self.args[2 + j], z[j], c_xor_d[j])
# decomposition : n-1 round ver. (end)
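# NOTE (informal, added for clarity): the n-1 round decomposition above is a
# ripple full adder over the three replicated shares x1, x2, x3 of x, processed
# bit by bit with two carry streams:
#     z[j]     = x1 ^ x2 ^ x3                       (carry-save sum bit)
#     c[j+1]   = MAJ(x1, x2, x3)                    (first carry stream)
#     d[j+1]   = MAJ(z[j], c[j], d[j])              (second carry stream)
#     out bit  = z[j] ^ c[j] ^ d[j]
# The majority function is arithmetized over GF(2) as
#     MAJ(a, b, e) = ((1 ^ a ^ b) & (a ^ e)) ^ e,
# which is what the gaddsi/gmuls/gadds pattern computes.  Illustrative check:
#     for a in (0, 1):
#         for b in (0, 1):
#             for e in (0, 1):
#                 maj = (a & b) ^ (a & e) ^ (b & e)
#                 assert ((1 ^ a ^ b) & (a ^ e)) ^ e == maj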
#@base.gf2n
@base.vectorize
class e_bitinj(base.CISC):
r""" Convert a share mod 2 to the share mod 2^n """
__slots__ = []
code = base.opcodes['E_BITINJ']
arg_format = ['sg', 'sw']
def expand(self):
x1 = program.curr_block.new_reg('s')
x2 = program.curr_block.new_reg('s')
x3 = program.curr_block.new_reg('s')
sum12 = program.curr_block.new_reg('s')
sum123 = program.curr_block.new_reg('s')
prod12 = program.curr_block.new_reg('s')
twice_prod12 = program.curr_block.new_reg('s')
twice_x3 = program.curr_block.new_reg('s')
round2_right = program.curr_block.new_reg('s')
round2_prod = program.curr_block.new_reg('s')
res_left = program.curr_block.new_reg('s')
#e_skew_inj(self.args[0], x1, x2, x3)
e_skew_bit_inj(self.args[0], x1, x2, x3)
# compute [x1] + [x2] +[x3]
adds(sum12, x1, x2)
adds(sum123, x3, sum12)
# compute [x1] * [x2]
muls(prod12, x1, x2)
# e_startmult(x1, x2)
# e_stopmult(prod12)
# * 2
mulsi(twice_prod12, prod12, 2)
mulsi(twice_x3, x3, 2)
# compute ([x1] + [x2] - 2 * [x1] * [x2])
subs(round2_right, sum12, twice_prod12)
muls(round2_prod, twice_x3, round2_right)
# e_startmult(twice_x3, round2_right)
# e_stopmult(round2_prod)
# compute result
subs(res_left, sum123, twice_prod12)
subs(self.args[1], res_left, round2_prod)
"""
# DEBUG MODE
x1 = program.curr_block.new_reg('s')
x2 = program.curr_block.new_reg('s')
x3 = program.curr_block.new_reg('s')
c1 = program.curr_block.new_reg('c')
c2 = program.curr_block.new_reg('c')
c3 = program.curr_block.new_reg('c')
c_sum123 = program.curr_block.new_reg('c')
c_prod12 = program.curr_block.new_reg('c')
c_twice_prod12 = program.curr_block.new_reg('c')
c_twice_x3 = program.curr_block.new_reg('c')
c_round2_right = program.curr_block.new_reg('c')
c_round2_prod = program.curr_block.new_reg('c')
sum12 = program.curr_block.new_reg('s')
sum123 = program.curr_block.new_reg('s')
prod12 = program.curr_block.new_reg('s')
twice_prod12 = program.curr_block.new_reg('s')
twice_x3 = program.curr_block.new_reg('s')
round2_right = program.curr_block.new_reg('s')
round2_prod = program.curr_block.new_reg('s')
res_left = program.curr_block.new_reg('s')
e_skew_inj(self.args[0], x1, x2, x3)
# DEBUG (START)
startopen(x1, x2, x3)
stopopen(c1, c2, c3)
print_reg_plain(c1)
print_char('\n')
print_reg_plain(c2)
print_char('\n')
print_reg_plain(c3)
print_char('\n')
# DEBUG (END)
# compute [x1] + [x2] +[x3]
adds(sum12, x1, x2)
adds(sum123, x3, sum12)
# DEBUG (START)
startopen(sum123)
stopopen(c_sum123)
print_reg_plain(c_sum123)
print_char('\n')
# DEBUG (END)
# compute [x1] * [x2]
e_startmult(x1, x2)
e_stopmult(prod12)
# DEBUG (START)
startopen(prod12)
stopopen(c_prod12)
print_reg_plain(c_prod12)
print_char('\n')
# DEBUG (END)
# * 2
mulsi(twice_prod12, prod12, 2)
mulsi(twice_x3, x3, 2)
# DEBUG (START)
startopen(twice_prod12, twice_x3)
stopopen(c_twice_prod12, c_twice_x3)
print_reg_plain(c_twice_prod12)
print_char('\n')
print_reg_plain(c_twice_x3)
print_char('\n')
# DEBUG (END)
# compute ([x1] + [x2] - 2 * [x1] * [x2])
subs(round2_right, sum12, twice_prod12)
# DEBUG (START)
startopen(round2_right)
stopopen(c_round2_right)
print_reg_plain(c_round2_right)
print_char('\n')
# DEBUG (END)
e_startmult(twice_x3, round2_right)
e_stopmult(round2_prod)
# DEBUG (START)
startopen(round2_prod)
stopopen(c_round2_prod)
print_reg_plain(c_round2_prod)
print_char('\n')
# DEBUG (END)
# compute result
subs(res_left, sum123, twice_prod12)
subs(self.args[1], res_left, round2_prod)
"""
@base.vectorize
class e_bitrec(base.CISC):
r""" Convert an n-array of shares mod 2 to a share mod 2^n. """
__slots__ = []
code = base.opcodes['E_BITREC']
arg_format = tools.chain(['sw', 'int'], itertools.repeat('sg'))
def expand(self):
# self.args[1] is the number of array's elements
# assume that 0 < self.args[1] <= ring_size
# re-composition using bit-injection (start)
# injected_a = [program.curr_block.new_reg('s') for i in range(self.args[1])]
# two_power_a = [program.curr_block.new_reg('s') for i in range(self.args[1])]
# res = [program.curr_block.new_reg('s') for i in range(self.args[1])]
#
# if self.args[1] > 1:
# for i in range(self.args[1]):
# e_bitinj(self.args[2+i], injected_a[i])
# if i == 0:
# two_power_a[i] = injected_a[i]
# elif i == 1:
# mulsi(two_power_a[i], injected_a[i], 2)
# else:
# tmp_two_power_a = [program.curr_block.new_reg('s') for z in range(i+1)]
# for j in range(i+1):
# if j == 0:
# tmp_two_power_a[j] = injected_a[i]
# elif j == i:
# mulsi(two_power_a[i], tmp_two_power_a[j - 1], 2)
# else:
# mulsi(tmp_two_power_a[j], tmp_two_power_a[j - 1], 2)
#
# res[0] = two_power_a[0]
# for i in range(1, self.args[1]):
# if i == self.args[1] - 1:
# adds(self.args[0], two_power_a[i], res[i - 1])
# elif i == 1:
# adds(res[i], two_power_a[i], two_power_a[i - 1])
# else:
# adds(res[i], two_power_a[i], res[i - 1])
# else:
# e_bitinj(self.args[2], self.args[0])
# re-composition using bit-injection (end)
        # re-composition: n-1 round ver. (start)
ring_size = 64
bit_s = [program.curr_block.new_reg('sg') for i in range(ring_size)]
c_xor_d = [program.curr_block.new_reg('sg') for i in range(ring_size)]
x1 = [program.curr_block.new_reg('sg') for i in range(ring_size)]
x2 = [program.curr_block.new_reg('sg') for i in range(ring_size)]
x3 = [program.curr_block.new_reg('sg') for i in range(ring_size)]
x12 = [program.curr_block.new_reg('sg') for i in range(ring_size)]
x13 = [program.curr_block.new_reg('sg') for i in range(ring_size)]
in1_left = [program.curr_block.new_reg('sg') for i in range(ring_size)]
c = [program.curr_block.new_reg('sg') for i in range(ring_size + 1)]
c_left = [program.curr_block.new_reg('sg') for i in range(ring_size)]
in2_left = [program.curr_block.new_reg('sg') for i in range(ring_size)]
in2_right = [program.curr_block.new_reg('sg') for i in range(ring_size)]
d = [program.curr_block.new_reg('sg') for i in range(ring_size + 1)]
d_left = [program.curr_block.new_reg('sg') for i in range(ring_size)]
zero_shares = [program.curr_block.new_reg('sg') for i in range(ring_size - self.args[1])]
gldsi(c[0], 0)
gldsi(d[0], 0)
for j in range(ring_size - self.args[1]):
gldsi(zero_shares[j], 0)
for j in range(ring_size):
if j == 0:
gadds(c_xor_d[j], c[j], d[j])
gadds(bit_s[j], c_xor_d[j], self.args[2 + j])
e_skew_bit_rec(bit_s[j], x1[j], x2[j], x3[j])
# compute 1bit carry "c"
gadds(x12[j], x1[j], x2[j])
gaddsi(in1_left[j], x12[j], 1)
gadds(x13[j], x1[j], x3[j])
gmuls(c_left[j], in1_left[j], x13[j])
# ge_startmult(in1_left[j], x13[j])
# ge_stopmult(c_left[j])
gadds(c[j + 1], c_left[j], x3[j])
# compute 2bit carry "d"
gaddsi(in2_left[j], c_xor_d[j], 1)
gadds(in2_right[j], c[j], bit_s[j])
gmuls(d_left[j], in2_left[j], in2_right[j])
# ge_startmult(in2_left[j], in2_right[j])
# ge_stopmult(d_left[j])
gadds(d[j + 1], d_left[j], bit_s[j])
elif j == ring_size - 1:
if j < self.args[1]:
gadds(c_xor_d[j], c[j], d[j])
gadds(bit_s[j], c_xor_d[j], self.args[2 + j])
else:
gadds(c_xor_d[j], c[j], d[j])
gadds(bit_s[j], c_xor_d[j], zero_shares[j - self.args[1]])
# compute 1bit carry "c" - skip
# compute 2bit carry "d" - skip
else:
if j < self.args[1]:
gadds(c_xor_d[j], c[j], d[j])
gadds(bit_s[j], c_xor_d[j], self.args[2 + j])
else:
gadds(c_xor_d[j], c[j], d[j])
gadds(bit_s[j], c_xor_d[j], zero_shares[j - self.args[1]])
e_skew_bit_rec(bit_s[j], x1[j], x2[j], x3[j])
# compute 1bit carry "c"
gadds(x12[j], x1[j], x2[j])
gaddsi(in1_left[j], x12[j], 1)
gadds(x13[j], x1[j], x3[j])
gmuls(c_left[j], in1_left[j], x13[j])
# ge_startmult(in1_left[j], x13[j])
# ge_stopmult(c_left[j])
gadds(c[j + 1], c_left[j], x3[j])
# compute 2bit carry "d"
gaddsi(in2_left[j], c_xor_d[j], 1)
gadds(in2_right[j], c[j], bit_s[j])
gmuls(d_left[j], in2_left[j], in2_right[j])
# ge_startmult(in2_left[j], in2_right[j])
# ge_stopmult(d_left[j])
gadds(d[j + 1], d_left[j], bit_s[j])
e_skew_ring_rec(self.args[0], ring_size, *bit_s)
# re-composition: n-1 round ver. (end)
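# NOTE (informal, added for clarity): e_bitrec pads the self.args[1] input bits up
# to ring_size = 64 with freshly loaded zero shares, so the carry chain is always
# run over a full 64-bit word; the carry out of the most significant bit
# (j == ring_size - 1) is dropped, which is the reduction mod 2^64.  The adjusted
# bits bit_s[j] are then handed to e_skew_ring_rec for the final recombination.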
#@base.gf2n
@base.vectorize
class e_read_from_file(base.CISC):
r""" Convert a share mod 2^n to n-array of shares mod 2. """
__slots__ = []
code = base.opcodes['E_READ_FROM_FILE']
arg_format = tools.chain(['s', 'int', 'int'], itertools.repeat('sw'))
def expand(self):
res = [program.curr_block.new_reg('s') for i in range(self.args[2])]
for j in range(self.args[2]):
res[j] = self.args[3+j]
e_input_share_int(self.args[1], self.args[2], *res)
@base.vectorize
class ge_read_from_file(base.CISC):
r""" Convert a share mod 2^n to n-array of shares mod 2. """
__slots__ = []
code = base.opcodes['GE_READ_FROM_FILE']
arg_format = tools.chain(['sg', 'int', 'int'], itertools.repeat('sgw'))
def expand(self):
res = [program.curr_block.new_reg('sg') for i in range(self.args[2])]
for j in range(self.args[2]):
res[j] = self.args[3+j]
ge_input_share_int(self.args[1], self.args[2], *res)
#@base.vectorize
#class e_ringcmp(base.Instruction):
#r""" Convert an n-array of shares mod 2 to a share mod 2^n. """
#__slots__ = []
#code = base.opcodes['E_RING_CMP']
#arg_format = tools.chain(['sw', 'int'], itertools.repeat('sg'))
@base.vectorize
class e_input_share_int(base.Instruction):
r""" Read input from file as token. """
__slots__ = []
code = base.opcodes['E_INPUT_SHARE_INT']
arg_format = tools.chain(['int', 'int'], itertools.repeat('sw'))
@base.vectorize
class ge_input_share_int(base.Instruction):
r""" Read input from file as token. """
__slots__ = []
code = base.opcodes['GE_INPUT_SHARE_INT']
arg_format = tools.chain(['int', 'int'], itertools.repeat('sgw'))
@base.vectorize
class e_multi_startmult(startopen_class):
""" Start opening secret register $s_i$. """
__slots__ = []
code = base.opcodes['E_MULTI_STARTMULT']
arg_format = itertools.repeat('s')
@base.vectorize
class e_multi_stopmult(stopopen_class):
""" Store previous opened value in $c_i$. """
__slots__ = []
code = base.opcodes['E_MULTI_STOPMULT']
arg_format = itertools.repeat('sw')
@base.gf2n
@base.vectorize
class e_startmult(startopen_class):
""" Start opening secret register $s_i$. """
__slots__ = []
code = base.opcodes['E_STARTMULT']
arg_format = itertools.repeat('s')
@base.gf2n
@base.vectorize
class e_stopmult(stopopen_class):
""" Store previous opened value in $c_i$. """
__slots__ = []
code = base.opcodes['E_STOPMULT']
arg_format = itertools.repeat('sw')
@base.gf2n
@base.vectorize
class muls(base.CISC):
""" Secret multiplication $s_i = s_j \cdot s_k$. """
__slots__ = []
arg_format = ['sw','s','s']
def expand(self):
e_mult(self.args[0], self.args[1], self.args[2])
# e_startmult(self.args[1],self.args[2])
# e_stopmult(self.args[0])
"""
s = [program.curr_block.new_reg('s') for i in range(9)]
c = [program.curr_block.new_reg('c') for i in range(3)]
triple(s[0], s[1], s[2])
subs(s[3], self.args[1], s[0])
subs(s[4], self.args[2], s[1])
startopen(s[3], s[4])
stopopen(c[0], c[1])
mulm(s[5], s[1], c[0])
mulm(s[6], s[0], c[1])
mulc(c[2], c[0], c[1])
adds(s[7], s[2], s[5])
adds(s[8], s[7], s[6])
addm(self.args[0], s[8], c[2])
"""
""" Extended (NEC) secret multiplication $s_i = s_j \cdot s_k$. """
#emuls(self.args[0],self.args[1],self.args[2])
"""
s = [program.curr_block.new_reg('s') for i in range(9)]
c = [program.curr_block.new_reg('c') for i in range(3)]
triple(s[0], s[1], s[2])
esubs(s[3], self.args[1], s[0])
esubs(s[4], self.args[2], s[1])
estartopen(s[3], s[4])
estopopen(c[0], c[1])
emulm(s[5], s[1], c[0])
emulm(s[6], s[0], c[1])
mulc(c[2], c[0], c[1])
eadds(s[7], s[2], s[5])
eadds(s[8], s[7], s[6])
eaddm(self.args[0], s[8], c[2])
"""
#@base.gf2n
#@base.vectorize
#class emuls(base.AddBase):
""" Secret multiplication $s_i = s_j \cdot s_k$. """
# code = base.opcodes['EMULS']
# __slots__ = []
# arg_format = ['sw','s','s']
@base.gf2n
@base.vectorize
class sqrs(base.CISC):
""" Secret squaring $s_i = s_j \cdot s_j$. """
__slots__ = []
arg_format = ['sw', 's']
def expand(self):
s = [program.curr_block.new_reg('s') for i in range(6)]
c = [program.curr_block.new_reg('c') for i in range(2)]
square(s[0], s[1])
subs(s[2], self.args[1], s[0])
asm_open(c[0], s[2])
mulc(c[1], c[0], c[0])
mulm(s[3], self.args[1], c[0])
adds(s[4], s[3], s[3])
adds(s[5], s[1], s[4])
subml(self.args[0], s[5], c[1])
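        # NOTE (informal, added for clarity): with a preprocessed square pair
        # (a, a^2) and the opened value c = x - a, the lines above use
        #     x^2 = a^2 + 2*x*c - c^2,
        # since a^2 + 2*x*(x - a) - (x - a)^2 expands to x^2.  Illustrative check:
        #     x, a = 9, 4                      # hypothetical values
        #     c = x - a
        #     assert a * a + 2 * x * c - c * c == x * x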
@base.gf2n
@base.vectorize
class lts(base.CISC):
""" Secret comparison $s_i = (s_j < s_k)$. """
__slots__ = []
arg_format = ['sw', 's', 's', 'int', 'int']
def expand(self):
a = program.curr_block.new_reg('s')
subs(a, self.args[1], self.args[2])
comparison.LTZ(self.args[0], a, self.args[3], self.args[4])
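        # NOTE (informal, added for clarity): strict comparison is reduced to a
        # sign test of the difference, s_j < s_k  <=>  (s_j - s_k) < 0, with
        # self.args[3] the bit length and self.args[4] the statistical security
        # parameter passed through to comparison.LTZ.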
@base.vectorize
class g2muls(base.CISC):
r""" Secret GF(2) multiplication """
__slots__ = []
arg_format = ['sgw','sg','sg']
def expand(self):
s = [program.curr_block.new_reg('sg') for i in range(9)]
c = [program.curr_block.new_reg('cg') for i in range(3)]
gbittriple(s[0], s[1], s[2])
gsubs(s[3], self.args[1], s[0])
gsubs(s[4], self.args[2], s[1])
gstartopen(s[3], s[4])
gstopopen(c[0], c[1])
gmulbitm(s[5], s[1], c[0])
gmulbitm(s[6], s[0], c[1])
gmulbitc(c[2], c[0], c[1])
gadds(s[7], s[2], s[5])
gadds(s[8], s[7], s[6])
gaddm(self.args[0], s[8], c[2])
#@base.vectorize
#class gmulbits(base.CISC):
# r""" Secret $GF(2^n) \times GF(2)$ multiplication """
# __slots__ = []
# arg_format = ['sgw','sg','sg']
#
# def expand(self):
# s = [program.curr_block.new_reg('s') for i in range(9)]
# c = [program.curr_block.new_reg('c') for i in range(3)]
# g2ntriple(s[0], s[1], s[2])
# subs(s[3], self.args[1], s[0])
# subs(s[4], self.args[2], s[1])
# startopen(s[3], s[4])
# stopopen(c[0], c[1])
# mulm(s[5], s[1], c[0])
# mulm(s[6], s[0], c[1])
# mulc(c[2], c[0], c[1])
# adds(s[7], s[2], s[5])
# adds(s[8], s[7], s[6])
# addm(self.args[0], s[8], c[2])
# hack for circular dependency
from Compiler import comparison
import itertools
import tools
from random import randint
from Compiler.config import *
from Compiler.exceptions import *
import Compiler.instructions_base as base
import math
_python_input = input
@base.gf2n
@base.vectorize
class ldi(base.Instruction):
    __slots__ = []
code = base.opcodes['LDI']
arg_format = ['cw','i']
def execute(self):
self.args[0].value = self.args[1]
@base.gf2n
@base.vectorize
class ldsi(base.Instruction):
__slots__ = []
code = base.opcodes['LDSI']
arg_format = ['sw','i']
def execute(self):
self.args[0].value = self.args[1]
@base.gf2n
@base.vectorize
class ldmc(base.DirectMemoryInstruction, base.ReadMemoryInstruction):
__slots__ = ["code"]
code = base.opcodes['LDMC']
arg_format = ['cw','int']
def execute(self):
self.args[0].value = program.mem_c[self.args[1]]
@base.gf2n
@base.vectorize
class ldms(base.DirectMemoryInstruction, base.ReadMemoryInstruction):
__slots__ = ["code"]
code = base.opcodes['LDMS']
arg_format = ['sw','int']
def execute(self):
self.args[0].value = program.mem_s[self.args[1]]
@base.gf2n
@base.vectorize
class stmc(base.DirectMemoryWriteInstruction):
__slots__ = ["code"]
code = base.opcodes['STMC']
arg_format = ['c','int']
def execute(self):
program.mem_c[self.args[1]] = self.args[0].value
@base.gf2n
@base.vectorize
class stms(base.DirectMemoryWriteInstruction):
__slots__ = ["code"]
code = base.opcodes['STMS']
arg_format = ['s','int']
def execute(self):
program.mem_s[self.args[1]] = self.args[0].value
@base.vectorize
class ldmint(base.DirectMemoryInstruction, base.ReadMemoryInstruction):
__slots__ = ["code"]
code = base.opcodes['LDMINT']
arg_format = ['ciw','int']
def execute(self):
self.args[0].value = program.mem_i[self.args[1]]
@base.vectorize
class stmint(base.DirectMemoryWriteInstruction):
__slots__ = ["code"]
code = base.opcodes['STMINT']
arg_format = ['ci','int']
def execute(self):
program.mem_i[self.args[1]] = self.args[0].value
@base.vectorize
class ldmci(base.ReadMemoryInstruction):
code = base.opcodes['LDMCI']
arg_format = ['cw','ci']
def execute(self):
self.args[0].value = program.mem_c[self.args[1].value]
@base.vectorize
class ldmsi(base.ReadMemoryInstruction):
code = base.opcodes['LDMSI']
arg_format = ['sw','ci']
def execute(self):
self.args[0].value = program.mem_s[self.args[1].value]
@base.vectorize
class stmci(base.WriteMemoryInstruction):
code = base.opcodes['STMCI']
arg_format = ['c','ci']
def execute(self):
program.mem_c[self.args[1].value] = self.args[0].value
@base.vectorize
class stmsi(base.WriteMemoryInstruction):
code = base.opcodes['STMSI']
arg_format = ['s','ci']
def execute(self):
program.mem_s[self.args[1].value] = self.args[0].value
@base.vectorize
class ldminti(base.ReadMemoryInstruction):
code = base.opcodes['LDMINTI']
arg_format = ['ciw','ci']
def execute(self):
self.args[0].value = program.mem_i[self.args[1].value]
@base.vectorize
class stminti(base.WriteMemoryInstruction):
code = base.opcodes['STMINTI']
arg_format = ['ci','ci']
def execute(self):
program.mem_i[self.args[1].value] = self.args[0].value
@base.vectorize
class gldmci(base.ReadMemoryInstruction):
code = base.opcodes['LDMCI'] + 0x100
arg_format = ['cgw','ci']
def execute(self):
self.args[0].value = program.mem_c[self.args[1].value]
@base.vectorize
class gldmsi(base.ReadMemoryInstruction):
code = base.opcodes['LDMSI'] + 0x100
arg_format = ['sgw','ci']
def execute(self):
self.args[0].value = program.mem_s[self.args[1].value]
@base.vectorize
class gstmci(base.WriteMemoryInstruction):
code = base.opcodes['STMCI'] + 0x100
arg_format = ['cg','ci']
def execute(self):
program.mem_c[self.args[1].value] = self.args[0].value
@base.vectorize
class gstmsi(base.WriteMemoryInstruction):
code = base.opcodes['STMSI'] + 0x100
arg_format = ['sg','ci']
def execute(self):
program.mem_s[self.args[1].value] = self.args[0].value
@base.gf2n
@base.vectorize
class protectmems(base.Instruction):
code = base.opcodes['PROTECTMEMS']
arg_format = ['ci','ci']
@base.gf2n
@base.vectorize
class protectmemc(base.Instruction):
code = base.opcodes['PROTECTMEMC']
arg_format = ['ci','ci']
@base.gf2n
@base.vectorize
class protectmemint(base.Instruction):
code = base.opcodes['PROTECTMEMINT']
arg_format = ['ci','ci']
@base.gf2n
@base.vectorize
class movc(base.Instruction):
__slots__ = ["code"]
code = base.opcodes['MOVC']
arg_format = ['cw','c']
def execute(self):
self.args[0].value = self.args[1].value
@base.gf2n
@base.vectorize
class movs(base.Instruction):
__slots__ = ["code"]
code = base.opcodes['MOVS']
arg_format = ['sw','s']
def execute(self):
self.args[0].value = self.args[1].value
@base.vectorize
class movint(base.Instruction):
__slots__ = ["code"]
code = base.opcodes['MOVINT']
arg_format = ['ciw','ci']
@base.vectorize
class pushint(base.StackInstruction):
code = base.opcodes['PUSHINT']
arg_format = ['ci']
@base.vectorize
class popint(base.StackInstruction):
code = base.opcodes['POPINT']
arg_format = ['ciw']
@base.vectorize
class ldtn(base.Instruction):
code = base.opcodes['LDTN']
arg_format = ['ciw']
@base.vectorize
class ldarg(base.Instruction):
code = base.opcodes['LDARG']
arg_format = ['ciw']
@base.vectorize
class starg(base.Instruction):
code = base.opcodes['STARG']
arg_format = ['ci']
@base.gf2n
class reqbl(base.Instruction):
code = base.opcodes['REQBL']
arg_format = ['int']
class time(base.Instruction):
code = base.opcodes['TIME']
arg_format = []
class start(base.Instruction):
code = base.opcodes['START']
arg_format = ['i']
class stop(base.Instruction):
code = base.opcodes['STOP']
arg_format = ['i']
class use(base.Instruction):
code = base.opcodes['USE']
arg_format = ['int','int','int']
class use_inp(base.Instruction):
code = base.opcodes['USE_INP']
arg_format = ['int','int','int']
class run_tape(base.Instruction):
code = base.opcodes['RUN_TAPE']
arg_format = ['int','int','int']
class join_tape(base.Instruction):
code = base.opcodes['JOIN_TAPE']
arg_format = ['int']
class crash(base.IOInstruction):
code = base.opcodes['CRASH']
arg_format = []
@base.gf2n
class use_prep(base.Instruction):
code = base.opcodes['USE_PREP']
arg_format = ['str','int']
@base.gf2n
@base.vectorize
class addc(base.AddBase):
__slots__ = []
code = base.opcodes['ADDC']
arg_format = ['cw','c','c']
@base.gf2n
@base.vectorize
class adds(base.AddBase):
__slots__ = []
code = base.opcodes['ADDS']
arg_format = ['sw','s','s']
@base.gf2n
@base.vectorize
class addm(base.AddBase):
__slots__ = []
code = base.opcodes['ADDM']
arg_format = ['sw','s','c']
@base.gf2n
@base.vectorize
class subc(base.SubBase):
__slots__ = []
code = base.opcodes['SUBC']
arg_format = ['cw','c','c']
@base.gf2n
@base.vectorize
class subs(base.SubBase):
__slots__ = []
code = base.opcodes['SUBS']
arg_format = ['sw','s','s']
@base.gf2n
@base.vectorize
class subml(base.SubBase):
__slots__ = []
code = base.opcodes['SUBML']
arg_format = ['sw','s','c']
@base.gf2n
@base.vectorize
class submr(base.SubBase):
__slots__ = []
code = base.opcodes['SUBMR']
arg_format = ['sw','c','s']
@base.gf2n
@base.vectorize
class mulc(base.MulBase):
__slots__ = []
code = base.opcodes['MULC']
arg_format = ['cw','c','c']
@base.gf2n
@base.vectorize
class mulm(base.MulBase):
__slots__ = []
code = base.opcodes['MULM']
arg_format = ['sw','s','c']
@base.gf2n
@base.vectorize
class divc(base.Instruction):
__slots__ = []
code = base.opcodes['DIVC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = self.args[1].value * pow(self.args[2].value, program.P-2, program.P) % program.P
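        # NOTE (informal, added for clarity): clear division uses Fermat's little
        # theorem: for prime p and b != 0 mod p, b**(p-2) = b**(-1) (mod p).
        # Illustrative check with hypothetical p = 7, b = 3:
        #     assert pow(3, 7 - 2, 7) == 5 and (3 * 5) % 7 == 1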
@base.gf2n
@base.vectorize
class modc(base.Instruction):
__slots__ = []
code = base.opcodes['MODC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = self.args[1].value % self.args[2].value
@base.vectorize
class legendrec(base.Instruction):
__slots__ = []
code = base.opcodes['LEGENDREC']
arg_format = ['cw','c']
@base.vectorize
class digestc(base.Instruction):
__slots__ = []
code = base.opcodes['DIGESTC']
arg_format = ['cw','c','int']
@base.gf2n
@base.vectorize
class andc(base.Instruction):
__slots__ = []
code = base.opcodes['ANDC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = (self.args[1].value & self.args[2].value) % program.P
@base.gf2n
@base.vectorize
class orc(base.Instruction):
__slots__ = []
code = base.opcodes['ORC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = (self.args[1].value | self.args[2].value) % program.P
@base.gf2n
@base.vectorize
class xorc(base.Instruction):
__slots__ = []
code = base.opcodes['XORC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = (self.args[1].value ^ self.args[2].value) % program.P
@base.vectorize
class notc(base.Instruction):
__slots__ = []
code = base.opcodes['NOTC']
arg_format = ['cw','c', 'int']
def execute(self):
self.args[0].value = (~self.args[1].value + 2 ** self.args[2]) % program.P
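        # NOTE (informal, added for clarity): for 0 <= x < 2**k, Python's ~x equals
        # -x - 1, so (~x + 2**k) = 2**k - 1 - x, i.e. bitwise NOT restricted to the
        # low self.args[2] bits.  Illustrative check with k = 4, x = 0b1010:
        #     assert (~0b1010 + 2 ** 4) == 0b0101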
@base.vectorize
class gnotc(base.Instruction):
__slots__ = []
code = (1 << 8) + base.opcodes['NOTC']
arg_format = ['cgw','cg']
def is_gf2n(self):
return True
def execute(self):
self.args[0].value = ~self.args[1].value
@base.vectorize
class gbitdec(base.Instruction):
__slots__ = []
code = base.opcodes['GBITDEC']
arg_format = tools.chain(['cg', 'int'], itertools.repeat('cgw'))
    def is_gf2n(self):
return True
def has_var_args(self):
return True
@base.vectorize
class e_skew_bit_dec(base.Instruction):
__slots__ = []
code = base.opcodes['E_SKEW_BIT_DEC']
arg_format = tools.chain(['s', 'int'], itertools.repeat('sgw'))
class e_skew_bit_rec(base.Instruction):
__slots__ = []
code = base.opcodes['E_SKEW_BIT_REC']
arg_format = ['sg', 'sgw', 'sgw', 'sgw']
@base.vectorize
class e_skew_bit_inj(base.Instruction):
__slots__ = []
code = base.opcodes['E_SKEW_BIT_INJ']
arg_format = ['sg', 'sw', 'sw', 'sw']
class e_skew_ring_rec(base.Instruction):
__slots__ = []
code = base.opcodes['E_SKEW_RING_REC']
arg_format = tools.chain(['sw', 'int'], itertools.repeat('sg'))
@base.vectorize
class gbitcom(base.Instruction):
__slots__ = []
code = base.opcodes['GBITCOM']
arg_format = tools.chain(['cgw', 'int'], itertools.repeat('cg'))
    def is_gf2n(self):
return True
def has_var_args(self):
return True
@base.vectorize
class gmulbitc(base.MulBase):
    __slots__ = []
    code = base.opcodes['GMULBITC']
arg_format = ['cgw','cg','cg']
def is_gf2n(self):
return True
@base.vectorize
class gmulbitm(base.MulBase):
__slots__ = []
code = base.opcodes['GMULBITM']
arg_format = ['sgw','sg','cg']
def is_gf2n(self):
return True
@base.gf2n
@base.vectorize
class addci(base.ClearImmediate):
    __slots__ = []
code = base.opcodes['ADDCI']
op = '__add__'
@base.gf2n
@base.vectorize
class addsi(base.SharedImmediate):
__slots__ = []
code = base.opcodes['ADDSI']
op = '__add__'
@base.gf2n
@base.vectorize
class subci(base.ClearImmediate):
__slots__ = []
code = base.opcodes['SUBCI']
op = '__sub__'
@base.gf2n
@base.vectorize
class subsi(base.SharedImmediate):
__slots__ = []
code = base.opcodes['SUBSI']
op = '__sub__'
@base.gf2n
@base.vectorize
class subcfi(base.ClearImmediate):
__slots__ = []
code = base.opcodes['SUBCFI']
op = '__rsub__'
@base.gf2n
@base.vectorize
class subsfi(base.SharedImmediate):
__slots__ = []
code = base.opcodes['SUBSFI']
op = '__rsub__'
@base.gf2n
@base.vectorize
class mulci(base.ClearImmediate):
__slots__ = []
code = base.opcodes['MULCI']
op = '__mul__'
@base.gf2n
@base.vectorize
class mulsi(base.SharedImmediate):
__slots__ = []
code = base.opcodes['MULSI']
op = '__mul__'
@base.gf2n
@base.vectorize
class divci(base.ClearImmediate):
__slots__ = []
code = base.opcodes['DIVCI']
def execute(self):
self.args[0].value = self.args[1].value * pow(self.args[2], program.P-2, program.P) % program.P
@base.gf2n
@base.vectorize
class modci(base.ClearImmediate):
__slots__ = []
code = base.opcodes['MODCI']
op = '__mod__'
@base.gf2n
@base.vectorize
class andci(base.ClearImmediate):
__slots__ = []
code = base.opcodes['ANDCI']
op = '__and__'
@base.gf2n
@base.vectorize
class xorci(base.ClearImmediate):
__slots__ = []
code = base.opcodes['XORCI']
op = '__xor__'
@base.gf2n
@base.vectorize
class orci(base.ClearImmediate):
__slots__ = []
code = base.opcodes['ORCI']
op = '__or__'
@base.gf2n
@base.vectorize
class shlc(base.Instruction):
__slots__ = []
code = base.opcodes['SHLC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = (self.args[1].value << self.args[2].value) % program.P
@base.gf2n
@base.vectorize
class shrc(base.Instruction):
__slots__ = []
code = base.opcodes['SHRC']
arg_format = ['cw','c','c']
def execute(self):
self.args[0].value = (self.args[1].value >> self.args[2].value) % program.P
@base.gf2n
@base.vectorize
class shlci(base.ClearShiftInstruction):
__slots__ = []
code = base.opcodes['SHLCI']
op = '__lshift__'
@base.gf2n
@base.vectorize
class shrci(base.ClearShiftInstruction):
__slots__ = []
code = base.opcodes['SHRCI']
op = '__rshift__'
@base.gf2n
@base.vectorize
class triple(base.DataInstruction):
__slots__ = ['data_type']
code = base.opcodes['TRIPLE']
arg_format = ['sw','sw','sw']
data_type = 'triple'
def execute(self):
self.args[0].value = randint(0,program.P)
self.args[1].value = randint(0,program.P)
self.args[2].value = (self.args[0].value * self.args[1].value) % program.P
@base.vectorize
class gbittriple(base.DataInstruction):
__slots__ = ['data_type']
code = base.opcodes['GBITTRIPLE']
arg_format = ['sgw','sgw','sgw']
data_type = 'bittriple'
field_type = 'gf2n'
def is_gf2n(self):
return True
@base.vectorize
class gbitgf2ntriple(base.DataInstruction):
code = base.opcodes['GBITGF2NTRIPLE']
arg_format = ['sgw','sgw','sgw']
data_type = 'bitgf2ntriple'
field_type = 'gf2n'
def is_gf2n(self):
return True
@base.gf2n
@base.vectorize
class bit(base.DataInstruction):
__slots__ = []
code = base.opcodes['BIT']
arg_format = ['sw']
data_type = 'bit'
def execute(self):
self.args[0].value = randint(0,1)
@base.gf2n
@base.vectorize
class square(base.DataInstruction):
__slots__ = []
code = base.opcodes['SQUARE']
arg_format = ['sw','sw']
data_type = 'square'
def execute(self):
self.args[0].value = randint(0,program.P)
self.args[1].value = (self.args[0].value * self.args[0].value) % program.P
@base.gf2n
@base.vectorize
class inverse(base.DataInstruction):
__slots__ = []
code = base.opcodes['INV']
arg_format = ['sw','sw']
data_type = 'inverse'
def execute(self):
self.args[0].value = randint(0,program.P)
import gmpy
self.args[1].value = int(gmpy.invert(self.args[0].value, program.P))
@base.gf2n
@base.vectorize
class inputmask(base.Instruction):
__slots__ = []
code = base.opcodes['INPUTMASK']
arg_format = ['sw', 'p']
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, 'input', self.args[1]), \
self.get_size())
@base.gf2n
@base.vectorize
class prep(base.Instruction):
__slots__ = []
code = base.opcodes['PREP']
arg_format = tools.chain(['str'], itertools.repeat('sw'))
gf2n_arg_format = tools.chain(['str'], itertools.repeat('sgw'))
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, self.args[0]), 1)
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class asm_input(base.IOInstruction):
__slots__ = []
code = base.opcodes['INPUT']
arg_format = ['sw', 'p']
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, 'input', self.args[1]), \
self.get_size())
def execute(self):
self.args[0].value = _python_input("Enter player %d's input:" % self.args[1]) % program.P
@base.gf2n
class startinput(base.RawInputInstruction):
__slots__ = []
code = base.opcodes['STARTINPUT']
arg_format = ['p', 'int']
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, 'input', self.args[0]), \
self.args[1])
class stopinput(base.RawInputInstruction):
__slots__ = []
code = base.opcodes['STOPINPUT']
arg_format = tools.chain(['p'], itertools.repeat('sw'))
def has_var_args(self):
return True
class gstopinput(base.RawInputInstruction):
__slots__ = []
code = 0x100 + base.opcodes['STOPINPUT']
arg_format = tools.chain(['p'], itertools.repeat('sgw'))
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class print_mem(base.IOInstruction):
__slots__ = []
code = base.opcodes['PRINTMEM']
arg_format = ['c']
def execute(self):
pass
@base.gf2n
@base.vectorize
class print_reg(base.IOInstruction):
__slots__ = []
code = base.opcodes['PRINTREG']
arg_format = ['c','i']
def __init__(self, reg, comment=''):
super(print_reg_class, self).__init__(reg, self.str_to_int(comment))
def execute(self):
pass
@base.gf2n
@base.vectorize
class print_reg_plain(base.IOInstruction):
__slots__ = []
code = base.opcodes['PRINTREGPLAIN']
arg_format = ['c']
#@base.gf2n
@base.vectorize
class e_print_fixed_plain(base.IOInstruction):
__slots__ = []
code = base.opcodes['E_PRINTFIXEDPLAIN']
arg_format = ['c', 'int']
@base.vectorize
class print_float_plain(base.IOInstruction):
__slots__ = []
code = base.opcodes['PRINTFLOATPLAIN']
arg_format = ['c', 'c', 'c', 'c']
class print_int(base.IOInstruction):
__slots__ = []
code = base.opcodes['PRINTINT']
arg_format = ['ci']
class print_char(base.IOInstruction):
code = base.opcodes['PRINTCHR']
arg_format = ['int']
def __init__(self, ch):
super(print_char, self).__init__(ord(ch))
class print_char4(base.IOInstruction):
code = base.opcodes['PRINTSTR']
arg_format = ['int']
def __init__(self, val):
super(print_char4, self).__init__(self.str_to_int(val))
@base.vectorize
class print_char_regint(base.IOInstruction):
code = base.opcodes['PRINTCHRINT']
arg_format = ['ci']
@base.vectorize
class print_char4_regint(base.IOInstruction):
code = base.opcodes['PRINTSTRINT']
arg_format = ['ci']
@base.vectorize
class pubinput(base.PublicFileIOInstruction):
__slots__ = []
code = base.opcodes['PUBINPUT']
arg_format = ['ciw']
@base.vectorize
class readsocketc(base.IOInstruction):
__slots__ = []
code = base.opcodes['READSOCKETC']
arg_format = tools.chain(['ci'], itertools.repeat('cw'))
def has_var_args(self):
return True
@base.vectorize
class readsockets(base.IOInstruction):
__slots__ = []
code = base.opcodes['READSOCKETS']
arg_format = tools.chain(['ci'], itertools.repeat('sw'))
def has_var_args(self):
return True
@base.vectorize
class readsocketint(base.IOInstruction):
__slots__ = []
code = base.opcodes['READSOCKETINT']
arg_format = tools.chain(['ci'], itertools.repeat('ciw'))
def has_var_args(self):
return True
@base.vectorize
class writesocketc(base.IOInstruction):
__slots__ = []
code = base.opcodes['WRITESOCKETC']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('c'))
def has_var_args(self):
return True
@base.vectorize
class writesockets(base.IOInstruction):
__slots__ = []
code = base.opcodes['WRITESOCKETS']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('s'))
def has_var_args(self):
return True
@base.vectorize
class writesocketshare(base.IOInstruction):
__slots__ = []
code = base.opcodes['WRITESOCKETSHARE']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('s'))
def has_var_args(self):
return True
@base.vectorize
class writesocketint(base.IOInstruction):
__slots__ = []
code = base.opcodes['WRITESOCKETINT']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('ci'))
def has_var_args(self):
return True
class listen(base.IOInstruction):
__slots__ = []
code = base.opcodes['LISTEN']
arg_format = ['int']
class acceptclientconnection(base.IOInstruction):
__slots__ = []
code = base.opcodes['ACCEPTCLIENTCONNECTION']
arg_format = ['ciw', 'int']
class connectipv4(base.IOInstruction):
__slots__ = []
code = base.opcodes['CONNECTIPV4']
arg_format = ['ciw', 'ci', 'int']
class readclientpublickey(base.IOInstruction):
__slots__ = []
code = base.opcodes['READCLIENTPUBLICKEY']
arg_format = tools.chain(['ci'], itertools.repeat('ci'))
def has_var_args(self):
return True
class initsecuresocket(base.IOInstruction):
__slots__ = []
code = base.opcodes['INITSECURESOCKET']
arg_format = tools.chain(['ci'], itertools.repeat('ci'))
def has_var_args(self):
return True
class respsecuresocket(base.IOInstruction):
__slots__ = []
code = base.opcodes['RESPSECURESOCKET']
arg_format = tools.chain(['ci'], itertools.repeat('ci'))
def has_var_args(self):
return True
class writesharestofile(base.IOInstruction):
__slots__ = []
code = base.opcodes['WRITEFILESHARE']
arg_format = itertools.repeat('s')
def has_var_args(self):
return True
class readsharesfromfile(base.IOInstruction):
__slots__ = []
code = base.opcodes['READFILESHARE']
arg_format = tools.chain(['ci', 'ciw'], itertools.repeat('sw'))
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class raw_output(base.PublicFileIOInstruction):
__slots__ = []
code = base.opcodes['RAWOUTPUT']
arg_format = ['c']
@base.gf2n
@base.vectorize
class startprivateoutput(base.Instruction):
__slots__ = []
code = base.opcodes['STARTPRIVATEOUTPUT']
arg_format = ['sw','s','p']
@base.gf2n
@base.vectorize
class stopprivateoutput(base.Instruction):
__slots__ = []
code = base.opcodes['STOPPRIVATEOUTPUT']
arg_format = ['c','p']
@base.vectorize
class rand(base.Instruction):
__slots__ = []
code = base.opcodes['RAND']
arg_format = ['ciw','ci']
###
### Integer operations
###
@base.vectorize
class ldint(base.Instruction):
__slots__ = []
code = base.opcodes['LDINT']
arg_format = ['ciw', 'i']
@base.vectorize
class addint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['ADDINT']
@base.vectorize
class subint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['SUBINT']
@base.vectorize
class mulint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['MULINT']
@base.vectorize
class divint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['DIVINT']
###
### Clear comparison instructions
###
@base.vectorize
class eqzc(base.UnaryComparisonInstruction):
__slots__ = []
code = base.opcodes['EQZC']
def execute(self):
if self.args[1].value == 0:
self.args[0].value = 1
else:
self.args[0].value = 0
@base.vectorize
class ltzc(base.UnaryComparisonInstruction):
__slots__ = []
code = base.opcodes['LTZC']
@base.vectorize
class ltc(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['LTC']
@base.vectorize
class gtc(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['GTC']
@base.vectorize
class eqc(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['EQC']
###
### Jumps etc
###
class jmp(base.JumpInstruction):
__slots__ = []
code = base.opcodes['JMP']
arg_format = ['int']
jump_arg = 0
def execute(self):
pass
class jmpi(base.JumpInstruction):
__slots__ = []
code = base.opcodes['JMPI']
arg_format = ['ci']
jump_arg = 0
class jmpnz(base.JumpInstruction):
__slots__ = []
code = base.opcodes['JMPNZ']
arg_format = ['ci', 'int']
jump_arg = 1
def execute(self):
pass
class jmpeqz(base.JumpInstruction):
__slots__ = []
code = base.opcodes['JMPEQZ']
arg_format = ['ci', 'int']
jump_arg = 1
def execute(self):
pass
###
### Conversions
###
@base.gf2n
@base.vectorize
class convint(base.Instruction):
__slots__ = []
code = base.opcodes['CONVINT']
arg_format = ['cw', 'ci']
@base.vectorize
class convmodp(base.Instruction):
__slots__ = []
code = base.opcodes['CONVMODP']
arg_format = ['ciw', 'c', 'int']
def __init__(self, *args, **kwargs):
bitlength = kwargs.get('bitlength', program.bit_length)
super(convmodp_class, self).__init__(*(args + (bitlength,)))
@base.vectorize
class gconvgf2n(base.Instruction):
__slots__ = []
code = base.opcodes['GCONVGF2N']
arg_format = ['ciw', 'cg']
###
### Other instructions
###
@base.gf2n
@base.vectorize
class startopen(base.VarArgsInstruction):
__slots__ = []
code = base.opcodes['STARTOPEN']
arg_format = itertools.repeat('s')
def execute(self):
for arg in self.args[::-1]:
program.curr_block.open_queue.append(arg.value)
@base.gf2n
@base.vectorize
class e_startopen(startopen_class):
__slots__ = []
code = base.opcodes['E_STARTOPEN']
arg_format = itertools.repeat('s')
def execute(self):
for arg in self.args[::-1]:
program.curr_block.open_queue.append(arg.value)
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class stopopen(base.VarArgsInstruction):
__slots__ = []
code = base.opcodes['STOPOPEN']
arg_format = itertools.repeat('cw')
def execute(self):
for arg in self.args:
arg.value = program.curr_block.open_queue.pop()
@base.gf2n
@base.vectorize
class e_stopopen(stopopen_class):
__slots__ = []
code = base.opcodes['E_STOPOPEN']
arg_format = itertools.repeat('cw')
def execute(self):
for arg in self.args:
arg.value = program.curr_block.open_queue.pop()
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class e_mult(base.VarArgsInstruction):
__slots__ = []
code = base.opcodes['E_MULT']
arg_format = tools.cycle(['sw', 's', 's'])
# rename 'open' to avoid conflict with built-in open function
@base.gf2n
@base.vectorize
class asm_open(base.VarArgsInstruction):
__slots__ = []
code = base.opcodes['OPEN']
arg_format = tools.cycle(['cw','s'])
###
### CISC-style instructions
###
# rename 'open' to avoid conflict with built-in open function
# @base.gf2n
# @base.vectorize
# class asm_open(base.CISC):
# """ Open the value in $s_j$ and assign it to $c_i$. """
# __slots__ = []
# arg_format = ['cw','s']
#
# def expand(self):
#
# startopen(self.args[1])
# stopopen(self.args[0])
#
#
# """ Extended (NEC) open the value in $s_j$ and assign it to $c_i$. """
# #estartopen(self.args[1])
# #estopopen(self.args[0])
@base.gf2n
@base.vectorize
class e_lessthan(base.CISC):
__slots__ = []
arg_format = ['s','s','int','sgw']
def expand(self):
step = self.args[2]
tmp = program.curr_block.new_reg('s')
bit_array_sub = [program.curr_block.new_reg('sg') for _ in range(step)]
# signed ver. (start)
prod_left = program.curr_block.new_reg('sg')
prod_right = program.curr_block.new_reg('sg')
prod = program.curr_block.new_reg('sg')
ans = program.curr_block.new_reg('sg')
bit_array_self = [program.curr_block.new_reg('sg') for _ in range(step)]
bit_array_other = [program.curr_block.new_reg('sg') for _ in range(step)]
# signed ver. (end)
subs(tmp, self.args[0], self.args[1])
e_bitdec(tmp, step, *bit_array_sub)
# signed ver. (start)
e_bitdec(self.args[0], step, *bit_array_self)
e_bitdec(self.args[1], step, *bit_array_other)
gadds(prod_left, bit_array_self[step - 1], bit_array_other[step - 1])
gadds(prod_right, bit_array_sub[step - 1], bit_array_self[step - 1])
gmuls(prod, prod_left, prod_right)
# ge_startmult(prod_left, prod_right)
# ge_stopmult(prod)
gadds(self.args[3], prod, bit_array_sub[step - 1])
# signed ver. (end)
# DEBUG (start)
# DEBUG (end)
# result = bit_array_sub[step - 1].e_bit_inject()
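        # NOTE (informal, added for clarity): writing a = msb(x), b = msb(y) and
        # s = msb(x - y), the signed-version output computed above is
        #     lt = s ^ ((a ^ b) & (s ^ a)).
        # If the signs agree (a ^ b == 0) the subtraction cannot wrap, so lt = s;
        # if they differ, lt collapses to a, i.e. x < y exactly when x is negative.
        # Illustrative check:
        #     for a in (0, 1):
        #         for b in (0, 1):
        #             for s in (0, 1):
        #                 lt = s ^ ((a ^ b) & (s ^ a))
        #                 assert lt == (s if a == b else a)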
@base.gf2n
@base.vectorize
class e_trunc(base.CISC):
__slots__ = []
arg_format = ['s','int','sw']
def expand(self):
a = [program.curr_block.new_reg('sg') for _ in range(64)]
b = [program.curr_block.new_reg('sg') for _ in range(64)]
e_bitdec(self.args[0], 64, *a)
for i in range(64):
if i + self.args[1] >= 64 :
gldsi(b[i],0)
else :
b[i] = a[i + self.args[1]]
e_bitrec(self.args[2], 64, *b)
# return a
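        # NOTE (informal, added for clarity): e_trunc is a logical (zero-filled)
        # right shift by self.args[1] positions: decompose into 64 bits, drop the
        # low bits, zero the vacated high bits, recompose.  For example, x = 12
        # (0b1100) truncated by 2 yields 0b0011 = 3, i.e. floor(x / 4).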
@base.gf2n
@base.vectorize
class e_pow2(base.CISC):
__slots__ = []
arg_format = ['s', 'int', 'sw']
def expand(self):
m = int(math.ceil(math.log(self.args[1],2)))
ai = [program.curr_block.new_reg('sg') for _ in range(m)]
a = [program.curr_block.new_reg('s') for _ in range(m)]
pow2k = [program.curr_block.new_reg('c') for _ in range(m)]
tmp_x = [program.curr_block.new_reg('s') for _ in range(m)]
tmp2_x = [program.curr_block.new_reg('s') for _ in range(m)]
tmp3_x = [program.curr_block.new_reg('s') for _ in range(m)]
x = [program.curr_block.new_reg('s') for _ in range(m)]
        e_bitdec(self.args[0], m, *ai)
for i in range(m):
e_bitinj(ai[i], a[i])
ldi(pow2k[0], 2)
for i in range(0,m-1):
mulc(pow2k[i+1], pow2k[i], pow2k[i])
mulm(tmp_x[0], a[0], pow2k[0])
addsi(tmp2_x[0], tmp_x[0], 1)
subs(tmp3_x[0], tmp2_x[0], a[0])
for i in range(1,m):
mulm(tmp_x[i], a[i], pow2k[i])
addsi(tmp2_x[i], tmp_x[i], 1)
subs(tmp3_x[i], tmp2_x[i], a[i])
x[0] = tmp3_x[0]
for i in range(0,m-1):
muls(x[i+1], tmp3_x[i+1], x[i])
addsi(self.args[2], x[m-1], 0)
#addm(self.args[2],tmp, pow2k[3])
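        # NOTE (informal, added for clarity): with the exponent bits a_i of a,
        # each factor tmp3_x[i] = a_i * 2**(2**i) + 1 - a_i equals 2**(a_i * 2**i),
        # so their product is 2**a; pow2k[i] holds the public constant 2**(2**i).
        # Illustrative check for a = 5 = 0b101 with m = 3 (bits LSB first):
        #     bits = [1, 0, 1]
        #     prod = 1
        #     for i, a_i in enumerate(bits):
        #         prod *= a_i * 2 ** (2 ** i) + 1 - a_i
        #     assert prod == 2 ** 5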
#@base.gf2n
@base.vectorize
class e_prefixor(base.CISC):
__slots__ = []
arg_format = tools.chain(['s', 'int'], itertools.repeat('sw'))
def expand(self):
array1 = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
array2 = [program.curr_block.new_reg('s') for _ in range(self.args[1])]
garray = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
tmp1 = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
tmp2 = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
tmp3 = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
tmp4 = [program.curr_block.new_reg('sg') for _ in range(self.args[1])]
n = self.args[1]
e_bitdec(self.args[0], n, *array1)
        garray[0] = array1[n - 1]
e_bitinj(array1[n-1], self.args[2])
for i in range(1, n):
gaddsi(tmp1[i], array1[n - (i + 1)], 1)
gaddsi(tmp2[i], garray[i - 1], 1)
gmuls(tmp3[i], tmp1[i], tmp2[i])
gaddsi(garray[i], tmp3[i], 1)
e_bitinj(garray[i], self.args[2 + i])
        # OR(a, b) = ((1 + a) * (1 + b)) + 1 over GF(2)
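        # NOTE (informal, added for clarity): garray[i] is the running OR of the
        # top i + 1 bits (MSB first, since array1[n - 1] is the most significant),
        # and each prefix is injected back into an arithmetic share via e_bitinj.
        # The OR itself is De Morgan over GF(2): a | b = ((1 ^ a) & (1 ^ b)) ^ 1.
        # Illustrative check:
        #     for a in (0, 1):
        #         for b in (0, 1):
        #             assert ((1 ^ a) & (1 ^ b)) ^ 1 == (a | b)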
#@base.gf2n
@base.vectorize
class e_bitdec(base.CISC):
__slots__ = []
code = base.opcodes['E_BITDEC']
arg_format = tools.chain(['s', 'int'], itertools.repeat('sgw'))
def expand(self):
#conf = ConfigParser.ConfigParser()
#print conf
#conf.read('config.ini')
#print conf.get('DEFAULT', 'DEBUG')
#print inifile.get('default', 'type_of_decomposition')
#print conf.get('conversion', 'type_of_decomposition')
type_of_decomposition = "round_n"
if type_of_decomposition == 'round_sqrt':
#decomposition : square_root(n) round ver. (start)
skew_res = [program.curr_block.new_reg('sg') for i in range(3 * 64)]
x1_xor_x2 = [program.curr_block.new_reg('sg') for i in range(64)]
z = [program.curr_block.new_reg('sg') for i in range(64)]
in_c_left = [program.curr_block.new_reg('sg') for i in range(64)]
x1_xor_x3 = [program.curr_block.new_reg('sg') for i in range(64)]
in_c_prod = [program.curr_block.new_reg('sg') for i in range(64)]
c = [program.curr_block.new_reg('sg') for i in range(64 + 1)]
c_xor_d = [[program.curr_block.new_reg('sg') for i in range(64)] for j in range(2)]
in_d_left = [[program.curr_block.new_reg('sg') for i in range(64)] for j in range(2)]
in_d_prod = [[program.curr_block.new_reg('sg') for i in range(64)] for j in range(2)]
c_xor_z = [program.curr_block.new_reg('sg') for i in range(64)]
first_4bit_d = [program.curr_block.new_reg('sg') for i in range(5)]
d_4bit_block = [[program.curr_block.new_reg('sg') for i in range(5)] for j in range(2)]
d_5bit_block = [[program.curr_block.new_reg('sg') for i in range(6)] for j in range(2)]
d_6bit_block = [[program.curr_block.new_reg('sg') for i in range(7)] for j in range(2)]
d_7bit_block = [[program.curr_block.new_reg('sg') for i in range(8)] for j in range(2)]
d_8bit_block = [[program.curr_block.new_reg('sg') for i in range(9)] for j in range(2)]
d_9bit_block = [[program.curr_block.new_reg('sg') for i in range(10)] for j in range(2)]
d_10bit_block = [[program.curr_block.new_reg('sg') for i in range(11)] for j in range(2)]
d_11bit_block = [[program.curr_block.new_reg('sg') for i in range(12)] for j in range(2)]
in_mux_right_4 = [program.curr_block.new_reg('sg') for i in range(5)]
in_mux_prod_4 = [program.curr_block.new_reg('sg') for i in range(5)]
in_mux_right_5 = [program.curr_block.new_reg('sg') for i in range(6)]
in_mux_prod_5 = [program.curr_block.new_reg('sg') for i in range(6)]
in_mux_right_6 = [program.curr_block.new_reg('sg') for i in range(7)]
in_mux_prod_6 = [program.curr_block.new_reg('sg') for i in range(7)]
in_mux_right_7 = [program.curr_block.new_reg('sg') for i in range(8)]
in_mux_prod_7 = [program.curr_block.new_reg('sg') for i in range(8)]
in_mux_right_8 = [program.curr_block.new_reg('sg') for i in range(9)]
in_mux_prod_8 = [program.curr_block.new_reg('sg') for i in range(9)]
in_mux_right_9 = [program.curr_block.new_reg('sg') for i in range(10)]
in_mux_prod_9 = [program.curr_block.new_reg('sg') for i in range(10)]
in_mux_right_10 = [program.curr_block.new_reg('sg') for i in range(11)]
in_mux_prod_10 = [program.curr_block.new_reg('sg') for i in range(11)]
in_mux_right_11 = [program.curr_block.new_reg('sg') for i in range(12)]
in_mux_prod_11 = [program.curr_block.new_reg('sg') for i in range(12)]
e_skew_bit_dec(self.args[0], 64, *skew_res)
gldsi(c[0], 0)
gldsi(first_4bit_d[0], 0)
gldsi(d_4bit_block[0][0], 0)
gldsi(d_4bit_block[1][0], 1)
gldsi(d_5bit_block[0][0], 0)
gldsi(d_5bit_block[1][0], 1)
gldsi(d_6bit_block[0][0], 0)
gldsi(d_6bit_block[1][0], 1)
gldsi(d_7bit_block[0][0], 0)
gldsi(d_7bit_block[1][0], 1)
gldsi(d_8bit_block[0][0], 0)
gldsi(d_8bit_block[1][0], 1)
gldsi(d_9bit_block[0][0], 0)
gldsi(d_9bit_block[1][0], 1)
gldsi(d_10bit_block[0][0], 0)
gldsi(d_10bit_block[1][0], 1)
gldsi(d_11bit_block[0][0], 0)
gldsi(d_11bit_block[1][0], 1)
# compute all [z] and [c]
for j in range(64):
# compute [z]
gadds(x1_xor_x2[j], skew_res[3 * j], skew_res[3 * j + 1])
gadds(z[j], skew_res[3 * j + 2], x1_xor_x2[j])
# compute [c]
gaddsi(in_c_left[j], x1_xor_x2[j], 1)
gadds(x1_xor_x3[j], skew_res[3 * j], skew_res[3 * j + 2])
gmuls(in_c_prod[j], in_c_left[j], x1_xor_x3[j])
# ge_startmult(in_c_left[j], x1_xor_x3[j])
# ge_stopmult(in_c_prod[j])
gadds(c[j + 1], in_c_prod[j], skew_res[3 * j + 2])
# compute c_xor_z
gadds(c_xor_z[j], c[j], z[j])
# compute for first 4 bit and next 4bit
for j in range(4):
                # for first_4bit_d
gadds(c_xor_d[0][j], c[j], first_4bit_d[j])
gaddsi(in_d_left[0][j], c_xor_d[0][j], 1)
gmuls(in_d_prod[0][j], in_d_left[0][j], c_xor_z[j])
# ge_startmult(in_d_left[0][j], c_xor_z[j])
# ge_stopmult(in_d_prod[0][j])
gadds(first_4bit_d[j + 1], in_d_prod[0][j], z[j])
# compute [x|j]
gadds(self.args[2 + j], c_xor_z[j], first_4bit_d[j])
for i in range(2):
# for other block
# first bit of 4bit_block = 4th bit
gadds(c_xor_d[i][4+j], c[4+j], d_4bit_block[i][j])
gaddsi(in_d_left[i][4+j], c_xor_d[i][4+j], 1)
gmuls(in_d_prod[i][4+j], in_d_left[i][4+j], c_xor_z[4+j])
# ge_startmult(in_d_left[i][4+j], c_xor_z[4+j])
# ge_stopmult(in_d_prod[i][4+j])
gadds(d_4bit_block[i][j+1], in_d_prod[i][4+j], z[4+j])
# compute for next 5bit
for j in range(5):
for i in range(2):
# first bit of 5bit_block = 8th bit
gadds(c_xor_d[i][8+j], c[8+j], d_5bit_block[i][j])
gaddsi(in_d_left[i][8+j], c_xor_d[i][8+j], 1)
gmuls(in_d_prod[i][8+j], in_d_left[i][8+j], c_xor_z[8+j])
# ge_startmult(in_d_left[i][8+j], c_xor_z[8+j])
# ge_stopmult(in_d_prod[i][8+j])
gadds(d_5bit_block[i][j+1], in_d_prod[i][8+j], z[8+j])
# compute for next 6bit
for j in range(6):
for i in range(2):
# first bit of 6bit_block = 13th bit
gadds(c_xor_d[i][13+j], c[13+j], d_6bit_block[i][j])
gaddsi(in_d_left[i][13+j], c_xor_d[i][13+j], 1)
gmuls(in_d_prod[i][13+j], in_d_left[i][13+j], c_xor_z[13+j])
# ge_startmult(in_d_left[i][13+j], c_xor_z[13+j])
# ge_stopmult(in_d_prod[i][13+j])
gadds(d_6bit_block[i][j+1], in_d_prod[i][13+j], z[13+j])
# compute for next 7bit
for j in range(7):
for i in range(2):
# first bit of 7bit_block = 19th bit
gadds(c_xor_d[i][19+j], c[19+j], d_7bit_block[i][j])
gaddsi(in_d_left[i][19+j], c_xor_d[i][19+j], 1)
gmuls(in_d_prod[i][19+j], in_d_left[i][19+j], c_xor_z[19+j])
# ge_startmult(in_d_left[i][19+j], c_xor_z[19+j])
# ge_stopmult(in_d_prod[i][19+j])
gadds(d_7bit_block[i][j+1], in_d_prod[i][19+j], z[19+j])
# compute for next 8bit
for j in range(8):
for i in range(2):
# first bit of 8bit_block = 26th bit
gadds(c_xor_d[i][26 + j], c[26 + j], d_8bit_block[i][j])
gaddsi(in_d_left[i][26 + j], c_xor_d[i][26 + j], 1)
gmuls(in_d_prod[i][26 + j], in_d_left[i][26 + j], c_xor_z[26 + j])
# ge_startmult(in_d_left[i][26 + j], c_xor_z[26 + j])
# ge_stopmult(in_d_prod[i][26 + j])
gadds(d_8bit_block[i][j + 1], in_d_prod[i][26 + j], z[26 + j])
# compute for next 9bit
for j in range(9):
for i in range(2):
# first bit of 9bit_block = 34th bit
gadds(c_xor_d[i][34 + j], c[34 + j], d_9bit_block[i][j])
gaddsi(in_d_left[i][34 + j], c_xor_d[i][34 + j], 1)
gmuls(in_d_prod[i][34 + j], in_d_left[i][34 + j], c_xor_z[34 + j])
# ge_startmult(in_d_left[i][34 + j], c_xor_z[34 + j])
# ge_stopmult(in_d_prod[i][34 + j])
gadds(d_9bit_block[i][j + 1], in_d_prod[i][34 + j], z[34 + j])
# compute for next 10bit
for j in range(10):
for i in range(2):
                    # first bit of 10bit_block = 43rd bit
gadds(c_xor_d[i][43 + j], c[43 + j], d_10bit_block[i][j])
gaddsi(in_d_left[i][43 + j], c_xor_d[i][43 + j], 1)
gmuls(in_d_prod[i][43 + j], in_d_left[i][43 + j], c_xor_z[43 + j])
# ge_startmult(in_d_left[i][43 + j], c_xor_z[43 + j])
# ge_stopmult(in_d_prod[i][43 + j])
gadds(d_10bit_block[i][j + 1], in_d_prod[i][43 + j], z[43 + j])
# compute for next 11bit
for j in range(11):
for i in range(2):
                    # first bit of 11bit_block = 53rd bit
gadds(c_xor_d[i][53 + j], c[53 + j], d_11bit_block[i][j])
gaddsi(in_d_left[i][53 + j], c_xor_d[i][53 + j], 1)
gmuls(in_d_prod[i][53 + j], in_d_left[i][53 + j], c_xor_z[53 + j])
# ge_startmult(in_d_left[i][53 + j], c_xor_z[53 + j])
# ge_stopmult(in_d_prod[i][53 + j])
gadds(d_11bit_block[i][j + 1], in_d_prod[i][53 + j], z[53 + j])
# connect first 4bit and next 4bit block
selected_d_4bit_block = [program.curr_block.new_reg('sg') for i in range(5)]
for j in range(5):
# compute MUX
gadds(in_mux_right_4[j], d_4bit_block[0][j], d_4bit_block[1][j])
gmuls(in_mux_prod_4[j], in_mux_right_4[j], first_4bit_d[4])
# ge_startmult(in_mux_right_4[j], first_4bit_d[4])
# ge_stopmult(in_mux_prod_4[j])
gadds(selected_d_4bit_block[j], in_mux_prod_4[j], d_4bit_block[0][j])
if j < 4:
# compute [x|j]
gadds(self.args[2 + (4 + j)], c_xor_z[4 + j], selected_d_4bit_block[j])
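        # NOTE (informal, added for clarity): the blocks have widths 4, 4, 5, 6, 7,
        # 8, 9, 10, 11 (summing to 64); each block's carries were computed for both
        # possible carry-ins, and the chain of MUX selections below resolves them
        # block by block, giving the O(sqrt(n))-round variant: the same
        #     MUX(sel, d0, d1) = d0 + sel * (d0 + d1)
        # carry-select identity is applied at every block boundary.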
# connect 4bit block and next 5bit block
selected_d_5bit_block = [program.curr_block.new_reg('sg') for i in range(6)]
for j in range(6):
# compute MUX
gadds(in_mux_right_5[j], d_5bit_block[0][j], d_5bit_block[1][j])
gmuls(in_mux_prod_5[j], in_mux_right_5[j], selected_d_4bit_block[4])
# ge_startmult(in_mux_right_5[j], selected_d_4bit_block[4])
# ge_stopmult(in_mux_prod_5[j])
gadds(selected_d_5bit_block[j], in_mux_prod_5[j], d_5bit_block[0][j])
if j < 5:
# compute [x|j]
gadds(self.args[2 + (8 + j)], c_xor_z[8 + j], selected_d_5bit_block[j])
# connect 5bit block and next 6bit block
selected_d_6bit_block = [program.curr_block.new_reg('sg') for i in range(7)]
for j in range(7):
# compute MUX
gadds(in_mux_right_6[j], d_6bit_block[0][j], d_6bit_block[1][j])
gmuls(in_mux_prod_6[j], in_mux_right_6[j], selected_d_5bit_block[5])
# ge_startmult(in_mux_right_6[j], selected_d_5bit_block[5])
# ge_stopmult(in_mux_prod_6[j])
gadds(selected_d_6bit_block[j], in_mux_prod_6[j], d_6bit_block[0][j])
if j < 6:
# compute [x|j]
gadds(self.args[2 + (13 + j)], c_xor_z[13 + j], selected_d_6bit_block[j])
# connect 6bit block and next 7bit block
selected_d_7bit_block = [program.curr_block.new_reg('sg') for i in range(8)]
for j in range(8):
# compute MUX
gadds(in_mux_right_7[j], d_7bit_block[0][j], d_7bit_block[1][j])
gmuls(in_mux_prod_7[j], in_mux_right_7[j], selected_d_6bit_block[6])
# ge_startmult(in_mux_right_7[j], selected_d_6bit_block[6])
# ge_stopmult(in_mux_prod_7[j])
gadds(selected_d_7bit_block[j], in_mux_prod_7[j], d_7bit_block[0][j])
if j < 7:
# compute [x|j]
gadds(self.args[2 + (19 + j)], c_xor_z[19 + j], selected_d_7bit_block[j])
# connect 7bit block and next 8bit block
selected_d_8bit_block = [program.curr_block.new_reg('sg') for i in range(9)]
for j in range(9):
# compute MUX
gadds(in_mux_right_8[j], d_8bit_block[0][j], d_8bit_block[1][j])
gmuls(in_mux_prod_8[j], in_mux_right_8[j], selected_d_7bit_block[7])
# ge_startmult(in_mux_right_8[j], selected_d_7bit_block[7])
# ge_stopmult(in_mux_prod_8[j])
gadds(selected_d_8bit_block[j], in_mux_prod_8[j], d_8bit_block[0][j])
if j < 8:
# compute [x|j]
gadds(self.args[2 + (26 + j)], c_xor_z[26 + j], selected_d_8bit_block[j])
# connect 8bit block and next 9bit block
selected_d_9bit_block = [program.curr_block.new_reg('sg') for i in range(10)]
for j in range(10):
# compute MUX
gadds(in_mux_right_9[j], d_9bit_block[0][j], d_9bit_block[1][j])
gmuls(in_mux_prod_9[j], in_mux_right_9[j], selected_d_8bit_block[8])
# ge_startmult(in_mux_right_9[j], selected_d_8bit_block[8])
# ge_stopmult(in_mux_prod_9[j])
gadds(selected_d_9bit_block[j], in_mux_prod_9[j], d_9bit_block[0][j])
if j < 9:
# compute [x|j]
gadds(self.args[2 + (34 + j)], c_xor_z[34 + j], selected_d_9bit_block[j])
# connect 9bit block and next 10bit block
selected_d_10bit_block = [program.curr_block.new_reg('sg') for i in range(11)]
for j in range(11):
# compute MUX
gadds(in_mux_right_10[j], d_10bit_block[0][j], d_10bit_block[1][j])
gmuls(in_mux_prod_10[j], in_mux_right_10[j], selected_d_9bit_block[9])
# ge_startmult(in_mux_right_10[j], selected_d_9bit_block[9])
# ge_stopmult(in_mux_prod_10[j])
gadds(selected_d_10bit_block[j], in_mux_prod_10[j], d_10bit_block[0][j])
if j < 10:
# compute [x|j]
gadds(self.args[2 + (43 + j)], c_xor_z[43 + j], selected_d_10bit_block[j])
# connect 10bit block and next 11bit block
selected_d_11bit_block = [program.curr_block.new_reg('sg') for i in range(12)]
for j in range(11):
# compute MUX
gadds(in_mux_right_11[j], d_11bit_block[0][j], d_11bit_block[1][j])
gmuls(in_mux_prod_11[j], in_mux_right_11[j], selected_d_10bit_block[10])
# ge_startmult(in_mux_right_11[j], selected_d_10bit_block[10])
# ge_stopmult(in_mux_prod_11[j])
gadds(selected_d_11bit_block[j], in_mux_prod_11[j], d_11bit_block[0][j])
# compute [x|j]
gadds(self.args[2 + (53 + j)], c_xor_z[53 + j], selected_d_11bit_block[j])
#decomposition : square_root(n) round ver. (end)
elif type_of_decomposition == 'round_log':
#decomposition : log(n) round ver. (start)
log_val = int(math.ceil(math.log(self.args[1], 2)))
skew_res = [program.curr_block.new_reg('sg') for i in range(3 * self.args[1])]
x1_xor_x2 = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
z = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_c_left = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
x1_xor_x3 = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_c_prod = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
c = [program.curr_block.new_reg('sg') for i in range(self.args[1] + 1)]
c_xor_z = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
c_xor_d = [[[program.curr_block.new_reg('sg') for i in range(self.args[1])] for j in range(2)] for k in range(log_val)]
in_d_left = [[[program.curr_block.new_reg('sg') for i in range(self.args[1])] for j in range(2)] for k in range(log_val)]
in_d_prod = [[[program.curr_block.new_reg('sg') for i in range(self.args[1])] for j in range(2)] for k in range(log_val)]
d = [[[program.curr_block.new_reg('sg') for i in range(self.args[1] + 1)] for j in range(2)] for k in range(log_val)]
in_mux_right = [[[program.curr_block.new_reg('sg') for i in range(self.args[1] + 1)] for j in range(2)] for k in range(log_val)]
in_mux_prod = [[[program.curr_block.new_reg('sg') for i in range(self.args[1] + 1)] for j in range(2)] for k in range(log_val)]
gldsi(c[0],0)
gldsi(d[log_val - 1][0][0], 0)
e_skew_bit_dec(self.args[0], self.args[1], *skew_res)
# compute all [z] and [c]
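            # [z_j] is the XOR of the three replicated shares: z = x3 ^ (x1 ^ x2).
            # [c_{j+1}] is the carry maj(x1, x2, x3), computed over GF(2) as
            # ((x1 ^ x2) ^ 1) * (x1 ^ x3) ^ x3.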
for j in range(self.args[1]):
# compute [z]
gadds(x1_xor_x2[j], skew_res[3 * j], skew_res[3 * j + 1])
gadds(z[j], skew_res[3 * j + 2], x1_xor_x2[j])
# compute [c]
gaddsi(in_c_left[j], x1_xor_x2[j], 1)
gadds(x1_xor_x3[j], skew_res[3 * j], skew_res[3 * j + 2])
gmuls(in_c_prod[j], in_c_left[j], x1_xor_x3[j])
# ge_startmult(in_c_left[j], x1_xor_x3[j])
# ge_stopmult(in_c_prod[j])
gadds(c[j + 1], in_c_prod[j], skew_res[3 * j + 2])
# compute c_xor_z
gadds(c_xor_z[j], c[j], z[j])
# compute all [d] -- assume that self.args[1] >= 8
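            # d[k][0][*] and d[k][1][*] are the candidate carry chains of each block
            # under carry-in 0 and carry-in 1 respectively; d[log_val - 1][0] holds
            # the already-resolved prefix of the final chain.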
for k in range(log_val - 1):
valid_carry_idx = 2 ** (k + 1)
# print("valid_carry_idx = {0}".format(valid_carry_idx))
if k == 0:
# compute candidate of [d]
for j in range(2):
for i in range(self.args[1]):
if (j == 0) and (i == 0):
gadds(c_xor_d[k][0][i], c[i], d[log_val - 1][0][i])
gaddsi(in_d_left[k][0][i], c_xor_d[k][0][i], 1)
gmuls(in_d_prod[k][0][i], in_d_left[k][0][i], c_xor_z[i])
# ge_startmult(in_d_left[k][0][i], c_xor_z[i])
# ge_stopmult(in_d_prod[k][0][i])
gadds(d[log_val - 1][0][i+1], in_d_prod[k][0][i], z[i])
elif (j == 0) and (i == 1):
gadds(c_xor_d[k][0][i], c[i], d[log_val - 1][0][i])
gaddsi(in_d_left[k][0][i], c_xor_d[k][0][i], 1)
gmuls(in_d_prod[k][0][i], in_d_left[k][0][i], c_xor_z[i])
# ge_startmult(in_d_left[k][0][i], c_xor_z[i])
# ge_stopmult(in_d_prod[k][0][i])
gadds(d[log_val - 1][0][i+1], in_d_prod[k][0][i], z[i])
elif (i >= 2) and (i % 2 == 0):
gaddsi(c_xor_d[k][j][i], c[i], j)
gaddsi(in_d_left[k][j][i], c_xor_d[k][j][i], 1)
gmuls(in_d_prod[k][j][i], in_d_left[k][j][i], c_xor_z[i])
# ge_startmult(in_d_left[k][j][i], c_xor_z[i])
# ge_stopmult(in_d_prod[k][j][i])
gadds(d[k][j][i+1], in_d_prod[k][j][i], z[i])
elif (i >= 2) and (i % 2 == 1):
gadds(c_xor_d[k][j][i], c[i], d[k][j][i])
gaddsi(in_d_left[k][j][i], c_xor_d[k][j][i], 1)
gmuls(in_d_prod[k][j][i], in_d_left[k][j][i], c_xor_z[i])
# ge_startmult(in_d_left[k][j][i], c_xor_z[i])
# ge_stopmult(in_d_prod[k][j][i])
gadds(d[k][j][i+1], in_d_prod[k][j][i], z[i])
# select and connect blocks of [d]
for j in range(2):
for i in range(1, self.args[1]):
if (j == 0) and (i == valid_carry_idx):
for connect_idx in range(valid_carry_idx, 2 * valid_carry_idx):
# compute MUX
gadds(in_mux_right[k][j][connect_idx + 1], d[k][0][connect_idx + 1], d[k][1][connect_idx + 1])
gmuls(in_mux_prod[k][j][connect_idx + 1], in_mux_right[k][j][connect_idx + 1], d[log_val - 1][0][i])
# ge_startmult(in_mux_right[k][j][connect_idx + 1], d[log_val - 1][0][i])
# ge_stopmult(in_mux_prod[k][j][connect_idx + 1])
gadds(d[log_val - 1][0][connect_idx + 1], in_mux_prod[k][j][connect_idx + 1], d[k][0][connect_idx + 1])
                            elif (i >= 2 * valid_carry_idx) and (i % (2 * valid_carry_idx) == valid_carry_idx - 1):
d[k + 1][j][i] = d[k][j][i]
elif (i >= 2 * valid_carry_idx) and (i % (2 * valid_carry_idx) == valid_carry_idx):
for connect_idx in range(i, i + valid_carry_idx):
# compute MUX
gadds(in_mux_right[k][j][connect_idx + 1], d[k][0][connect_idx + 1], d[k][1][connect_idx + 1])
gmuls(in_mux_prod[k][j][connect_idx + 1], in_mux_right[k][j][connect_idx + 1], d[k][j][i])
# ge_startmult(in_mux_right[k][j][connect_idx + 1], d[k][j][i])
# ge_stopmult(in_mux_prod[k][j][connect_idx + 1])
gadds(d[k + 1][j][connect_idx + 1], in_mux_prod[k][j][connect_idx + 1], d[k][0][connect_idx + 1])
if connect_idx == i:
d[k+1][j][i] = d[k][j][i]
else:
# select and connect blocks of [d]
for j in range(2):
count = 1
for i in range(1, self.args[1]):
finished_block = 2 * count
if (j == 0) and (i == valid_carry_idx):
for connect_idx in range(valid_carry_idx, 2 * valid_carry_idx):
# compute MUX
gadds(in_mux_right[k][j][connect_idx + 1], d[k][0][connect_idx + 1], d[k][1][connect_idx + 1])
gmuls(in_mux_prod[k][j][connect_idx + 1], in_mux_right[k][j][connect_idx + 1], d[log_val - 1][0][i])
# ge_startmult(in_mux_right[k][j][connect_idx + 1], d[log_val - 1][0][i])
# ge_stopmult(in_mux_prod[k][j][connect_idx + 1])
gadds(d[log_val - 1][0][connect_idx + 1], in_mux_prod[k][j][connect_idx + 1], d[k][0][connect_idx + 1])
elif (i >= finished_block * valid_carry_idx) and (i % (2 * valid_carry_idx) > 0) and (i % (2 * valid_carry_idx) <= valid_carry_idx - 1) and (k <= (log_val - 2)):
d[k + 1][j][i] = d[k][j][i]
elif (i >= finished_block * valid_carry_idx) and (i % (2 * valid_carry_idx) >= valid_carry_idx) and (k <= (log_val - 2)):
for connect_idx in range(i, i + valid_carry_idx):
# compute MUX
gadds(in_mux_right[k][j][connect_idx + 1], d[k][0][connect_idx + 1], d[k][1][connect_idx + 1])
gmuls(in_mux_prod[k][j][connect_idx + 1], in_mux_right[k][j][connect_idx + 1], d[k][j][i])
# ge_startmult(in_mux_right[k][j][connect_idx + 1], d[k][j][i])
# ge_stopmult(in_mux_prod[k][j][connect_idx + 1])
gadds(d[k + 1][j][connect_idx + 1], in_mux_prod[k][j][connect_idx + 1], d[k][0][connect_idx + 1])
if connect_idx == i:
d[k + 1][j][i] = d[k][j][i]
if connect_idx == i + valid_carry_idx - 1:
count += 1
# compute [x|j]
for i in range(self.args[1]):
gadds(self.args[2 + i], c_xor_z[i], d[log_val - 1][0][i])
# decomposition : log(n) round ver. (end)
else:
# decomposition : n-1 round ver. (start)
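            # Classic ripple-carry variant: the carry [c] and correction [d] are
            # propagated bit by bit, costing one multiplication per bit and hence
            # n - 1 communication rounds.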
skew_res = [program.curr_block.new_reg('sg') for i in range(3 * self.args[1])]
x1_xor_x2 = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
z = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_c_left = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
x1_xor_x3 = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_c_prod = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
c = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
c_xor_d = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_d_left = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
in_d_prod = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
c_xor_z = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
d = [program.curr_block.new_reg('sg') for i in range(self.args[1])]
e_skew_bit_dec(self.args[0], self.args[1], *skew_res)
gldsi(c[0], 0)
gldsi(d[0], 0)
for j in range(self.args[1]):
if self.args[1] == 1:
gadds(x1_xor_x2[j], skew_res[3 * j], skew_res[3 * j + 1])
gadds(self.args[2 + j], skew_res[3 * j + 2], x1_xor_x2[j])
else:
if j == self.args[1] - 1:
# compute [z]
gadds(x1_xor_x2[j], skew_res[3 * j], skew_res[3 * j + 1])
gadds(z[j], skew_res[3 * j + 2], x1_xor_x2[j])
# compute c_xor_d[j]
gadds(c_xor_d[j], c[j], d[j])
# compute [x|j]
gadds(self.args[2 + j], z[j], c_xor_d[j])
else:
# compute [z]
gadds(x1_xor_x2[j], skew_res[3 * j], skew_res[3 * j + 1])
gadds(z[j], skew_res[3 * j + 2], x1_xor_x2[j])
# compute [c]
gaddsi(in_c_left[j], x1_xor_x2[j], 1)
gadds(x1_xor_x3[j], skew_res[3 * j], skew_res[3 * j + 2])
gmuls(in_c_prod[j], in_c_left[j], x1_xor_x3[j])
# ge_startmult(in_c_left[j], x1_xor_x3[j])
# ge_stopmult(in_c_prod[j])
gadds(c[j+1], in_c_prod[j], skew_res[3 * j + 2])
# compute [d]
gadds(c_xor_d[j], c[j], d[j])
gaddsi(in_d_left[j], c_xor_d[j], 1)
gadds(c_xor_z[j], c[j], z[j])
gmuls(in_d_prod[j], in_d_left[j], c_xor_z[j])
# ge_startmult(in_d_left[j], c_xor_z[j])
# ge_stopmult(in_d_prod[j])
gadds(d[j + 1], in_d_prod[j], z[j])
# compute [x|j]
gadds(self.args[2 + j], z[j], c_xor_d[j])
# decomposition : n-1 round ver. (end)
#@base.gf2n
@base.vectorize
class e_bitinj(base.CISC):
__slots__ = []
code = base.opcodes['E_BITINJ']
arg_format = ['sg', 'sw']
def expand(self):
x1 = program.curr_block.new_reg('s')
x2 = program.curr_block.new_reg('s')
x3 = program.curr_block.new_reg('s')
sum12 = program.curr_block.new_reg('s')
sum123 = program.curr_block.new_reg('s')
prod12 = program.curr_block.new_reg('s')
twice_prod12 = program.curr_block.new_reg('s')
twice_x3 = program.curr_block.new_reg('s')
round2_right = program.curr_block.new_reg('s')
round2_prod = program.curr_block.new_reg('s')
res_left = program.curr_block.new_reg('s')
#e_skew_inj(self.args[0], x1, x2, x3)
e_skew_bit_inj(self.args[0], x1, x2, x3)
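        # Bit injection: with replicated bit shares x1, x2, x3 the injected value
        # is x1 XOR x2 XOR x3, arithmetised via a ^ b = a + b - 2ab applied twice.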
        # compute [x1] + [x2] + [x3]
adds(sum12, x1, x2)
adds(sum123, x3, sum12)
# compute [x1] * [x2]
muls(prod12, x1, x2)
# e_startmult(x1, x2)
# e_stopmult(prod12)
# * 2
mulsi(twice_prod12, prod12, 2)
mulsi(twice_x3, x3, 2)
# compute ([x1] + [x2] - 2 * [x1] * [x2])
subs(round2_right, sum12, twice_prod12)
muls(round2_prod, twice_x3, round2_right)
# e_startmult(twice_x3, round2_right)
# e_stopmult(round2_prod)
# compute result
subs(res_left, sum123, twice_prod12)
subs(self.args[1], res_left, round2_prod)
@base.vectorize
class e_bitrec(base.CISC):
__slots__ = []
code = base.opcodes['E_BITREC']
arg_format = tools.chain(['sw', 'int'], itertools.repeat('sg'))
def expand(self):
        # self.args[1] is the number of elements in the bit array
ring_size = 64
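        # Recompose a ring_size-bit ring element from self.args[1] shared bits:
        # the high-order positions are padded with shares of zero, each bit is
        # corrected by the running chains [c] and [d], and the corrected shares
        # are combined by the final e_skew_ring_rec call.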
bit_s = [program.curr_block.new_reg('sg') for i in range(ring_size)]
c_xor_d = [program.curr_block.new_reg('sg') for i in range(ring_size)]
x1 = [program.curr_block.new_reg('sg') for i in range(ring_size)]
x2 = [program.curr_block.new_reg('sg') for i in range(ring_size)]
x3 = [program.curr_block.new_reg('sg') for i in range(ring_size)]
x12 = [program.curr_block.new_reg('sg') for i in range(ring_size)]
x13 = [program.curr_block.new_reg('sg') for i in range(ring_size)]
in1_left = [program.curr_block.new_reg('sg') for i in range(ring_size)]
c = [program.curr_block.new_reg('sg') for i in range(ring_size + 1)]
c_left = [program.curr_block.new_reg('sg') for i in range(ring_size)]
in2_left = [program.curr_block.new_reg('sg') for i in range(ring_size)]
in2_right = [program.curr_block.new_reg('sg') for i in range(ring_size)]
d = [program.curr_block.new_reg('sg') for i in range(ring_size + 1)]
d_left = [program.curr_block.new_reg('sg') for i in range(ring_size)]
zero_shares = [program.curr_block.new_reg('sg') for i in range(ring_size - self.args[1])]
gldsi(c[0], 0)
gldsi(d[0], 0)
for j in range(ring_size - self.args[1]):
gldsi(zero_shares[j], 0)
for j in range(ring_size):
if j == 0:
gadds(c_xor_d[j], c[j], d[j])
gadds(bit_s[j], c_xor_d[j], self.args[2 + j])
e_skew_bit_rec(bit_s[j], x1[j], x2[j], x3[j])
gadds(x12[j], x1[j], x2[j])
gaddsi(in1_left[j], x12[j], 1)
gadds(x13[j], x1[j], x3[j])
gmuls(c_left[j], in1_left[j], x13[j])
gadds(c[j + 1], c_left[j], x3[j])
gaddsi(in2_left[j], c_xor_d[j], 1)
gadds(in2_right[j], c[j], bit_s[j])
gmuls(d_left[j], in2_left[j], in2_right[j])
gadds(d[j + 1], d_left[j], bit_s[j])
elif j == ring_size - 1:
if j < self.args[1]:
gadds(c_xor_d[j], c[j], d[j])
gadds(bit_s[j], c_xor_d[j], self.args[2 + j])
else:
gadds(c_xor_d[j], c[j], d[j])
gadds(bit_s[j], c_xor_d[j], zero_shares[j - self.args[1]])
else:
if j < self.args[1]:
gadds(c_xor_d[j], c[j], d[j])
gadds(bit_s[j], c_xor_d[j], self.args[2 + j])
else:
gadds(c_xor_d[j], c[j], d[j])
gadds(bit_s[j], c_xor_d[j], zero_shares[j - self.args[1]])
e_skew_bit_rec(bit_s[j], x1[j], x2[j], x3[j])
gadds(x12[j], x1[j], x2[j])
gaddsi(in1_left[j], x12[j], 1)
gadds(x13[j], x1[j], x3[j])
gmuls(c_left[j], in1_left[j], x13[j])
gadds(c[j + 1], c_left[j], x3[j])
gaddsi(in2_left[j], c_xor_d[j], 1)
gadds(in2_right[j], c[j], bit_s[j])
gmuls(d_left[j], in2_left[j], in2_right[j])
gadds(d[j + 1], d_left[j], bit_s[j])
e_skew_ring_rec(self.args[0], ring_size, *bit_s)
@base.vectorize
class e_read_from_file(base.CISC):
__slots__ = []
code = base.opcodes['E_READ_FROM_FILE']
arg_format = tools.chain(['s', 'int', 'int'], itertools.repeat('sw'))
def expand(self):
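        # self.args[2] is the number of shares to read; the destination registers
        # are forwarded unchanged to the raw input-share instruction (the freshly
        # allocated registers are immediately replaced by the output arguments).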
res = [program.curr_block.new_reg('s') for i in range(self.args[2])]
for j in range(self.args[2]):
res[j] = self.args[3+j]
e_input_share_int(self.args[1], self.args[2], *res)
@base.vectorize
class ge_read_from_file(base.CISC):
__slots__ = []
code = base.opcodes['GE_READ_FROM_FILE']
arg_format = tools.chain(['sg', 'int', 'int'], itertools.repeat('sgw'))
def expand(self):
res = [program.curr_block.new_reg('sg') for i in range(self.args[2])]
for j in range(self.args[2]):
res[j] = self.args[3+j]
ge_input_share_int(self.args[1], self.args[2], *res)
@base.vectorize
class e_input_share_int(base.Instruction):
__slots__ = []
code = base.opcodes['E_INPUT_SHARE_INT']
arg_format = tools.chain(['int', 'int'], itertools.repeat('sw'))
@base.vectorize
class ge_input_share_int(base.Instruction):
__slots__ = []
code = base.opcodes['GE_INPUT_SHARE_INT']
arg_format = tools.chain(['int', 'int'], itertools.repeat('sgw'))
@base.vectorize
class e_multi_startmult(startopen_class):
__slots__ = []
code = base.opcodes['E_MULTI_STARTMULT']
arg_format = itertools.repeat('s')
@base.vectorize
class e_multi_stopmult(stopopen_class):
__slots__ = []
code = base.opcodes['E_MULTI_STOPMULT']
arg_format = itertools.repeat('sw')
@base.gf2n
@base.vectorize
class e_startmult(startopen_class):
__slots__ = []
code = base.opcodes['E_STARTMULT']
arg_format = itertools.repeat('s')
@base.gf2n
@base.vectorize
class e_stopmult(stopopen_class):
__slots__ = []
code = base.opcodes['E_STOPMULT']
arg_format = itertools.repeat('sw')
@base.gf2n
@base.vectorize
class muls(base.CISC):
__slots__ = []
arg_format = ['sw','s','s']
def expand(self):
e_mult(self.args[0], self.args[1], self.args[2])
@base.gf2n
@base.vectorize
class sqrs(base.CISC):
__slots__ = []
arg_format = ['sw', 's']
def expand(self):
s = [program.curr_block.new_reg('s') for i in range(6)]
c = [program.curr_block.new_reg('c') for i in range(2)]
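        # Square via a preprocessed square pair (s[0], s[1] = s[0]^2):
        # open c0 = x - s[0], then x^2 = s[1] + 2*x*c0 - c0^2.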
square(s[0], s[1])
subs(s[2], self.args[1], s[0])
asm_open(c[0], s[2])
mulc(c[1], c[0], c[0])
mulm(s[3], self.args[1], c[0])
adds(s[4], s[3], s[3])
adds(s[5], s[1], s[4])
subml(self.args[0], s[5], c[1])
@base.gf2n
@base.vectorize
class lts(base.CISC):
__slots__ = []
arg_format = ['sw', 's', 's', 'int', 'int']
def expand(self):
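        # x < y  iff  x - y < 0: take the difference and delegate the sign test to
        # comparison.LTZ (args[3] and args[4] are presumably the bit length and
        # statistical security parameter passed through to LTZ).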
a = program.curr_block.new_reg('s')
subs(a, self.args[1], self.args[2])
comparison.LTZ(self.args[0], a, self.args[3], self.args[4])
@base.vectorize
class g2muls(base.CISC):
__slots__ = []
arg_format = ['sgw','sg','sg']
def expand(self):
s = [program.curr_block.new_reg('sg') for i in range(9)]
c = [program.curr_block.new_reg('cg') for i in range(3)]
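        # Beaver-style multiplication with a preprocessed bit triple (a, b, ab):
        # open e = x - a and d = y - b, then x*y = ab + b*e + a*d + e*d
        # (addition and subtraction coincide in characteristic 2).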
gbittriple(s[0], s[1], s[2])
gsubs(s[3], self.args[1], s[0])
gsubs(s[4], self.args[2], s[1])
gstartopen(s[3], s[4])
gstopopen(c[0], c[1])
gmulbitm(s[5], s[1], c[0])
gmulbitm(s[6], s[0], c[1])
gmulbitc(c[2], c[0], c[1])
gadds(s[7], s[2], s[5])
gadds(s[8], s[7], s[6])
gaddm(self.args[0], s[8], c[2])
from Compiler import comparison
| true
| true
|
79020f67f255df76e3945226f5f0570bf89af103
| 5,497
|
py
|
Python
|
tfx/dsl/compiler/testdata/iris_pipeline_sync.py
|
Saiprasad16/tfx
|
c1e0704b2a83232469f55598efcdb7808b6c909f
|
[
"Apache-2.0"
] | 1
|
2021-05-10T10:41:06.000Z
|
2021-05-10T10:41:06.000Z
|
tfx/dsl/compiler/testdata/iris_pipeline_sync.py
|
Saiprasad16/tfx
|
c1e0704b2a83232469f55598efcdb7808b6c909f
|
[
"Apache-2.0"
] | null | null | null |
tfx/dsl/compiler/testdata/iris_pipeline_sync.py
|
Saiprasad16/tfx
|
c1e0704b2a83232469f55598efcdb7808b6c909f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test pipeline for tfx.dsl.compiler.compiler."""
import os
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ImporterNode
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import data_types
from tfx.orchestration import pipeline
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types import standard_artifacts
def create_test_pipeline():
"""Builds an Iris example pipeline with slight changes."""
pipeline_name = "iris"
iris_root = "iris_root"
serving_model_dir = os.path.join(iris_root, "serving_model", pipeline_name)
tfx_root = "tfx_root"
data_path = os.path.join(tfx_root, "data_path")
pipeline_root = os.path.join(tfx_root, "pipelines", pipeline_name)
example_gen = CsvExampleGen(input_base=data_path)
statistics_gen = StatisticsGen(examples=example_gen.outputs["examples"])
importer = ImporterNode(
source_uri="m/y/u/r/i",
properties={
"split_names": "['train', 'eval']",
},
custom_properties={
"int_custom_property": 42,
"str_custom_property": "42",
},
artifact_type=standard_artifacts.Examples).with_id("my_importer")
another_statistics_gen = StatisticsGen(
examples=importer.outputs["result"]).with_id("another_statistics_gen")
schema_gen = SchemaGen(statistics=statistics_gen.outputs["statistics"])
example_validator = ExampleValidator(
statistics=statistics_gen.outputs["statistics"],
schema=schema_gen.outputs["schema"])
trainer = Trainer(
      # Use RuntimeParameter as module_file to test out RuntimeParameter in the
      # compiler.
module_file=data_types.RuntimeParameter(
name="module_file",
default=os.path.join(iris_root, "iris_utils.py"),
ptype=str),
custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
examples=example_gen.outputs["examples"],
schema=schema_gen.outputs["schema"],
train_args=trainer_pb2.TrainArgs(num_steps=2000),
      # Attaching `TrainerArgs` as platform config is not sensible practice,
      # but is done here only for testing purposes.
eval_args=trainer_pb2.EvalArgs(num_steps=5)).with_platform_config(
config=trainer_pb2.TrainArgs(num_steps=2000))
model_resolver = resolver.Resolver(
strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(
type=standard_artifacts.Model, producer_component_id=trainer.id),
model_blessing=Channel(type=standard_artifacts.ModelBlessing)).with_id(
"latest_blessed_model_resolver")
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name="eval")],
slicing_specs=[tfma.SlicingSpec()],
metrics_specs=[
tfma.MetricsSpec(
thresholds={
"sparse_categorical_accuracy":
tfma.config.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={"value": 0.6}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={"value": -1e-10}))
})
])
evaluator = Evaluator(
examples=example_gen.outputs["examples"],
model=trainer.outputs["model"],
baseline_model=model_resolver.outputs["model"],
eval_config=eval_config)
pusher = Pusher(
model=trainer.outputs["model"],
model_blessing=evaluator.outputs["blessing"],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen,
statistics_gen,
another_statistics_gen,
importer,
schema_gen,
example_validator,
trainer,
model_resolver,
evaluator,
pusher,
],
enable_cache=True,
beam_pipeline_args=["--my_testing_beam_pipeline_args=foo"],
      # Attaching `TrainerArgs` as platform config is not sensible practice,
      # but is done here only for testing purposes.
platform_config=trainer_pb2.TrainArgs(num_steps=2000),
execution_mode=pipeline.ExecutionMode.SYNC)
| 38.711268
| 78
| 0.709478
|
import os
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ImporterNode
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import data_types
from tfx.orchestration import pipeline
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types import standard_artifacts
def create_test_pipeline():
pipeline_name = "iris"
iris_root = "iris_root"
serving_model_dir = os.path.join(iris_root, "serving_model", pipeline_name)
tfx_root = "tfx_root"
data_path = os.path.join(tfx_root, "data_path")
pipeline_root = os.path.join(tfx_root, "pipelines", pipeline_name)
example_gen = CsvExampleGen(input_base=data_path)
statistics_gen = StatisticsGen(examples=example_gen.outputs["examples"])
importer = ImporterNode(
source_uri="m/y/u/r/i",
properties={
"split_names": "['train', 'eval']",
},
custom_properties={
"int_custom_property": 42,
"str_custom_property": "42",
},
artifact_type=standard_artifacts.Examples).with_id("my_importer")
another_statistics_gen = StatisticsGen(
examples=importer.outputs["result"]).with_id("another_statistics_gen")
schema_gen = SchemaGen(statistics=statistics_gen.outputs["statistics"])
example_validator = ExampleValidator(
statistics=statistics_gen.outputs["statistics"],
schema=schema_gen.outputs["schema"])
trainer = Trainer(
module_file=data_types.RuntimeParameter(
name="module_file",
default=os.path.join(iris_root, "iris_utils.py"),
ptype=str),
custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
examples=example_gen.outputs["examples"],
schema=schema_gen.outputs["schema"],
train_args=trainer_pb2.TrainArgs(num_steps=2000),
eval_args=trainer_pb2.EvalArgs(num_steps=5)).with_platform_config(
config=trainer_pb2.TrainArgs(num_steps=2000))
model_resolver = resolver.Resolver(
strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(
type=standard_artifacts.Model, producer_component_id=trainer.id),
model_blessing=Channel(type=standard_artifacts.ModelBlessing)).with_id(
"latest_blessed_model_resolver")
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name="eval")],
slicing_specs=[tfma.SlicingSpec()],
metrics_specs=[
tfma.MetricsSpec(
thresholds={
"sparse_categorical_accuracy":
tfma.config.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={"value": 0.6}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={"value": -1e-10}))
})
])
evaluator = Evaluator(
examples=example_gen.outputs["examples"],
model=trainer.outputs["model"],
baseline_model=model_resolver.outputs["model"],
eval_config=eval_config)
pusher = Pusher(
model=trainer.outputs["model"],
model_blessing=evaluator.outputs["blessing"],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen,
statistics_gen,
another_statistics_gen,
importer,
schema_gen,
example_validator,
trainer,
model_resolver,
evaluator,
pusher,
],
enable_cache=True,
beam_pipeline_args=["--my_testing_beam_pipeline_args=foo"],
platform_config=trainer_pb2.TrainArgs(num_steps=2000),
execution_mode=pipeline.ExecutionMode.SYNC)
| true
| true
|
79021088736c1e2ba4efc5f1d65dff9b2dd4a3bf
| 1,655
|
py
|
Python
|
main/class-1-dealing-with-complex-numbers/class-1-dealing-with-complex-numbers.py
|
EliahKagan/old-practice-snapshot
|
1b53897eac6902f8d867c8f154ce2a489abb8133
|
[
"0BSD"
] | null | null | null |
main/class-1-dealing-with-complex-numbers/class-1-dealing-with-complex-numbers.py
|
EliahKagan/old-practice-snapshot
|
1b53897eac6902f8d867c8f154ce2a489abb8133
|
[
"0BSD"
] | null | null | null |
main/class-1-dealing-with-complex-numbers/class-1-dealing-with-complex-numbers.py
|
EliahKagan/old-practice-snapshot
|
1b53897eac6902f8d867c8f154ce2a489abb8133
|
[
"0BSD"
] | null | null | null |
import math
class Complex(object):
def __init__(self, real, imaginary):
self.real = real
self.imaginary = imaginary
def __add__(self, no):
return Complex(self.real + no.real, self.imaginary + no.imaginary)
def __sub__(self, no):
return Complex(self.real - no.real, self.imaginary - no.imaginary)
def __mul__(self, no):
return Complex(self.real * no.real - self.imaginary * no.imaginary,
self.real * no.imaginary + self.imaginary * no.real)
def __truediv__(self, no):
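        # Multiply by the conjugate of the divisor:
        # (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c^2 + d^2).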
denom = no.real * no.real + no.imaginary * no.imaginary
re_numer = self.real * no.real + self.imaginary * no.imaginary
im_numer = self.imaginary * no.real - self.real * no.imaginary
return Complex(re_numer / denom, im_numer / denom)
def mod(self):
return Complex(math.sqrt(self.real * self.real + self.imaginary * self.imaginary), 0.0)
def __str__(self):
if self.imaginary == 0:
result = "%.2f+0.00i" % (self.real)
elif self.real == 0:
if self.imaginary >= 0:
result = "0.00+%.2fi" % (self.imaginary)
else:
result = "0.00-%.2fi" % (abs(self.imaginary))
elif self.imaginary > 0:
result = "%.2f+%.2fi" % (self.real, self.imaginary)
else:
result = "%.2f-%.2fi" % (self.real, abs(self.imaginary))
return result
if __name__ == '__main__':
c = map(float, input().split())
d = map(float, input().split())
x = Complex(*c)
y = Complex(*d)
print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\n')
| 35.212766
| 95
| 0.565559
|
import math
class Complex(object):
def __init__(self, real, imaginary):
self.real = real
self.imaginary = imaginary
def __add__(self, no):
return Complex(self.real + no.real, self.imaginary + no.imaginary)
def __sub__(self, no):
return Complex(self.real - no.real, self.imaginary - no.imaginary)
def __mul__(self, no):
return Complex(self.real * no.real - self.imaginary * no.imaginary,
self.real * no.imaginary + self.imaginary * no.real)
def __truediv__(self, no):
denom = no.real * no.real + no.imaginary * no.imaginary
re_numer = self.real * no.real + self.imaginary * no.imaginary
im_numer = self.imaginary * no.real - self.real * no.imaginary
return Complex(re_numer / denom, im_numer / denom)
def mod(self):
return Complex(math.sqrt(self.real * self.real + self.imaginary * self.imaginary), 0.0)
def __str__(self):
if self.imaginary == 0:
result = "%.2f+0.00i" % (self.real)
elif self.real == 0:
if self.imaginary >= 0:
result = "0.00+%.2fi" % (self.imaginary)
else:
result = "0.00-%.2fi" % (abs(self.imaginary))
elif self.imaginary > 0:
result = "%.2f+%.2fi" % (self.real, self.imaginary)
else:
result = "%.2f-%.2fi" % (self.real, abs(self.imaginary))
return result
if __name__ == '__main__':
c = map(float, input().split())
d = map(float, input().split())
x = Complex(*c)
y = Complex(*d)
print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\n')
| true
| true
|
790211bfa017a8a2d1297cd673098bab535e254d
| 18,081
|
py
|
Python
|
src/main/resources/pydev_tunnel/tunnel_single_script.py
|
gdlg/k8s-debugger-pycharm-pluggin
|
30354f8e6ce3f979650c032e485137ec3f113a2c
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/pydev_tunnel/tunnel_single_script.py
|
gdlg/k8s-debugger-pycharm-pluggin
|
30354f8e6ce3f979650c032e485137ec3f113a2c
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/pydev_tunnel/tunnel_single_script.py
|
gdlg/k8s-debugger-pycharm-pluggin
|
30354f8e6ce3f979650c032e485137ec3f113a2c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import contextlib as __stickytape_contextlib
@__stickytape_contextlib.contextmanager
def __stickytape_temporary_dir():
import tempfile
import shutil
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
with __stickytape_temporary_dir() as __stickytape_working_dir:
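    # stickytape inlines every module as a byte-string literal, writes the modules
    # into a temporary directory, and prepends that directory to sys.path so the
    # imports at the bottom of this script resolve against the bundled copies.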
def __stickytape_write_module(path, contents):
import os, os.path
def make_package(path):
parts = path.split("/")
partial_path = __stickytape_working_dir
for part in parts:
partial_path = os.path.join(partial_path, part)
if not os.path.exists(partial_path):
os.mkdir(partial_path)
with open(os.path.join(partial_path, "__init__.py"), "wb") as f:
f.write(b"\n")
make_package(os.path.dirname(path))
full_path = os.path.join(__stickytape_working_dir, path)
with open(full_path, "wb") as module_file:
module_file.write(contents)
import sys as __stickytape_sys
__stickytape_sys.path.insert(0, __stickytape_working_dir)
__stickytape_write_module('dispatcher.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport select\nimport socket\nfrom typing import Any, Dict, Union, TextIO, TYPE_CHECKING, Optional, List\n\n\nif TYPE_CHECKING:\n from processor import Processor\n from pydev_server_monitor import PydevServerMonitor\n\n\nclass Dispatcher:\n """\n The dispatcher class implements the main loop of the program,\n waiting for new I/O inputs (either from socket or pipe),\n then calling the relevant processor to handle the input.\n\n It also regularly calls monitors which are used to perform health checks\n on Pydev debug servers. If auto_stop is enabled, the loop exits when the last\n monitor terminates (i.e. no Pydev debug servers are running).\n """\n def __init__(self, auto_stop: bool):\n self._port_to_processors: "Dict[Any, Processor]" = {}\n self._socket_to_processors: Dict[Union[socket.socket, TextIO], Processor] = {}\n self._server_monitors: Dict[Any, PydevServerMonitor] = {}\n self._auto_stop = auto_stop\n\n def add_processor(self, processor: "Processor"):\n self._port_to_processors[processor.key] = processor\n self._socket_to_processors[processor.socket] = processor\n\n def remove_processor(self, processor: "Processor"):\n try:\n del self._port_to_processors[processor.key]\n del self._socket_to_processors[processor.socket]\n except KeyError:\n pass\n processor.close()\n\n def add_server_monitor(self, monitor: "PydevServerMonitor"):\n self._server_monitors[monitor.key] = monitor\n\n def remove_server_monitor(self, monitor: "PydevServerMonitor"):\n try:\n del self._server_monitors[monitor.key]\n except KeyError:\n pass\n\n def find_processor(self, key: Any) -> "Optional[Processor]":\n return self._port_to_processors.get(key, None)\n\n def get_all_processors(self) -> "List[Processor]":\n return list(self._port_to_processors.values())\n\n def dispatch_loop(self):\n while True:\n inputs = list(self._socket_to_processors.keys())\n \n inputs_ready, _, _ = select.select(inputs, [], [], 1)\n\n for input_socket in inputs_ready:\n processor = self._socket_to_processors[input_socket]\n processor.on_input_ready()\n\n for monitor in list(self._server_monitors.values()):\n monitor.monitor()\n\n if self._auto_stop and len(self._server_monitors) == 0:\n return\n \n')
__stickytape_write_module('processor.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport abc\nimport socket\nfrom typing import Any, Union, TextIO\n\n\nclass Processor(abc.ABC):\n @property\n @abc.abstractmethod\n def key(self) -> Any: raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def socket(self) -> Union[socket.socket, TextIO]: raise NotImplementedError\n\n @abc.abstractmethod\n def on_input_ready(self) -> None: raise NotImplementedError\n\n @abc.abstractmethod\n def close(self) -> None: raise NotImplementedError\n')
__stickytape_write_module('pydev_server_monitor.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom pipe_client_server import PipeClientServer\n\nlogger = logging.getLogger("pydev_server_monitor")\n\n\nclass PydevServerMonitor:\n """\n Monitor a local Pydev debug server.\n\n When initialised, this class sends a message to the remote to create a corresponding listening server.\n When the Pydev server stops, this class detects that the server is no longer running\n and also close the remote server.\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str):\n logger.debug(f"start monitoring the port {local_port}")\n self._dispatcher = dispatcher\n self._local_port = local_port\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n self._is_terminated = False\n\n if self.is_socket_alive():\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n logger.debug(f"ask remote to start new server on port {local_port}")\n server.write(local_port, "", "start_server\\n")\n else:\n logger.debug(f"server is not running")\n self._is_terminated = True\n\n @property\n def key(self) -> Any:\n return self._local_port\n \n def is_socket_alive(self) -> bool:\n if self._is_terminated:\n return False\n\n try:\n self._socket.bind((\'\', int(self._local_port)))\n except Exception:\n return True\n\n try:\n self._socket.shutdown(2)\n except:\n pass\n\n return False\n\n def monitor(self):\n if not self.is_socket_alive() and not self._is_terminated:\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n logger.debug(f"ask remote to stop server on port {self._local_port}")\n server.write(self._local_port, "", "stop_server\\n")\n self._dispatcher.remove_server_monitor(self)\n self._is_terminated = True\n')
__stickytape_write_module('pipe_client_server.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport fcntl\nimport logging\nimport os\nimport io\nfrom typing import Any, BinaryIO\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\n\nlogger = logging.getLogger("pipe_client_server")\n\n\nclass PipeClientServer(Processor):\n """\n This class handles the communication between the local and remote hosts using a pipe.\n """\n def __init__(self, dispatcher: Dispatcher, stdin: BinaryIO, stdout: BinaryIO):\n logger.debug("create new pipe client/server")\n self._dispatcher = dispatcher\n self._read_buffer = ""\n self._stdin = stdin\n self._stdout = stdout\n orig_fl = fcntl.fcntl(self._stdin, fcntl.F_GETFL)\n fcntl.fcntl(self._stdin, fcntl.F_SETFL, orig_fl | os.O_NONBLOCK)\n\n @property\n def key(self) -> Any:\n return None\n\n @property\n def socket(self) -> BinaryIO:\n return self._stdin\n\n def on_input_ready(self):\n data = self._stdin.read(1024)\n if len(data) == 0:\n logger.debug("the end of the pipe has been closed. Exiting.")\n import sys\n sys.exit(0)\n\n self._read_buffer += (data if isinstance(data, str) else data.decode())\n\n while self._read_buffer.find("\\n") != -1:\n command, read_buffer = self._read_buffer.split("\\n", 1)\n self._read_buffer = read_buffer\n\n args = command.split("\\t", 2)\n\n local_port = args[0]\n remote_port = args[1]\n command = args[2]\n\n if command == "start_client":\n self.start_client(local_port, remote_port)\n elif command == "stop_client":\n self.close_client(local_port, remote_port)\n elif command == "start_server":\n self.start_server(local_port)\n elif command == "stop_server":\n self.stop_server(local_port)\n else:\n self.dispatch_command_to_client(local_port, remote_port, command+"\\n")\n\n def write(self, local_port: str, remote_port: str, command: str):\n data = local_port+"\\t"+remote_port+"\\t"+command\n if isinstance(self._stdout, (io.BufferedIOBase, io.RawIOBase)):\n data = data.encode()\n self._stdout.write(data)\n self._stdout.flush()\n\n def start_server(self, local_port: str):\n logger.debug(f"start the server on {local_port}")\n from pydev_server import PydevServer\n server = PydevServer(self._dispatcher, local_port)\n self._dispatcher.add_processor(server)\n\n def stop_server(self, local_port: str):\n logger.debug(f"stop the server on {local_port}")\n server = self._dispatcher.find_processor(local_port)\n self._dispatcher.remove_processor(server)\n\n def start_client(self, local_port: str, remote_port: str):\n from pydev_client import PydevClient\n logger.debug(f"create new client (local: {local_port}, remote: {remote_port}")\n client = PydevClient(self._dispatcher, local_port, remote_port)\n self._dispatcher.add_processor(client)\n\n def dispatch_command_to_client(self, local_port: str, remote_port: str, command: str):\n key = (local_port, remote_port)\n client = self._dispatcher.find_processor(key)\n client.write(command)\n\n def close_client(self, local_port: str, remote_port: str):\n logger.debug(f"close the client (local: {local_port}, remote: {remote_port})")\n key = (local_port, remote_port)\n\n client = self._dispatcher.find_processor(key)\n\n if client is not None:\n self._dispatcher.remove_processor(client)\n\n def close(self) -> None:\n pass\n')
__stickytape_write_module('pydev_server.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\n\nlogger = logging.getLogger("pydev_server")\n\n\nclass PydevServer(Processor):\n """\n Listen on the remote pod for new debugger connection and create a new client for each connection.\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str):\n logger.debug(f"start new server on port {local_port}")\n self._dispatcher = dispatcher\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._socket.bind((\'\', int(local_port)))\n self._socket.listen(100)\n self._socket.setblocking(False)\n self._local_port = str(local_port)\n\n @property\n def key(self) -> Any:\n return self._local_port\n\n @property\n def socket(self) -> socket.socket:\n return self._socket\n \n def on_input_ready(self):\n client_socket, address = self._socket.accept()\n remote_port = address[1]\n\n from pydev_client import PydevClient\n from pipe_client_server import PipeClientServer\n\n self._dispatcher.add_processor(\n PydevClient(self._dispatcher, self._local_port, str(remote_port), client_socket))\n \n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n server.write(self._local_port, str(remote_port), "start_client\\n")\n\n def close(self):\n self._socket.close()\n')
__stickytape_write_module('pydev_client.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\nfrom pipe_client_server import PipeClientServer\n\nlogger = logging.getLogger("pydev_client")\n\n\nclass PydevClient(Processor):\n """\n Client which reads Pydev commands (either on the local or remote) and send them through the pipe\n to the other end.\n\n The client also detects when a Pydev debug server starts a new server.\n When this happens, a monitor is created to handle this new server.\n (this is part of the support for multiproc in PyCharm)\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str, remote_port: str, client_socket=None):\n logger.debug(f"start new client (local: {local_port}, remote: {remote_port})")\n self._read_buffer = ""\n self._dispatcher = dispatcher\n\n if client_socket is None:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(("127.0.0.1", int(local_port)))\n else:\n self._socket = client_socket\n\n self._socket.setblocking(False)\n self._local_port = local_port\n self._remote_port = remote_port\n\n @property\n def key(self) -> Any:\n return self._local_port, self._remote_port\n\n @property\n def socket(self) -> socket.socket:\n return self._socket\n\n def write(self, data: str):\n logger.debug("write: "+data)\n self._socket.sendall(data.encode())\n\n def on_input_ready(self):\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n recv_data = self._socket.recv(1024).decode()\n if len(recv_data) == 0:\n # The socket has been closed\n logger.debug(f"stop this client, and ask remote to stop (local: {self._local_port}, "\n f"remote: {self._remote_port})")\n server.write(self._local_port, self._remote_port, "stop_client\\n")\n self._dispatcher.remove_processor(self)\n\n self._read_buffer += recv_data\n\n while self._read_buffer.find("\\n") != -1:\n command, read_buffer = self._read_buffer.split("\\n", 1)\n self._read_buffer = read_buffer\n\n # Detect when PyCharm tries to start a new server\n args = command.split("\\t", 2)\n if len(args) == 3 and args[0] == "99" and args[1] == "-1":\n new_local_port = args[2]\n logger.debug(f"start monitoring for {new_local_port} (local: {self._local_port}, "\n f"remote: {self._remote_port})")\n from pydev_server_monitor import PydevServerMonitor\n self._dispatcher.add_server_monitor(PydevServerMonitor(self._dispatcher, new_local_port))\n \n logger.debug("read : "+command)\n server.write(self._local_port, self._remote_port, command+"\\n")\n\n def close(self):\n self._socket.close()\n')
# Copyright 2021 Grégoire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
from dispatcher import Dispatcher
from pipe_client_server import PipeClientServer
from pydev_server_monitor import PydevServerMonitor
import sys
import subprocess
import os
import logging
is_local = len(sys.argv) > 1
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
format_header = "local" if is_local else "remote"
formatter = logging.Formatter('%(asctime)s - '+format_header+' %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
if is_local:
    # Local connection worker.
    #
    # Start the child connection (the remote), establish the pipe between the
    # parent and child process, then add a monitor for the local Pydev server.
local_port = sys.argv[1]
worker_command = sys.argv[2:]
child = subprocess.Popen(worker_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
dispatcher = Dispatcher(auto_stop=True)
dispatcher.add_processor(PipeClientServer(dispatcher, child.stdout, child.stdin))
server_monitor = PydevServerMonitor(dispatcher, local_port)
if server_monitor.is_socket_alive():
dispatcher.add_server_monitor(server_monitor)
else:
# Remote connection worker.
#
# Establish the pipe between the parent and child process.
dispatcher = Dispatcher(auto_stop=False)
dispatcher.add_processor(PipeClientServer(dispatcher, sys.stdin, sys.stdout))
child = None
# Finally, start the main loop
dispatcher.dispatch_loop()
if child is not None:
child.terminate()
child.wait()
| 182.636364
| 3,959
| 0.673027
|
import contextlib as __stickytape_contextlib
@__stickytape_contextlib.contextmanager
def __stickytape_temporary_dir():
import tempfile
import shutil
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
with __stickytape_temporary_dir() as __stickytape_working_dir:
def __stickytape_write_module(path, contents):
import os, os.path
def make_package(path):
parts = path.split("/")
partial_path = __stickytape_working_dir
for part in parts:
partial_path = os.path.join(partial_path, part)
if not os.path.exists(partial_path):
os.mkdir(partial_path)
with open(os.path.join(partial_path, "__init__.py"), "wb") as f:
f.write(b"\n")
make_package(os.path.dirname(path))
full_path = os.path.join(__stickytape_working_dir, path)
with open(full_path, "wb") as module_file:
module_file.write(contents)
import sys as __stickytape_sys
__stickytape_sys.path.insert(0, __stickytape_working_dir)
__stickytape_write_module('dispatcher.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport select\nimport socket\nfrom typing import Any, Dict, Union, TextIO, TYPE_CHECKING, Optional, List\n\n\nif TYPE_CHECKING:\n from processor import Processor\n from pydev_server_monitor import PydevServerMonitor\n\n\nclass Dispatcher:\n """\n The dispatcher class implements the main loop of the program,\n waiting for new I/O inputs (either from socket or pipe),\n then calling the relevant processor to handle the input.\n\n It also regularly calls monitors which are used to perform health checks\n on Pydev debug servers. If auto_stop is enabled, the loop exits when the last\n monitor terminates (i.e. no Pydev debug servers are running).\n """\n def __init__(self, auto_stop: bool):\n self._port_to_processors: "Dict[Any, Processor]" = {}\n self._socket_to_processors: Dict[Union[socket.socket, TextIO], Processor] = {}\n self._server_monitors: Dict[Any, PydevServerMonitor] = {}\n self._auto_stop = auto_stop\n\n def add_processor(self, processor: "Processor"):\n self._port_to_processors[processor.key] = processor\n self._socket_to_processors[processor.socket] = processor\n\n def remove_processor(self, processor: "Processor"):\n try:\n del self._port_to_processors[processor.key]\n del self._socket_to_processors[processor.socket]\n except KeyError:\n pass\n processor.close()\n\n def add_server_monitor(self, monitor: "PydevServerMonitor"):\n self._server_monitors[monitor.key] = monitor\n\n def remove_server_monitor(self, monitor: "PydevServerMonitor"):\n try:\n del self._server_monitors[monitor.key]\n except KeyError:\n pass\n\n def find_processor(self, key: Any) -> "Optional[Processor]":\n return self._port_to_processors.get(key, None)\n\n def get_all_processors(self) -> "List[Processor]":\n return list(self._port_to_processors.values())\n\n def dispatch_loop(self):\n while True:\n inputs = list(self._socket_to_processors.keys())\n \n inputs_ready, _, _ = select.select(inputs, [], [], 1)\n\n for input_socket in inputs_ready:\n processor = self._socket_to_processors[input_socket]\n processor.on_input_ready()\n\n for monitor in list(self._server_monitors.values()):\n monitor.monitor()\n\n if self._auto_stop and len(self._server_monitors) == 0:\n return\n \n')
__stickytape_write_module('processor.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport abc\nimport socket\nfrom typing import Any, Union, TextIO\n\n\nclass Processor(abc.ABC):\n @property\n @abc.abstractmethod\n def key(self) -> Any: raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def socket(self) -> Union[socket.socket, TextIO]: raise NotImplementedError\n\n @abc.abstractmethod\n def on_input_ready(self) -> None: raise NotImplementedError\n\n @abc.abstractmethod\n def close(self) -> None: raise NotImplementedError\n')
__stickytape_write_module('pydev_server_monitor.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom pipe_client_server import PipeClientServer\n\nlogger = logging.getLogger("pydev_server_monitor")\n\n\nclass PydevServerMonitor:\n """\n Monitor a local Pydev debug server.\n\n When initialised, this class sends a message to the remote to create a corresponding listening server.\n When the Pydev server stops, this class detects that the server is no longer running\n and also close the remote server.\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str):\n logger.debug(f"start monitoring the port {local_port}")\n self._dispatcher = dispatcher\n self._local_port = local_port\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n self._is_terminated = False\n\n if self.is_socket_alive():\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n logger.debug(f"ask remote to start new server on port {local_port}")\n server.write(local_port, "", "start_server\\n")\n else:\n logger.debug(f"server is not running")\n self._is_terminated = True\n\n @property\n def key(self) -> Any:\n return self._local_port\n \n def is_socket_alive(self) -> bool:\n if self._is_terminated:\n return False\n\n try:\n self._socket.bind((\'\', int(self._local_port)))\n except Exception:\n return True\n\n try:\n self._socket.shutdown(2)\n except:\n pass\n\n return False\n\n def monitor(self):\n if not self.is_socket_alive() and not self._is_terminated:\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n logger.debug(f"ask remote to stop server on port {self._local_port}")\n server.write(self._local_port, "", "stop_server\\n")\n self._dispatcher.remove_server_monitor(self)\n self._is_terminated = True\n')
__stickytape_write_module('pipe_client_server.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport fcntl\nimport logging\nimport os\nimport io\nfrom typing import Any, BinaryIO\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\n\nlogger = logging.getLogger("pipe_client_server")\n\n\nclass PipeClientServer(Processor):\n """\n This class handles the communication between the local and remote hosts using a pipe.\n """\n def __init__(self, dispatcher: Dispatcher, stdin: BinaryIO, stdout: BinaryIO):\n logger.debug("create new pipe client/server")\n self._dispatcher = dispatcher\n self._read_buffer = ""\n self._stdin = stdin\n self._stdout = stdout\n orig_fl = fcntl.fcntl(self._stdin, fcntl.F_GETFL)\n fcntl.fcntl(self._stdin, fcntl.F_SETFL, orig_fl | os.O_NONBLOCK)\n\n @property\n def key(self) -> Any:\n return None\n\n @property\n def socket(self) -> BinaryIO:\n return self._stdin\n\n def on_input_ready(self):\n data = self._stdin.read(1024)\n if len(data) == 0:\n logger.debug("the end of the pipe has been closed. Exiting.")\n import sys\n sys.exit(0)\n\n self._read_buffer += (data if isinstance(data, str) else data.decode())\n\n while self._read_buffer.find("\\n") != -1:\n command, read_buffer = self._read_buffer.split("\\n", 1)\n self._read_buffer = read_buffer\n\n args = command.split("\\t", 2)\n\n local_port = args[0]\n remote_port = args[1]\n command = args[2]\n\n if command == "start_client":\n self.start_client(local_port, remote_port)\n elif command == "stop_client":\n self.close_client(local_port, remote_port)\n elif command == "start_server":\n self.start_server(local_port)\n elif command == "stop_server":\n self.stop_server(local_port)\n else:\n self.dispatch_command_to_client(local_port, remote_port, command+"\\n")\n\n def write(self, local_port: str, remote_port: str, command: str):\n data = local_port+"\\t"+remote_port+"\\t"+command\n if isinstance(self._stdout, (io.BufferedIOBase, io.RawIOBase)):\n data = data.encode()\n self._stdout.write(data)\n self._stdout.flush()\n\n def start_server(self, local_port: str):\n logger.debug(f"start the server on {local_port}")\n from pydev_server import PydevServer\n server = PydevServer(self._dispatcher, local_port)\n self._dispatcher.add_processor(server)\n\n def stop_server(self, local_port: str):\n logger.debug(f"stop the server on {local_port}")\n server = self._dispatcher.find_processor(local_port)\n self._dispatcher.remove_processor(server)\n\n def start_client(self, local_port: str, remote_port: str):\n from pydev_client import PydevClient\n logger.debug(f"create new client (local: {local_port}, remote: {remote_port}")\n client = PydevClient(self._dispatcher, local_port, remote_port)\n self._dispatcher.add_processor(client)\n\n def dispatch_command_to_client(self, local_port: str, remote_port: str, command: str):\n key = (local_port, remote_port)\n client = self._dispatcher.find_processor(key)\n client.write(command)\n\n def close_client(self, local_port: str, remote_port: str):\n logger.debug(f"close the client (local: {local_port}, remote: {remote_port})")\n key = (local_port, remote_port)\n\n client = self._dispatcher.find_processor(key)\n\n if client is not None:\n self._dispatcher.remove_processor(client)\n\n def close(self) -> None:\n pass\n')
__stickytape_write_module('pydev_server.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\n\nlogger = logging.getLogger("pydev_server")\n\n\nclass PydevServer(Processor):\n """\n Listen on the remote pod for new debugger connection and create a new client for each connection.\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str):\n logger.debug(f"start new server on port {local_port}")\n self._dispatcher = dispatcher\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._socket.bind((\'\', int(local_port)))\n self._socket.listen(100)\n self._socket.setblocking(False)\n self._local_port = str(local_port)\n\n @property\n def key(self) -> Any:\n return self._local_port\n\n @property\n def socket(self) -> socket.socket:\n return self._socket\n \n def on_input_ready(self):\n client_socket, address = self._socket.accept()\n remote_port = address[1]\n\n from pydev_client import PydevClient\n from pipe_client_server import PipeClientServer\n\n self._dispatcher.add_processor(\n PydevClient(self._dispatcher, self._local_port, str(remote_port), client_socket))\n \n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n server.write(self._local_port, str(remote_port), "start_client\\n")\n\n def close(self):\n self._socket.close()\n')
__stickytape_write_module('pydev_client.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\nfrom pipe_client_server import PipeClientServer\n\nlogger = logging.getLogger("pydev_client")\n\n\nclass PydevClient(Processor):\n """\n Client which reads Pydev commands (either on the local or remote) and send them through the pipe\n to the other end.\n\n The client also detects when a Pydev debug server starts a new server.\n When this happens, a monitor is created to handle this new server.\n (this is part of the support for multiproc in PyCharm)\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str, remote_port: str, client_socket=None):\n logger.debug(f"start new client (local: {local_port}, remote: {remote_port})")\n self._read_buffer = ""\n self._dispatcher = dispatcher\n\n if client_socket is None:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(("127.0.0.1", int(local_port)))\n else:\n self._socket = client_socket\n\n self._socket.setblocking(False)\n self._local_port = local_port\n self._remote_port = remote_port\n\n @property\n def key(self) -> Any:\n return self._local_port, self._remote_port\n\n @property\n def socket(self) -> socket.socket:\n return self._socket\n\n def write(self, data: str):\n logger.debug("write: "+data)\n self._socket.sendall(data.encode())\n\n def on_input_ready(self):\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n recv_data = self._socket.recv(1024).decode()\n if len(recv_data) == 0:\n # The socket has been closed\n logger.debug(f"stop this client, and ask remote to stop (local: {self._local_port}, "\n f"remote: {self._remote_port})")\n server.write(self._local_port, self._remote_port, "stop_client\\n")\n self._dispatcher.remove_processor(self)\n\n self._read_buffer += recv_data\n\n while self._read_buffer.find("\\n") != -1:\n command, read_buffer = self._read_buffer.split("\\n", 1)\n self._read_buffer = read_buffer\n\n # Detect when PyCharm tries to start a new server\n args = command.split("\\t", 2)\n if len(args) == 3 and args[0] == "99" and args[1] == "-1":\n new_local_port = args[2]\n logger.debug(f"start monitoring for {new_local_port} (local: {self._local_port}, "\n f"remote: {self._remote_port})")\n from pydev_server_monitor import PydevServerMonitor\n self._dispatcher.add_server_monitor(PydevServerMonitor(self._dispatcher, new_local_port))\n \n logger.debug("read : "+command)\n server.write(self._local_port, self._remote_port, command+"\\n")\n\n def close(self):\n self._socket.close()\n')
from dispatcher import Dispatcher
from pipe_client_server import PipeClientServer
from pydev_server_monitor import PydevServerMonitor
import sys
import subprocess
import os
import logging
is_local = len(sys.argv) > 1
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
format_header = "local" if is_local else "remote"
formatter = logging.Formatter('%(asctime)s - '+format_header+' %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
if is_local:
local_port = sys.argv[1]
worker_command = sys.argv[2:]
child = subprocess.Popen(worker_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
dispatcher = Dispatcher(auto_stop=True)
dispatcher.add_processor(PipeClientServer(dispatcher, child.stdout, child.stdin))
server_monitor = PydevServerMonitor(dispatcher, local_port)
if server_monitor.is_socket_alive():
dispatcher.add_server_monitor(server_monitor)
else:
dispatcher = Dispatcher(auto_stop=False)
dispatcher.add_processor(PipeClientServer(dispatcher, sys.stdin, sys.stdout))
child = None
dispatcher.dispatch_loop()
if child is not None:
child.terminate()
child.wait()
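# Editor's usage sketch (hedged; derived only from the argument handling above — the
# script name is hypothetical):
#
#   local side:   python pydev_bridge.py <pycharm_debug_port> <command that starts the remote side...>
#                 - sys.argv[1] is the local PyCharm debug-server port to monitor,
#                 - sys.argv[2:] is spawned as a child process whose stdin/stdout
#                   become the pipe transport to the remote end.
#   remote side:  run with no arguments; it serves the other end of the pipe on its
#                 own stdin/stdout and never auto-stops (auto_stop=False).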
| true
| true
|
790212da0bf2d0d6dde2cc84c309acc96c1a0b52
| 445
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/layout/image/_sizey.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/layout/image/_sizey.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/layout/image/_sizey.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class SizeyValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="sizey", parent_name="layout.image", **kwargs):
super(SizeyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
**kwargs
)
| 34.230769
| 82
| 0.653933
|
import _plotly_utils.basevalidators
class SizeyValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="sizey", parent_name="layout.image", **kwargs):
super(SizeyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
**kwargs
)
| true
| true
|
790213553c54c37ddcfefa0a6d8975e443567c48
| 397
|
py
|
Python
|
src/config/urls.py
|
JeremySilvaSilva/Django-Rest-Framework-User-Template
|
057e9fb44da05f9ea23617d3adeb26af2913575d
|
[
"MIT"
] | null | null | null |
src/config/urls.py
|
JeremySilvaSilva/Django-Rest-Framework-User-Template
|
057e9fb44da05f9ea23617d3adeb26af2913575d
|
[
"MIT"
] | 3
|
2020-01-10T15:47:26.000Z
|
2020-06-06T01:14:17.000Z
|
src/config/urls.py
|
JeremyAndress/API-User-Template
|
057e9fb44da05f9ea23617d3adeb26af2913575d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from rest_framework.schemas import get_schema_view
VERSION = 'V1.0.0'
urlpatterns = [
path('admin/', admin.site.urls),
url('api/{}/user/'.format(VERSION),include('app.user.urls',namespace='user')),
url('api/{}/core/'.format(VERSION),include('app.core.urls',namespace='core')),
]
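# Route summary (derived from the patterns above): with VERSION = 'V1.0.0' the project
# exposes 'admin/', 'api/V1.0.0/user/...' and 'api/V1.0.0/core/...'. Note that
# get_schema_view is imported but not referenced in this module.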
| 30.538462
| 82
| 0.715365
|
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from rest_framework.schemas import get_schema_view
VERSION = 'V1.0.0'
urlpatterns = [
path('admin/', admin.site.urls),
url('api/{}/user/'.format(VERSION),include('app.user.urls',namespace='user')),
url('api/{}/core/'.format(VERSION),include('app.core.urls',namespace='core')),
]
| true
| true
|
790213d4b029247f6dc2efb09199f7ffc4857cde
| 29,224
|
py
|
Python
|
python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py
|
TingquanGao/Paddle
|
9b1015d90b4d498ab58df7cff2c3ed27863ce970
|
[
"Apache-2.0"
] | 3
|
2021-06-08T14:24:36.000Z
|
2021-06-08T14:24:38.000Z
|
python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py
|
chenyanlei1/Paddle
|
f249a5f05f0f5832279244d88c8cb4eaaad1fbd4
|
[
"Apache-2.0"
] | 1
|
2021-03-17T07:53:43.000Z
|
2021-03-17T07:53:43.000Z
|
python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py
|
chenyanlei1/Paddle
|
f249a5f05f0f5832279244d88c8cb4eaaad1fbd4
|
[
"Apache-2.0"
] | 1
|
2021-08-04T14:28:58.000Z
|
2021-08-04T14:28:58.000Z
|
# copyright (c) 2018 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
import os
import unittest
import random
import numpy as np
import paddle.fluid as fluid
import six
import paddle
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
from paddle.fluid import core
paddle.enable_static()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ["CPU_NUM"] = "1"
def linear_fc(num):
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in six.moves.xrange(num):
hidden = fluid.layers.fc(hidden, size=128, act='relu')
loss = fluid.layers.cross_entropy(input=hidden, label=label)
loss = fluid.layers.mean(loss)
return loss
def residual_block(num, quant_skip_pattern=None):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
data = fluid.layers.data(
name='image',
shape=[1, 1, 32, 32],
dtype='float32',
append_batch_size=False)
label = fluid.layers.data(
name='label', shape=[1, 1], dtype='int64', append_batch_size=False)
hidden = data
for _ in six.moves.xrange(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
matmul_weight = fluid.layers.create_parameter(
shape=[1, 16, 32, 32], dtype='float32')
hidden = fluid.layers.matmul(hidden, matmul_weight, True, True)
if quant_skip_pattern:
with fluid.name_scope(quant_skip_pattern):
pool = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
else:
pool = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
fc = fluid.layers.fc(input=pool, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = fluid.layers.mean(loss)
return loss
def conv_net(img, label, quant_skip_pattern):
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=img,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
pool_type='max',
act="relu")
conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
pool_type='avg',
act="relu")
hidden = fluid.layers.fc(input=conv_pool_2, size=100, act='relu')
with fluid.name_scope(quant_skip_pattern):
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
return avg_loss
class TestQuantizationTransformPass(unittest.TestCase):
def setUp(self):
self.quantizable_op_and_inputs = {
'conv2d': ['Input', 'Filter'],
'depthwise_conv2d': ['Input', 'Filter'],
'mul': ['X', 'Y']
}
self.quantizable_grad_op_inputs = {
'conv2d_grad': ['Input', 'Filter'],
'depthwise_conv2d_grad': ['Input', 'Filter'],
'mul_grad': ['X', 'Y']
}
def check_program(self, program):
quantized_ops = set()
for block in program.blocks:
for op in block.ops:
# check forward
if op.type in self.quantizable_op_and_inputs:
for arg_name in op.input_arg_names:
self.assertTrue(
arg_name.endswith('.quantized.dequantized'))
quantized_ops.add(arg_name)
for op in block.ops:
# check backward
if op.type in self.quantizable_grad_op_inputs:
for pname in self.quantizable_grad_op_inputs[op.type]:
arg_name = op.input(pname)[0]
self.assertTrue(
arg_name.endswith('.quantized.dequantized'))
self.assertTrue(arg_name in quantized_ops)
def linear_fc_quant(self,
activation_quant_type,
weight_quantize_type,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = linear_fc(3)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
transform_pass = QuantizationTransformPass(
scope=fluid.global_scope(),
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quantize_type)
transform_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
graph.draw('.', 'quantize_fc_' + activation_quant_type,
marked_nodes)
program = graph.to_program()
self.check_program(program)
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quantize') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_fc_' + activation_quant_type,
val_marked_nodes)
def test_linear_fc_quant_abs_max(self):
self.linear_fc_quant('abs_max', 'abs_max', for_ci=True)
def test_linear_fc_quant_range_abs_max(self):
self.linear_fc_quant('range_abs_max', 'abs_max', for_ci=True)
def test_linear_fc_quant_moving_average_abs_max(self):
self.linear_fc_quant(
'moving_average_abs_max', 'channel_wise_abs_max', for_ci=True)
def residual_block_quant(self,
activation_quant_type,
weight_quantize_type,
quantizable_op_type,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = residual_block(2)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
transform_pass = QuantizationTransformPass(
scope=fluid.global_scope(),
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quantize_type,
quantizable_op_type=quantizable_op_type)
transform_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
graph.draw('.', 'quantize_residual_' + activation_quant_type,
marked_nodes)
program = graph.to_program()
self.check_program(program)
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quantize') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_residual_' + activation_quant_type,
val_marked_nodes)
def test_residual_block_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'abs_max', 'abs_max', quantizable_op_type, for_ci=True)
def test_residual_block_range_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'range_abs_max', 'abs_max', quantizable_op_type, for_ci=True)
def test_residual_block_moving_average_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'moving_average_abs_max',
'channel_wise_abs_max',
quantizable_op_type,
for_ci=True)
class TestQuantizationFreezePass(unittest.TestCase):
def freeze_graph(self,
use_cuda,
seed,
activation_quant_type,
bias_correction=False,
weight_quant_type='abs_max',
for_ci=True,
quant_skip_pattern='skip_quant'):
def build_program(main, startup, is_test):
main.random_seed = seed
startup.random_seed = seed
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
img = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
loss = conv_net(img, label, quant_skip_pattern)
if not is_test:
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
return [img, label], loss
random.seed(0)
np.random.seed(0)
main = fluid.Program()
startup = fluid.Program()
test_program = fluid.Program()
feeds, loss = build_program(main, startup, False)
build_program(test_program, startup, True)
test_program = test_program.clone(for_test=True)
main_graph = IrGraph(core.Graph(main.desc), for_test=False)
test_graph = IrGraph(core.Graph(test_program.desc), for_test=True)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
scope = fluid.Scope()
with fluid.scope_guard(scope):
exe.run(startup)
transform_pass = QuantizationTransformPass(
scope=scope,
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quant_type,
skip_pattern=quant_skip_pattern)
transform_pass.apply(main_graph)
transform_pass.apply(test_graph)
dev_name = '_gpu_' if use_cuda else '_cpu_'
if not for_ci:
marked_nodes = set()
for op in main_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
main_graph.draw('.', 'main' + dev_name + activation_quant_type + '_'
+ weight_quant_type, marked_nodes)
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test' + dev_name + activation_quant_type + '_'
+ weight_quant_type, marked_nodes)
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_inplace = False
build_strategy.fuse_all_reduce_ops = False
binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
quantized_test_program = test_graph.to_program()
iters = 5
batch_size = 8
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
feeder = fluid.DataFeeder(feed_list=feeds, place=place)
with fluid.scope_guard(scope):
for _ in range(iters):
data = next(train_reader())
loss_v = exe.run(binary,
feed=feeder.feed(data),
fetch_list=[loss])
if not for_ci:
print('{}: {}'.format('loss' + dev_name +
activation_quant_type + '_' +
weight_quant_type, loss_v))
test_data = next(test_reader())
with fluid.program_guard(quantized_test_program):
w_var = fluid.framework._get_var('conv2d_1.w_0.quantized',
quantized_test_program)
# Testing
with fluid.scope_guard(scope):
test_loss1, w_quant = exe.run(program=quantized_test_program,
feed=feeder.feed(test_data),
fetch_list=[loss, w_var])
# Freeze graph for inference, but the weight of fc/conv is still float type.
freeze_pass = QuantizationFreezePass(
scope=scope, place=place, bias_correction=bias_correction, \
weight_quantize_type=weight_quant_type)
freeze_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_freeze' + dev_name +
activation_quant_type + '_' + weight_quant_type,
marked_nodes)
server_program = test_graph.to_program()
with fluid.scope_guard(scope):
test_loss2, = exe.run(program=server_program,
feed=feeder.feed(test_data),
fetch_list=[loss])
self.assertAlmostEqual(test_loss1, test_loss2, delta=5e-3)
if not for_ci:
print(
'{}: {}'.format('test_loss1' + dev_name + activation_quant_type
+ '_' + weight_quant_type, test_loss1))
print(
'{}: {}'.format('test_loss2' + dev_name + activation_quant_type
+ '_' + weight_quant_type, test_loss2))
w_freeze = np.array(scope.find_var('conv2d_1.w_0').get_tensor())
        # The following check may fail due to limited calculation precision, so it is disabled:
# self.assertAlmostEqual(np.sum(w_freeze), np.sum(w_quant))
if not for_ci:
print('{}: {}'.format('w_freeze' + dev_name + activation_quant_type
+ '_' + weight_quant_type, np.sum(w_freeze)))
print('{}: {}'.format('w_quant' + dev_name + activation_quant_type +
'_' + weight_quant_type, np.sum(w_quant)))
# Convert parameter to 8-bit.
convert_int8_pass = ConvertToInt8Pass(scope=scope, place=place)
convert_int8_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_int8' + dev_name + activation_quant_type
+ '_' + weight_quant_type, marked_nodes)
server_program_int8 = test_graph.to_program()
# Save the 8-bit parameter and model file.
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
'server_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, ['image', 'label'], [loss], exe,
server_program_int8)
# Test whether the 8-bit parameter and model file can be loaded successfully.
[infer, feed, fetch] = fluid.io.load_inference_model(
'server_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, exe)
# Check the loaded 8-bit weight.
w_8bit = np.array(scope.find_var('conv2d_1.w_0.int8').get_tensor())
self.assertEqual(w_8bit.dtype, np.int8)
self.assertEqual(np.sum(w_8bit), np.sum(w_freeze))
if not for_ci:
print('{}: {}'.format('w_8bit' + dev_name + activation_quant_type +
'_' + weight_quant_type, np.sum(w_8bit)))
print('{}: {}'.format('w_freeze' + dev_name + activation_quant_type
+ '_' + weight_quant_type, np.sum(w_freeze)))
mobile_pass = TransformForMobilePass()
mobile_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_mobile' + dev_name +
activation_quant_type + '_' + weight_quant_type,
marked_nodes)
mobile_program = test_graph.to_program()
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
'mobile_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, ['image', 'label'], [loss], exe,
mobile_program)
def test_freeze_graph_cuda_dynamic(self):
if fluid.core.is_compiled_with_cuda():
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='abs_max',
weight_quant_type='abs_max',
for_ci=True)
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cpu_dynamic(self):
with fluid.unique_name.guard():
self.freeze_graph(
False,
seed=2,
activation_quant_type='abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cuda_static(self):
if fluid.core.is_compiled_with_cuda():
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
bias_correction=True,
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
bias_correction=True,
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cpu_static(self):
with fluid.unique_name.guard():
self.freeze_graph(
False,
seed=2,
activation_quant_type='range_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='moving_average_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='range_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='moving_average_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def quant_dequant_residual_block(num, quant_skip_pattern=None):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
data1 = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
data2 = fluid.layers.data(
name='matmul_input', shape=[16, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data1
for _ in six.moves.xrange(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
hidden = fluid.layers.matmul(hidden, data2, True, True)
if isinstance(quant_skip_pattern, str):
with fluid.name_scope(quant_skip_pattern):
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
pool_add = fluid.layers.elementwise_add(
x=pool1, y=pool2, act='relu')
elif isinstance(quant_skip_pattern, list):
assert len(
quant_skip_pattern
) > 1, 'test config error: the len of quant_skip_pattern list should be greater than 1.'
with fluid.name_scope(quant_skip_pattern[0]):
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
with fluid.name_scope(quant_skip_pattern[1]):
pool_add = fluid.layers.elementwise_add(
x=pool1, y=pool2, act='relu')
else:
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu')
fc = fluid.layers.fc(input=pool_add, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = fluid.layers.mean(loss)
return loss
class TestAddQuantDequantPass(unittest.TestCase):
def setUp(self):
self._target_ops = {'elementwise_add', 'pool2d'}
self._target_grad_ops = {'elementwise_add_grad', 'pool2d_grad'}
def check_graph(self, graph, skip_pattern=None):
ops = graph.all_op_nodes()
for op_node in ops:
if op_node.name() in self._target_ops:
user_skipped = False
if isinstance(skip_pattern, list):
user_skipped = op_node.op().has_attr("op_namescope") and \
any(pattern in op_node.op().attr("op_namescope") for pattern in skip_pattern)
elif isinstance(skip_pattern, str):
user_skipped = op_node.op().has_attr("op_namescope") and \
op_node.op().attr("op_namescope").find(skip_pattern) != -1
if user_skipped:
continue
in_nodes_all_not_persistable = True
for input_name in op_node.input_arg_names():
in_node = graph._find_node_by_name(op_node.inputs,
input_name)
in_nodes_all_not_persistable = (
in_nodes_all_not_persistable and
not in_node.persistable())
if not in_nodes_all_not_persistable:
continue
input_names = op_node.input_arg_names()
for input_name in input_names:
self.assertTrue(input_name.endswith('.quant_dequant'))
def residual_block_quant(self,
quantizable_op_type,
skip_pattern=None,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = quant_dequant_residual_block(2, skip_pattern)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
add_quant_dequant_pass = AddQuantDequantPass(
scope=fluid.global_scope(),
place=place,
skip_pattern=skip_pattern,
quantizable_op_type=quantizable_op_type)
add_quant_dequant_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quant') > -1:
marked_nodes.add(op)
graph.draw('.', 'add_quant_dequant_graph', marked_nodes)
self.check_graph(graph, skip_pattern)
program = graph.to_program()
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quant') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_add_quant_dequant_graph', val_marked_nodes)
def test_residual_block(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type, skip_pattern=None, for_ci=True)
def test_residual_block_skip_pattern(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type, skip_pattern='skip_quant', for_ci=True)
    def test_residual_block_skip_pattern_list(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type,
skip_pattern=['skip_quant1', 'skip_quant2'],
for_ci=True)
if __name__ == '__main__':
unittest.main()
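# Pass pipeline exercised by TestQuantizationFreezePass.freeze_graph above (summary,
# derived from the test body): QuantizationTransformPass inserts fake quant/dequant ops
# and the graph is trained for a few MNIST batches; QuantizationFreezePass folds the
# collected scales into an inference graph; ConvertToInt8Pass rewrites the weights to
# int8 and the result is saved as 'server_int8...'; TransformForMobilePass then
# produces the 'mobile_int8...' inference model.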
| 42.29233
| 112
| 0.56919
|
import os
import unittest
import random
import numpy as np
import paddle.fluid as fluid
import six
import paddle
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
from paddle.fluid import core
paddle.enable_static()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ["CPU_NUM"] = "1"
def linear_fc(num):
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in six.moves.xrange(num):
hidden = fluid.layers.fc(hidden, size=128, act='relu')
loss = fluid.layers.cross_entropy(input=hidden, label=label)
loss = fluid.layers.mean(loss)
return loss
def residual_block(num, quant_skip_pattern=None):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
data = fluid.layers.data(
name='image',
shape=[1, 1, 32, 32],
dtype='float32',
append_batch_size=False)
label = fluid.layers.data(
name='label', shape=[1, 1], dtype='int64', append_batch_size=False)
hidden = data
for _ in six.moves.xrange(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
matmul_weight = fluid.layers.create_parameter(
shape=[1, 16, 32, 32], dtype='float32')
hidden = fluid.layers.matmul(hidden, matmul_weight, True, True)
if quant_skip_pattern:
with fluid.name_scope(quant_skip_pattern):
pool = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
else:
pool = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
fc = fluid.layers.fc(input=pool, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = fluid.layers.mean(loss)
return loss
def conv_net(img, label, quant_skip_pattern):
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=img,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
pool_type='max',
act="relu")
conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
pool_type='avg',
act="relu")
hidden = fluid.layers.fc(input=conv_pool_2, size=100, act='relu')
with fluid.name_scope(quant_skip_pattern):
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
return avg_loss
class TestQuantizationTransformPass(unittest.TestCase):
def setUp(self):
self.quantizable_op_and_inputs = {
'conv2d': ['Input', 'Filter'],
'depthwise_conv2d': ['Input', 'Filter'],
'mul': ['X', 'Y']
}
self.quantizable_grad_op_inputs = {
'conv2d_grad': ['Input', 'Filter'],
'depthwise_conv2d_grad': ['Input', 'Filter'],
'mul_grad': ['X', 'Y']
}
def check_program(self, program):
quantized_ops = set()
for block in program.blocks:
for op in block.ops:
if op.type in self.quantizable_op_and_inputs:
for arg_name in op.input_arg_names:
self.assertTrue(
arg_name.endswith('.quantized.dequantized'))
quantized_ops.add(arg_name)
for op in block.ops:
if op.type in self.quantizable_grad_op_inputs:
for pname in self.quantizable_grad_op_inputs[op.type]:
arg_name = op.input(pname)[0]
self.assertTrue(
arg_name.endswith('.quantized.dequantized'))
self.assertTrue(arg_name in quantized_ops)
def linear_fc_quant(self,
activation_quant_type,
weight_quantize_type,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = linear_fc(3)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
transform_pass = QuantizationTransformPass(
scope=fluid.global_scope(),
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quantize_type)
transform_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
graph.draw('.', 'quantize_fc_' + activation_quant_type,
marked_nodes)
program = graph.to_program()
self.check_program(program)
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quantize') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_fc_' + activation_quant_type,
val_marked_nodes)
def test_linear_fc_quant_abs_max(self):
self.linear_fc_quant('abs_max', 'abs_max', for_ci=True)
def test_linear_fc_quant_range_abs_max(self):
self.linear_fc_quant('range_abs_max', 'abs_max', for_ci=True)
def test_linear_fc_quant_moving_average_abs_max(self):
self.linear_fc_quant(
'moving_average_abs_max', 'channel_wise_abs_max', for_ci=True)
def residual_block_quant(self,
activation_quant_type,
weight_quantize_type,
quantizable_op_type,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = residual_block(2)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
transform_pass = QuantizationTransformPass(
scope=fluid.global_scope(),
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quantize_type,
quantizable_op_type=quantizable_op_type)
transform_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
graph.draw('.', 'quantize_residual_' + activation_quant_type,
marked_nodes)
program = graph.to_program()
self.check_program(program)
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quantize') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_residual_' + activation_quant_type,
val_marked_nodes)
def test_residual_block_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'abs_max', 'abs_max', quantizable_op_type, for_ci=True)
def test_residual_block_range_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'range_abs_max', 'abs_max', quantizable_op_type, for_ci=True)
def test_residual_block_moving_average_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'moving_average_abs_max',
'channel_wise_abs_max',
quantizable_op_type,
for_ci=True)
class TestQuantizationFreezePass(unittest.TestCase):
def freeze_graph(self,
use_cuda,
seed,
activation_quant_type,
bias_correction=False,
weight_quant_type='abs_max',
for_ci=True,
quant_skip_pattern='skip_quant'):
def build_program(main, startup, is_test):
main.random_seed = seed
startup.random_seed = seed
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
img = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
loss = conv_net(img, label, quant_skip_pattern)
if not is_test:
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
return [img, label], loss
random.seed(0)
np.random.seed(0)
main = fluid.Program()
startup = fluid.Program()
test_program = fluid.Program()
feeds, loss = build_program(main, startup, False)
build_program(test_program, startup, True)
test_program = test_program.clone(for_test=True)
main_graph = IrGraph(core.Graph(main.desc), for_test=False)
test_graph = IrGraph(core.Graph(test_program.desc), for_test=True)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
scope = fluid.Scope()
with fluid.scope_guard(scope):
exe.run(startup)
transform_pass = QuantizationTransformPass(
scope=scope,
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quant_type,
skip_pattern=quant_skip_pattern)
transform_pass.apply(main_graph)
transform_pass.apply(test_graph)
dev_name = '_gpu_' if use_cuda else '_cpu_'
if not for_ci:
marked_nodes = set()
for op in main_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
main_graph.draw('.', 'main' + dev_name + activation_quant_type + '_'
+ weight_quant_type, marked_nodes)
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test' + dev_name + activation_quant_type + '_'
+ weight_quant_type, marked_nodes)
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_inplace = False
build_strategy.fuse_all_reduce_ops = False
binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
quantized_test_program = test_graph.to_program()
iters = 5
batch_size = 8
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
feeder = fluid.DataFeeder(feed_list=feeds, place=place)
with fluid.scope_guard(scope):
for _ in range(iters):
data = next(train_reader())
loss_v = exe.run(binary,
feed=feeder.feed(data),
fetch_list=[loss])
if not for_ci:
print('{}: {}'.format('loss' + dev_name +
activation_quant_type + '_' +
weight_quant_type, loss_v))
test_data = next(test_reader())
with fluid.program_guard(quantized_test_program):
w_var = fluid.framework._get_var('conv2d_1.w_0.quantized',
quantized_test_program)
with fluid.scope_guard(scope):
test_loss1, w_quant = exe.run(program=quantized_test_program,
feed=feeder.feed(test_data),
fetch_list=[loss, w_var])
freeze_pass = QuantizationFreezePass(
scope=scope, place=place, bias_correction=bias_correction, \
weight_quantize_type=weight_quant_type)
freeze_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_freeze' + dev_name +
activation_quant_type + '_' + weight_quant_type,
marked_nodes)
server_program = test_graph.to_program()
with fluid.scope_guard(scope):
test_loss2, = exe.run(program=server_program,
feed=feeder.feed(test_data),
fetch_list=[loss])
self.assertAlmostEqual(test_loss1, test_loss2, delta=5e-3)
if not for_ci:
print(
'{}: {}'.format('test_loss1' + dev_name + activation_quant_type
+ '_' + weight_quant_type, test_loss1))
print(
'{}: {}'.format('test_loss2' + dev_name + activation_quant_type
+ '_' + weight_quant_type, test_loss2))
w_freeze = np.array(scope.find_var('conv2d_1.w_0').get_tensor())
if not for_ci:
print('{}: {}'.format('w_freeze' + dev_name + activation_quant_type
+ '_' + weight_quant_type, np.sum(w_freeze)))
print('{}: {}'.format('w_quant' + dev_name + activation_quant_type +
'_' + weight_quant_type, np.sum(w_quant)))
convert_int8_pass = ConvertToInt8Pass(scope=scope, place=place)
convert_int8_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_int8' + dev_name + activation_quant_type
+ '_' + weight_quant_type, marked_nodes)
server_program_int8 = test_graph.to_program()
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
'server_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, ['image', 'label'], [loss], exe,
server_program_int8)
[infer, feed, fetch] = fluid.io.load_inference_model(
'server_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, exe)
w_8bit = np.array(scope.find_var('conv2d_1.w_0.int8').get_tensor())
self.assertEqual(w_8bit.dtype, np.int8)
self.assertEqual(np.sum(w_8bit), np.sum(w_freeze))
if not for_ci:
print('{}: {}'.format('w_8bit' + dev_name + activation_quant_type +
'_' + weight_quant_type, np.sum(w_8bit)))
print('{}: {}'.format('w_freeze' + dev_name + activation_quant_type
+ '_' + weight_quant_type, np.sum(w_freeze)))
mobile_pass = TransformForMobilePass()
mobile_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_mobile' + dev_name +
activation_quant_type + '_' + weight_quant_type,
marked_nodes)
mobile_program = test_graph.to_program()
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
'mobile_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, ['image', 'label'], [loss], exe,
mobile_program)
def test_freeze_graph_cuda_dynamic(self):
if fluid.core.is_compiled_with_cuda():
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='abs_max',
weight_quant_type='abs_max',
for_ci=True)
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cpu_dynamic(self):
with fluid.unique_name.guard():
self.freeze_graph(
False,
seed=2,
activation_quant_type='abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cuda_static(self):
if fluid.core.is_compiled_with_cuda():
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
bias_correction=True,
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
bias_correction=True,
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cpu_static(self):
with fluid.unique_name.guard():
self.freeze_graph(
False,
seed=2,
activation_quant_type='range_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='moving_average_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='range_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='moving_average_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def quant_dequant_residual_block(num, quant_skip_pattern=None):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
data1 = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
data2 = fluid.layers.data(
name='matmul_input', shape=[16, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data1
for _ in six.moves.xrange(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
hidden = fluid.layers.matmul(hidden, data2, True, True)
if isinstance(quant_skip_pattern, str):
with fluid.name_scope(quant_skip_pattern):
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
pool_add = fluid.layers.elementwise_add(
x=pool1, y=pool2, act='relu')
elif isinstance(quant_skip_pattern, list):
assert len(
quant_skip_pattern
) > 1, 'test config error: the len of quant_skip_pattern list should be greater than 1.'
with fluid.name_scope(quant_skip_pattern[0]):
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
with fluid.name_scope(quant_skip_pattern[1]):
pool_add = fluid.layers.elementwise_add(
x=pool1, y=pool2, act='relu')
else:
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu')
fc = fluid.layers.fc(input=pool_add, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = fluid.layers.mean(loss)
return loss
class TestAddQuantDequantPass(unittest.TestCase):
def setUp(self):
self._target_ops = {'elementwise_add', 'pool2d'}
self._target_grad_ops = {'elementwise_add_grad', 'pool2d_grad'}
def check_graph(self, graph, skip_pattern=None):
ops = graph.all_op_nodes()
for op_node in ops:
if op_node.name() in self._target_ops:
user_skipped = False
if isinstance(skip_pattern, list):
user_skipped = op_node.op().has_attr("op_namescope") and \
any(pattern in op_node.op().attr("op_namescope") for pattern in skip_pattern)
elif isinstance(skip_pattern, str):
user_skipped = op_node.op().has_attr("op_namescope") and \
op_node.op().attr("op_namescope").find(skip_pattern) != -1
if user_skipped:
continue
in_nodes_all_not_persistable = True
for input_name in op_node.input_arg_names():
in_node = graph._find_node_by_name(op_node.inputs,
input_name)
in_nodes_all_not_persistable = (
in_nodes_all_not_persistable and
not in_node.persistable())
if not in_nodes_all_not_persistable:
continue
input_names = op_node.input_arg_names()
for input_name in input_names:
self.assertTrue(input_name.endswith('.quant_dequant'))
def residual_block_quant(self,
quantizable_op_type,
skip_pattern=None,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = quant_dequant_residual_block(2, skip_pattern)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
add_quant_dequant_pass = AddQuantDequantPass(
scope=fluid.global_scope(),
place=place,
skip_pattern=skip_pattern,
quantizable_op_type=quantizable_op_type)
add_quant_dequant_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quant') > -1:
marked_nodes.add(op)
graph.draw('.', 'add_quant_dequant_graph', marked_nodes)
self.check_graph(graph, skip_pattern)
program = graph.to_program()
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quant') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_add_quant_dequant_graph', val_marked_nodes)
def test_residual_block(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type, skip_pattern=None, for_ci=True)
def test_residual_block_skip_pattern(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type, skip_pattern='skip_quant', for_ci=True)
    def test_residual_block_skip_pattern_list(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type,
skip_pattern=['skip_quant1', 'skip_quant2'],
for_ci=True)
if __name__ == '__main__':
unittest.main()
| true
| true
|
7902143a3d4765fbef4e037174c5457d06039dde
| 1,867
|
py
|
Python
|
parse_reference.py
|
Dewdis/scholar_tree
|
300585e77dc06ee5f4297c5a699eccf76200e934
|
[
"MIT"
] | null | null | null |
parse_reference.py
|
Dewdis/scholar_tree
|
300585e77dc06ee5f4297c5a699eccf76200e934
|
[
"MIT"
] | null | null | null |
parse_reference.py
|
Dewdis/scholar_tree
|
300585e77dc06ee5f4297c5a699eccf76200e934
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Possible string formats:
<author(s)> <title> <source> <year>
"""
import re
import pdf
CRED = '\033[91m'
CGREEN = '\33[32m'
CYELLOW = '\33[33m'
CBLUE = '\33[34m'
CVIOLET = '\33[35m'
CBEIGE = '\33[36m'
CWHITE = '\33[37m'
CEND = '\033[0m'
def extract_references_list_by_keyword(text, keyword):
print(text)
match_res = re.search(keyword, text)
ref_text = text[match_res.span()[0]:]
# print(ref_text)
# WARNING: not more than 999 references!
    index_re = re.compile(r'\[[0-9]([0-9]|)([0-9]|)\]')
ref_pos = []
for ref in index_re.finditer(ref_text):
ref_pos.append(ref.span()[0])
ref_pos.append(len(ref_text))
for i in range(len(ref_pos)-1):
print(CYELLOW + ref_text[ref_pos[i]:ref_pos[i+1]] + CEND)
def extract_references_list(text):
res = []
buffer = ""
state = 0
for i in reversed(range(0, len(text)-1)):
c = text[i]
buffer = c + buffer
if state == 0:
if c == ']':
state = 1
elif state == 1:
if c in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
state = 2
else:
state = 0
elif state == 2:
if c == '[':
res.append(buffer)
if buffer[1] == '1' and buffer[2] == ']':
break
state = 0
buffer = ""
else:
print("Unknown state")
            raise RuntimeError("Unknown state")
return reversed(res)
def extract_article_from_reference(string):
pass
# return (autors, title, date)
if __name__ == '__main__':
import sys
text = pdf.extract_text(sys.argv[1])
print(text)
# zextract_references_list_by_keyword('REFERENCES')
ref_list = extract_references_list(text)
for ref in ref_list:
print(CYELLOW + ref + CEND)
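# Editor's note on the expected input (hedged, inferred from the parser above): the
# references section is assumed to use bracket-indexed entries such as
#   [1] A. Author, "Some title," Some Journal, 2020.
#   [2] B. Writer, ...
# extract_references_list() scans the text backwards, collecting each "[n] ..." chunk,
# stops once the chunk starting with "[1]" has been collected, and returns the chunks
# in forward order.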
| 22.768293
| 71
| 0.528656
|
import re
import pdf
CRED = '\033[91m'
CGREEN = '\33[32m'
CYELLOW = '\33[33m'
CBLUE = '\33[34m'
CVIOLET = '\33[35m'
CBEIGE = '\33[36m'
CWHITE = '\33[37m'
CEND = '\033[0m'
def extract_references_list_by_keyword(text, keyword):
print(text)
match_res = re.search(keyword, text)
ref_text = text[match_res.span()[0]:]
    index_re = re.compile(r'\[[0-9]([0-9]|)([0-9]|)\]')
ref_pos = []
for ref in index_re.finditer(ref_text):
ref_pos.append(ref.span()[0])
ref_pos.append(len(ref_text))
for i in range(len(ref_pos)-1):
print(CYELLOW + ref_text[ref_pos[i]:ref_pos[i+1]] + CEND)
def extract_references_list(text):
res = []
buffer = ""
state = 0
for i in reversed(range(0, len(text)-1)):
c = text[i]
buffer = c + buffer
if state == 0:
if c == ']':
state = 1
elif state == 1:
if c in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
state = 2
else:
state = 0
elif state == 2:
if c == '[':
res.append(buffer)
if buffer[1] == '1' and buffer[2] == ']':
break
state = 0
buffer = ""
else:
print("Unknown state")
            raise RuntimeError("Unknown state")
return reversed(res)
def extract_article_from_reference(string):
pass
if __name__ == '__main__':
import sys
text = pdf.extract_text(sys.argv[1])
print(text)
ref_list = extract_references_list(text)
for ref in ref_list:
print(CYELLOW + ref + CEND)
| true
| true
|
790214d60cffa2a494c5dcf9a8ff558464d131ab
| 149
|
py
|
Python
|
src/aijack/defense/ckks/__init__.py
|
luoshenseeker/AIJack
|
4e871a5b3beb4b7c976d38060d6956efcebf880d
|
[
"MIT"
] | 24
|
2021-11-17T02:16:47.000Z
|
2022-03-27T01:04:08.000Z
|
src/aijack/defense/ckks/__init__.py
|
luoshenseeker/AIJack
|
4e871a5b3beb4b7c976d38060d6956efcebf880d
|
[
"MIT"
] | 9
|
2021-12-03T06:09:27.000Z
|
2022-03-29T06:33:53.000Z
|
src/aijack/defense/ckks/__init__.py
|
luoshenseeker/AIJack
|
4e871a5b3beb4b7c976d38060d6956efcebf880d
|
[
"MIT"
] | 5
|
2022-01-12T09:58:04.000Z
|
2022-03-17T09:29:04.000Z
|
from .encoder import CKKSEncoder # noqa: F401
from .encrypter import CKKSEncrypter # noqa: F401
from .plaintext import CKKSPlaintext # noqa: F401
| 37.25
| 50
| 0.778523
|
from .encoder import CKKSEncoder
from .encrypter import CKKSEncrypter
from .plaintext import CKKSPlaintext
| true
| true
|
790214dde414abac4f210f7302a358649ab58d6e
| 417
|
py
|
Python
|
src/mltoolset/__init__.py
|
rpeloff/multimodal_one-shot_learning
|
b08b9deffea5c656f07a616f31850192e32c2aee
|
[
"MIT"
] | 11
|
2019-05-14T08:52:32.000Z
|
2021-11-09T10:01:44.000Z
|
src/mltoolset/__init__.py
|
rpeloff/multimodal_one-shot_learning
|
b08b9deffea5c656f07a616f31850192e32c2aee
|
[
"MIT"
] | null | null | null |
src/mltoolset/__init__.py
|
rpeloff/multimodal_one-shot_learning
|
b08b9deffea5c656f07a616f31850192e32c2aee
|
[
"MIT"
] | 2
|
2019-08-27T08:49:44.000Z
|
2021-02-01T15:08:16.000Z
|
"""Welcome to MLToolset, a package to simplify machine learning research!
Author: Ryan Eloff
Contact: ryan.peter.eloff@gmail.com
Date: May 2018
"""
from . import data
from . import nearest_neighbour
from . import neural_blocks
from . import siamese
from . import training
from . import utils
from ._globals import TF_FLOAT
from ._globals import TF_INT
from ._globals import NP_FLOAT
from ._globals import NP_INT
| 19.857143
| 73
| 0.786571
|
from . import data
from . import nearest_neighbour
from . import neural_blocks
from . import siamese
from . import training
from . import utils
from ._globals import TF_FLOAT
from ._globals import TF_INT
from ._globals import NP_FLOAT
from ._globals import NP_INT
| true
| true
|
790216d5aa81a46dd367ab753c7e45365e05f79a
| 2,531
|
py
|
Python
|
v1/list/models.py
|
atroxtartarus/openeats-api
|
a4e781e82cb34bc7a68ce9b1b6ab2f1bd35382df
|
[
"MIT"
] | 11
|
2018-03-04T18:03:30.000Z
|
2021-09-04T17:03:24.000Z
|
v1/list/models.py
|
atroxtartarus/openeats-api
|
a4e781e82cb34bc7a68ce9b1b6ab2f1bd35382df
|
[
"MIT"
] | 42
|
2020-06-05T14:55:57.000Z
|
2021-07-14T05:58:09.000Z
|
v1/list/models.py
|
jzyrobert/openeats-api
|
35e1f1c8e61812e405ec15cc66d8e543d15409b6
|
[
"MIT"
] | 40
|
2018-06-22T18:58:19.000Z
|
2022-01-31T08:34:05.000Z
|
#!/usr/bin/env python
# encoding: utf-8
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import AutoSlugField
from v1.recipe.models import Recipe
class GroceryList(models.Model):
"""
    The GroceryList is the core of the list app.
It offers a home to many GroceryItems.
title = The name of the GroceryList.
slug = The HTML safe name of the GroceryList.
author = The User who created the GroceryList.
pub_date = The date that the GroceryList was created on.
"""
title = models.CharField(_("grocery list title"), max_length=250)
slug = AutoSlugField(_('slug'), populate_from='title')
author = models.ForeignKey(User, on_delete=models.CASCADE)
pub_date = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['pub_date']
def __str__(self):
return '%s' % self.title
def item_count(self):
"""get the number of items in the list"""
return GroceryItem.objects.filter(list=self).count()
class GroceryItem(models.Model):
"""
The GroceryItem is an item on a GroceryList.
list = The GroceryList that owns the GroceryItem.
title = The name of the GroceryItem.
completed = Whether or not the GroceryItem has been purchased or
                added to the user's shopping cart in the supermarket.
order = The order of the item in the GroceryList.
"""
list = models.ForeignKey(GroceryList, on_delete=models.CASCADE, related_name='items')
title = models.CharField(_("title"), max_length=550)
completed = models.BooleanField(_("completed"), default=False)
order = models.IntegerField(_("order"), default=0)
class Meta:
ordering = ['list_id', 'order', 'pk']
def __str__(self):
return '%s' % self.title
class GroceryShared(models.Model):
"""
Determines whether or not a GroceryList is shared to another user.
    Shared lists allow other users to add/delete/edit the GroceryList.
list = The GroceryList to be shared.
shared_by = The User that shared the List.
shared_to = The User that is given access to a GroceryList.
"""
list = models.ForeignKey(GroceryList, on_delete=models.CASCADE)
shared_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name="shared_by")
shared_to = models.ForeignKey(User, on_delete=models.CASCADE, related_name="shared_to")
def __str__(self):
return '%s' % self.list.title
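# Hypothetical usage sketch (not part of the original module), illustrating
# GroceryList.item_count() with the models defined above:
#   gl = GroceryList.objects.create(title="Weekly shop", author=some_user)
#   GroceryItem.objects.create(list=gl, title="Milk", order=1)
#   gl.item_count()  # -> 1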
| 34.671233
| 91
| 0.699723
|
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import AutoSlugField
from v1.recipe.models import Recipe
class GroceryList(models.Model):
title = models.CharField(_("grocery list title"), max_length=250)
slug = AutoSlugField(_('slug'), populate_from='title')
author = models.ForeignKey(User, on_delete=models.CASCADE)
pub_date = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['pub_date']
def __str__(self):
return '%s' % self.title
def item_count(self):
return GroceryItem.objects.filter(list=self).count()
class GroceryItem(models.Model):
list = models.ForeignKey(GroceryList, on_delete=models.CASCADE, related_name='items')
title = models.CharField(_("title"), max_length=550)
completed = models.BooleanField(_("completed"), default=False)
order = models.IntegerField(_("order"), default=0)
class Meta:
ordering = ['list_id', 'order', 'pk']
def __str__(self):
return '%s' % self.title
class GroceryShared(models.Model):
list = models.ForeignKey(GroceryList, on_delete=models.CASCADE)
shared_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name="shared_by")
shared_to = models.ForeignKey(User, on_delete=models.CASCADE, related_name="shared_to")
def __str__(self):
return '%s' % self.list.title
| true
| true
|
790217b2aca2a8a98689118c8de76d2f34301000
| 18,931
|
py
|
Python
|
nova/tests/unit/conductor/tasks/test_live_migrate.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | 7
|
2015-09-22T11:27:16.000Z
|
2015-11-02T12:33:46.000Z
|
nova/tests/unit/conductor/tasks/test_live_migrate.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | 2
|
2015-09-07T22:14:46.000Z
|
2020-08-12T08:51:56.000Z
|
nova/tests/unit/conductor/tasks/test_live_migrate.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | 4
|
2015-09-09T16:48:56.000Z
|
2022-03-15T20:52:57.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mox3 import mox
from nova.compute import power_state
from nova.compute import utils as compute_utils
from nova.conductor.tasks import live_migrate
from nova import db
from nova import exception
from nova import objects
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_instance
class LiveMigrationTaskTestCase(test.NoDBTestCase):
def setUp(self):
super(LiveMigrationTaskTestCase, self).setUp()
self.context = "context"
self.instance_host = "host"
self.instance_uuid = "uuid"
self.instance_image = "image_ref"
db_instance = fake_instance.fake_db_instance(
host=self.instance_host,
uuid=self.instance_uuid,
power_state=power_state.RUNNING,
memory_mb=512,
image_ref=self.instance_image)
self.instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
self.destination = "destination"
self.block_migration = "bm"
self.disk_over_commit = "doc"
self._generate_task()
def _generate_task(self):
self.task = live_migrate.LiveMigrationTask(self.context,
self.instance, self.destination, self.block_migration,
self.disk_over_commit)
def test_execute_with_destination(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task, '_check_requested_destination')
self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
self.task._check_host_is_up(self.instance_host)
self.task._check_requested_destination()
self.task.compute_rpcapi.live_migration(self.context,
host=self.instance_host,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migrate_data=None).AndReturn("bob")
self.mox.ReplayAll()
self.assertEqual("bob", self.task.execute())
def test_execute_without_destination(self):
self.destination = None
self._generate_task()
self.assertIsNone(self.task.destination)
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task, '_find_destination')
self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
self.task._check_host_is_up(self.instance_host)
self.task._find_destination().AndReturn("found_host")
self.task.compute_rpcapi.live_migration(self.context,
host=self.instance_host,
instance=self.instance,
dest="found_host",
block_migration=self.block_migration,
migrate_data=None).AndReturn("bob")
self.mox.ReplayAll()
self.assertEqual("bob", self.task.execute())
def test_check_instance_is_running_passes(self):
self.task._check_instance_is_running()
def test_check_instance_is_running_fails_when_shutdown(self):
self.task.instance['power_state'] = power_state.SHUTDOWN
self.assertRaises(exception.InstanceNotRunning,
self.task._check_instance_is_running)
def test_check_instance_host_is_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
db.service_get_by_compute_host(self.context,
"host").AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(True)
self.mox.ReplayAll()
self.task._check_host_is_up("host")
def test_check_instance_host_is_up_fails_if_not_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
db.service_get_by_compute_host(self.context,
"host").AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
def test_check_instance_host_is_up_fails_if_not_found(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
db.service_get_by_compute_host(self.context,
"host").AndRaise(exception.NotFound)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
def test_check_requested_destination(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
self.mox.StubOutWithMock(self.task.compute_rpcapi,
'check_can_live_migrate_destination')
db.service_get_by_compute_host(self.context,
self.destination).AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(True)
hypervisor_details = {
"hypervisor_type": "a",
"hypervisor_version": 6.1,
"free_ram_mb": 513
}
self.task._get_compute_info(self.destination)\
.AndReturn(hypervisor_details)
self.task._get_compute_info(self.instance_host)\
.AndReturn(hypervisor_details)
self.task._get_compute_info(self.destination)\
.AndReturn(hypervisor_details)
self.task.compute_rpcapi.check_can_live_migrate_destination(
self.context, self.instance, self.destination,
self.block_migration, self.disk_over_commit).AndReturn(
"migrate_data")
self.mox.ReplayAll()
self.task._check_requested_destination()
self.assertEqual("migrate_data", self.task.migrate_data)
def test_check_requested_destination_fails_with_same_dest(self):
self.task.destination = "same"
self.task.source = "same"
self.assertRaises(exception.UnableToMigrateToSelf,
self.task._check_requested_destination)
def test_check_requested_destination_fails_when_destination_is_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
db.service_get_by_compute_host(self.context,
self.destination).AndRaise(exception.NotFound)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_not_enough_memory(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.task._check_host_is_up(self.destination)
db.service_get_by_compute_host(self.context,
self.destination).AndReturn({
"compute_node": [{"free_ram_mb": 511}]
})
self.mox.ReplayAll()
self.assertRaises(exception.MigrationPreCheckError,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_hypervisor_diff(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task,
'_check_destination_has_enough_memory')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "b"
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a"
})
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_hypervisor_too_old(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task,
'_check_destination_has_enough_memory')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 7
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 6
})
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.task._check_requested_destination)
def test_find_destination_works(self):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def test_find_destination_no_image_works(self):
self.instance['image_ref'] = ''
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
scheduler_utils.build_request_spec(self.context, None,
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def _test_find_destination_retry_hypervisor_raises(self, error):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(error)
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_with_old_hypervisor(self):
self._test_find_destination_retry_hypervisor_raises(
exception.DestinationHypervisorTooOld)
def test_find_destination_retry_with_invalid_hypervisor_type(self):
self._test_find_destination_retry_hypervisor_raises(
exception.InvalidHypervisorType)
def test_find_destination_retry_with_invalid_livem_checks(self):
self.flags(migrate_max_retries=1)
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.Invalid)
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_exceeds_max(self):
self.flags(migrate_max_retries=0)
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(exception.DestinationHypervisorTooOld)
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_find_destination_when_runs_out_of_hosts(self):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
exception.NoValidHost(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_not_implemented_rollback(self):
self.assertRaises(NotImplementedError, self.task.rollback)
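def _mox_flow_sketch():
    # Minimal sketch of the mox record/replay/verify flow the tests above rely
    # on. Illustrative only: Greeter and its method are invented; the real
    # tests stub nova internals in exactly the same way.
    class Greeter(object):
        def greet(self, name):
            return "hello %s" % name
    m = mox.Mox()
    greeter = Greeter()
    m.StubOutWithMock(greeter, 'greet')
    greeter.greet('world').AndReturn('stubbed')   # record the expected call
    m.ReplayAll()                                 # switch from record to replay
    assert greeter.greet('world') == 'stubbed'    # code under test must match
    m.VerifyAll()                                 # confirm expectations were met
    m.UnsetStubs()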
| 46.513514
| 78
| 0.665258
|
from mox3 import mox
from nova.compute import power_state
from nova.compute import utils as compute_utils
from nova.conductor.tasks import live_migrate
from nova import db
from nova import exception
from nova import objects
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_instance
class LiveMigrationTaskTestCase(test.NoDBTestCase):
def setUp(self):
super(LiveMigrationTaskTestCase, self).setUp()
self.context = "context"
self.instance_host = "host"
self.instance_uuid = "uuid"
self.instance_image = "image_ref"
db_instance = fake_instance.fake_db_instance(
host=self.instance_host,
uuid=self.instance_uuid,
power_state=power_state.RUNNING,
memory_mb=512,
image_ref=self.instance_image)
self.instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
self.destination = "destination"
self.block_migration = "bm"
self.disk_over_commit = "doc"
self._generate_task()
def _generate_task(self):
self.task = live_migrate.LiveMigrationTask(self.context,
self.instance, self.destination, self.block_migration,
self.disk_over_commit)
def test_execute_with_destination(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task, '_check_requested_destination')
self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
self.task._check_host_is_up(self.instance_host)
self.task._check_requested_destination()
self.task.compute_rpcapi.live_migration(self.context,
host=self.instance_host,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migrate_data=None).AndReturn("bob")
self.mox.ReplayAll()
self.assertEqual("bob", self.task.execute())
def test_execute_without_destination(self):
self.destination = None
self._generate_task()
self.assertIsNone(self.task.destination)
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task, '_find_destination')
self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
self.task._check_host_is_up(self.instance_host)
self.task._find_destination().AndReturn("found_host")
self.task.compute_rpcapi.live_migration(self.context,
host=self.instance_host,
instance=self.instance,
dest="found_host",
block_migration=self.block_migration,
migrate_data=None).AndReturn("bob")
self.mox.ReplayAll()
self.assertEqual("bob", self.task.execute())
def test_check_instance_is_running_passes(self):
self.task._check_instance_is_running()
def test_check_instance_is_running_fails_when_shutdown(self):
self.task.instance['power_state'] = power_state.SHUTDOWN
self.assertRaises(exception.InstanceNotRunning,
self.task._check_instance_is_running)
def test_check_instance_host_is_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
db.service_get_by_compute_host(self.context,
"host").AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(True)
self.mox.ReplayAll()
self.task._check_host_is_up("host")
def test_check_instance_host_is_up_fails_if_not_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
db.service_get_by_compute_host(self.context,
"host").AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
def test_check_instance_host_is_up_fails_if_not_found(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
db.service_get_by_compute_host(self.context,
"host").AndRaise(exception.NotFound)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
def test_check_requested_destination(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
self.mox.StubOutWithMock(self.task.compute_rpcapi,
'check_can_live_migrate_destination')
db.service_get_by_compute_host(self.context,
self.destination).AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(True)
hypervisor_details = {
"hypervisor_type": "a",
"hypervisor_version": 6.1,
"free_ram_mb": 513
}
self.task._get_compute_info(self.destination)\
.AndReturn(hypervisor_details)
self.task._get_compute_info(self.instance_host)\
.AndReturn(hypervisor_details)
self.task._get_compute_info(self.destination)\
.AndReturn(hypervisor_details)
self.task.compute_rpcapi.check_can_live_migrate_destination(
self.context, self.instance, self.destination,
self.block_migration, self.disk_over_commit).AndReturn(
"migrate_data")
self.mox.ReplayAll()
self.task._check_requested_destination()
self.assertEqual("migrate_data", self.task.migrate_data)
def test_check_requested_destination_fails_with_same_dest(self):
self.task.destination = "same"
self.task.source = "same"
self.assertRaises(exception.UnableToMigrateToSelf,
self.task._check_requested_destination)
def test_check_requested_destination_fails_when_destination_is_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
db.service_get_by_compute_host(self.context,
self.destination).AndRaise(exception.NotFound)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_not_enough_memory(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.task._check_host_is_up(self.destination)
db.service_get_by_compute_host(self.context,
self.destination).AndReturn({
"compute_node": [{"free_ram_mb": 511}]
})
self.mox.ReplayAll()
self.assertRaises(exception.MigrationPreCheckError,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_hypervisor_diff(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task,
'_check_destination_has_enough_memory')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "b"
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a"
})
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_hypervisor_too_old(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task,
'_check_destination_has_enough_memory')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 7
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 6
})
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.task._check_requested_destination)
def test_find_destination_works(self):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def test_find_destination_no_image_works(self):
self.instance['image_ref'] = ''
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
scheduler_utils.build_request_spec(self.context, None,
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def _test_find_destination_retry_hypervisor_raises(self, error):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(error)
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_with_old_hypervisor(self):
self._test_find_destination_retry_hypervisor_raises(
exception.DestinationHypervisorTooOld)
def test_find_destination_retry_with_invalid_hypervisor_type(self):
self._test_find_destination_retry_hypervisor_raises(
exception.InvalidHypervisorType)
def test_find_destination_retry_with_invalid_livem_checks(self):
self.flags(migrate_max_retries=1)
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.Invalid)
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_exceeds_max(self):
self.flags(migrate_max_retries=0)
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(exception.DestinationHypervisorTooOld)
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_find_destination_when_runs_out_of_hosts(self):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
exception.NoValidHost(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_not_implemented_rollback(self):
self.assertRaises(NotImplementedError, self.task.rollback)
| true
| true
|
7902185c0b6a150ef40224aba2dc5b625c093f33
| 1,945
|
py
|
Python
|
tests/test_rest_file_region.py
|
Multiscale-Genomics/mg-rest-file
|
7fb9077151ce8e2511296b72b645d92acf95bceb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rest_file_region.py
|
Multiscale-Genomics/mg-rest-file
|
7fb9077151ce8e2511296b72b645d92acf95bceb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rest_file_region.py
|
Multiscale-Genomics/mg-rest-file
|
7fb9077151ce8e2511296b72b645d92acf95bceb
|
[
"Apache-2.0"
] | null | null | null |
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import tempfile
import json
import pytest
from context import app
@pytest.fixture
def client(request):
"""
Defines the test client object used to make requests against the app
"""
db_fd, app.APP.config['DATABASE'] = tempfile.mkstemp()
app.APP.config['TESTING'] = True
client = app.APP.test_client()
def teardown():
"""
Clean up the temporary database once testing has completed
"""
os.close(db_fd)
os.unlink(app.APP.config['DATABASE'])
request.addfinalizer(teardown)
return client
def test_region_meta(client):
"""
Test that the whole file endpoint returns the usage parameters
"""
rest_value = client.get(
'/mug/api/dmp/file/whole',
headers=dict(Authorization='Authorization: Bearer teststring')
)
details = json.loads(rest_value.data)
# print(details)
assert 'usage' in details
def test_region_file(client):
"""
Test that the region endpoint returns data for the requested chromosome region
"""
rest_value = client.get(
'/mug/api/dmp/file/region?file_id=testtest0000&chrom=19&start=3000000&end=3100000',
headers=dict(Authorization='Authorization: Bearer teststring')
)
assert len(rest_value.data) > 0
| 28.602941
| 91
| 0.698715
|
from __future__ import print_function
import os
import tempfile
import json
import pytest
from context import app
@pytest.fixture
def client(request):
db_fd, app.APP.config['DATABASE'] = tempfile.mkstemp()
app.APP.config['TESTING'] = True
client = app.APP.test_client()
def teardown():
os.close(db_fd)
os.unlink(app.APP.config['DATABASE'])
request.addfinalizer(teardown)
return client
def test_region_meta(client):
rest_value = client.get(
'/mug/api/dmp/file/whole',
headers=dict(Authorization='Authorization: Bearer teststring')
)
details = json.loads(rest_value.data)
assert 'usage' in details
def test_region_file(client):
rest_value = client.get(
'/mug/api/dmp/file/region?file_id=testtest0000&chrom=19&start=3000000&end=3100000',
headers=dict(Authorization='Authorization: Bearer teststring')
)
assert len(rest_value.data) > 0
| true
| true
|
79021887c6b9cd27e0c497685345f14bba935307
| 8,041
|
bzl
|
Python
|
closure/webfiles/web_library.bzl
|
vrana/rules_closure
|
cf1e44edb908e9616030cc83d085989b8e6cd6df
|
[
"Apache-2.0"
] | null | null | null |
closure/webfiles/web_library.bzl
|
vrana/rules_closure
|
cf1e44edb908e9616030cc83d085989b8e6cd6df
|
[
"Apache-2.0"
] | null | null | null |
closure/webfiles/web_library.bzl
|
vrana/rules_closure
|
cf1e44edb908e9616030cc83d085989b8e6cd6df
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The Closure Rules Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Web component validation, packaging, and development web server."""
load(
"//closure/private:defs.bzl",
"create_argfile",
"difference",
"long_path",
"unfurl",
)
def _web_library(ctx):
if not ctx.attr.srcs:
if ctx.attr.deps:
fail("deps can not be set when srcs is not")
if not ctx.attr.exports:
fail("exports must be set if srcs is not")
if ctx.attr.path:
if not ctx.attr.path.startswith("/"):
fail("webpath must start with /")
if ctx.attr.path != "/" and ctx.attr.path.endswith("/"):
fail("webpath must not end with / unless it is /")
if "//" in ctx.attr.path:
fail("webpath must not have //")
elif ctx.attr.srcs:
fail("path must be set when srcs is set")
if "*" in ctx.attr.suppress and len(ctx.attr.suppress) != 1:
fail("when \"*\" is suppressed no other items should be present")
# process what came before
deps = unfurl(ctx.attr.deps, provider = "webfiles")
webpaths = []
manifests = depset(order = "postorder")
for dep in deps:
webpaths.append(dep.webfiles.webpaths)
manifests += dep.webfiles.manifests
# process what comes now
new_webpaths = []
manifest_srcs = []
path = ctx.attr.path
strip = _get_strip(ctx)
for src in ctx.files.srcs:
suffix = _get_path_relative_to_package(src)
if strip:
if not suffix.startswith(strip):
fail("Relative src path not start with '%s': %s" % (strip, suffix))
suffix = suffix[len(strip):]
webpath = "%s/%s" % ("" if path == "/" else path, suffix)
if webpath in new_webpaths:
_fail(ctx, "multiple srcs within %s define the webpath %s " % (
ctx.label,
webpath,
))
if webpath in webpaths:
_fail(ctx, "webpath %s was defined by %s when already defined by deps" % (
webpath,
ctx.label,
))
new_webpaths.append(webpath)
manifest_srcs.append(struct(
path = src.path,
longpath = long_path(ctx, src),
webpath = webpath,
))
webpaths += [depset(new_webpaths)]
manifest = ctx.actions.declare_file("%s.pbtxt" % ctx.label.name)
ctx.actions.write(
output = manifest,
content = struct(
label = str(ctx.label),
src = manifest_srcs,
).to_proto(),
)
manifests += [manifest]
# perform strict dependency checking
inputs = [manifest]
direct_manifests = depset([manifest])
args = [
"WebfilesValidator",
"--dummy",
ctx.outputs.dummy.path,
"--target",
manifest.path,
]
for category in ctx.attr.suppress:
args.append("--suppress")
args.append(category)
inputs.extend(ctx.files.srcs)
for dep in deps:
inputs.append(dep.webfiles.dummy)
for f in dep.files.to_list():
inputs.append(f)
direct_manifests += [dep.webfiles.manifest]
inputs.append(dep.webfiles.manifest)
args.append("--direct_dep")
args.append(dep.webfiles.manifest.path)
for man in difference(manifests, direct_manifests):
inputs.append(man)
args.append("--transitive_dep")
args.append(man.path)
argfile = create_argfile(ctx.actions, ctx.label.name, args)
inputs.append(argfile)
ctx.actions.run(
inputs = inputs,
outputs = [ctx.outputs.dummy],
executable = ctx.executable._ClosureWorker,
arguments = ["@@" + argfile.path],
mnemonic = "Closure",
execution_requirements = {"supports-workers": "1"},
progress_message = "Checking webfiles in %s" % ctx.label,
)
# define development web server that only applies to this transitive closure
params = struct(
label = str(ctx.label),
bind = "[::]:6006",
manifest = [long_path(ctx, man) for man in manifests.to_list()],
external_asset = [
struct(webpath = k, path = v)
for k, v in ctx.attr.external_assets.items()
],
)
params_file = ctx.actions.declare_file("%s_server_params.pbtxt" % ctx.label.name)
ctx.actions.write(output = params_file, content = params.to_proto())
ctx.actions.write(
is_executable = True,
output = ctx.outputs.executable,
content = "#!/bin/sh\nexec %s %s \"$@\"" % (
ctx.executable._WebfilesServer.short_path,
long_path(ctx, params_file),
),
)
# export data to parent rules
transitive_runfiles = depset()
transitive_runfiles += ctx.attr._WebfilesServer.data_runfiles.files
for dep in deps:
transitive_runfiles += dep.data_runfiles.files
return struct(
files = depset([ctx.outputs.executable, ctx.outputs.dummy]),
exports = unfurl(ctx.attr.exports),
webfiles = struct(
manifest = manifest,
manifests = manifests,
webpaths = depset(transitive = webpaths),
dummy = ctx.outputs.dummy,
),
runfiles = ctx.runfiles(
files = ctx.files.srcs + ctx.files.data + [
manifest,
params_file,
ctx.outputs.executable,
ctx.outputs.dummy,
],
transitive_files = transitive_runfiles,
),
)
def _fail(ctx, message):
if ctx.attr.suppress == ["*"]:
print(message)
else:
fail(message)
def _get_path_relative_to_package(artifact):
"""Returns file path relative to the package that declared it."""
path = artifact.path
for prefix in (
artifact.root.path,
artifact.owner.workspace_root if artifact.owner else "",
artifact.owner.package if artifact.owner else "",
):
if prefix:
prefix = prefix + "/"
if not path.startswith(prefix):
fail("Path %s doesn't start with %s" % (path, prefix))
path = path[len(prefix):]
return path
def _get_strip(ctx):
strip = ctx.attr.strip_prefix
if strip:
if strip.startswith("/"):
_fail(ctx, "strip_prefix should not end with /")
strip = strip[1:]
if strip.endswith("/"):
_fail(ctx, "strip_prefix should not end with /")
else:
strip += "/"
return strip
web_library = rule(
implementation = _web_library,
executable = True,
attrs = {
"path": attr.string(),
"srcs": attr.label_list(allow_files = True),
"deps": attr.label_list(providers = ["webfiles"]),
"exports": attr.label_list(),
"data": attr.label_list(allow_files = True),
"suppress": attr.string_list(),
"strip_prefix": attr.string(),
"external_assets": attr.string_dict(default = {"/_/runfiles": "."}),
"_ClosureWorker": attr.label(
default = Label("//java/io/bazel/rules/closure:ClosureWorker"),
executable = True,
cfg = "host",
),
"_WebfilesServer": attr.label(
default = Label(
"//java/io/bazel/rules/closure/webfiles/server:WebfilesServer",
),
executable = True,
cfg = "host",
),
},
outputs = {
"dummy": "%{name}.ignoreme",
},
)
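# Illustrative BUILD usage of the rule defined above (a sketch only: the load
# label, target names, and the webfiles dependency are assumptions, not taken
# from this repository).
#
#     load("@io_bazel_rules_closure//closure:defs.bzl", "web_library")
#
#     web_library(
#         name = "docs",
#         path = "/docs",
#         srcs = glob(["*.html", "*.css"]),
#         deps = ["//third_party/example_webfiles"],  # must provide "webfiles"
#     )
#
# Running `bazel run :docs` would then start the development web server
# configured by this rule.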
| 34.072034
| 86
| 0.584753
|
load(
"//closure/private:defs.bzl",
"create_argfile",
"difference",
"long_path",
"unfurl",
)
def _web_library(ctx):
if not ctx.attr.srcs:
if ctx.attr.deps:
fail("deps can not be set when srcs is not")
if not ctx.attr.exports:
fail("exports must be set if srcs is not")
if ctx.attr.path:
if not ctx.attr.path.startswith("/"):
fail("webpath must start with /")
if ctx.attr.path != "/" and ctx.attr.path.endswith("/"):
fail("webpath must not end with / unless it is /")
if "//" in ctx.attr.path:
fail("webpath must not have //")
elif ctx.attr.srcs:
fail("path must be set when srcs is set")
if "*" in ctx.attr.suppress and len(ctx.attr.suppress) != 1:
fail("when \"*\" is suppressed no other items should be present")
deps = unfurl(ctx.attr.deps, provider = "webfiles")
webpaths = []
manifests = depset(order = "postorder")
for dep in deps:
webpaths.append(dep.webfiles.webpaths)
manifests += dep.webfiles.manifests
new_webpaths = []
manifest_srcs = []
path = ctx.attr.path
strip = _get_strip(ctx)
for src in ctx.files.srcs:
suffix = _get_path_relative_to_package(src)
if strip:
if not suffix.startswith(strip):
fail("Relative src path not start with '%s': %s" % (strip, suffix))
suffix = suffix[len(strip):]
webpath = "%s/%s" % ("" if path == "/" else path, suffix)
if webpath in new_webpaths:
_fail(ctx, "multiple srcs within %s define the webpath %s " % (
ctx.label,
webpath,
))
if webpath in webpaths:
_fail(ctx, "webpath %s was defined by %s when already defined by deps" % (
webpath,
ctx.label,
))
new_webpaths.append(webpath)
manifest_srcs.append(struct(
path = src.path,
longpath = long_path(ctx, src),
webpath = webpath,
))
webpaths += [depset(new_webpaths)]
manifest = ctx.actions.declare_file("%s.pbtxt" % ctx.label.name)
ctx.actions.write(
output = manifest,
content = struct(
label = str(ctx.label),
src = manifest_srcs,
).to_proto(),
)
manifests += [manifest]
inputs = [manifest]
direct_manifests = depset([manifest])
args = [
"WebfilesValidator",
"--dummy",
ctx.outputs.dummy.path,
"--target",
manifest.path,
]
for category in ctx.attr.suppress:
args.append("--suppress")
args.append(category)
inputs.extend(ctx.files.srcs)
for dep in deps:
inputs.append(dep.webfiles.dummy)
for f in dep.files.to_list():
inputs.append(f)
direct_manifests += [dep.webfiles.manifest]
inputs.append(dep.webfiles.manifest)
args.append("--direct_dep")
args.append(dep.webfiles.manifest.path)
for man in difference(manifests, direct_manifests):
inputs.append(man)
args.append("--transitive_dep")
args.append(man.path)
argfile = create_argfile(ctx.actions, ctx.label.name, args)
inputs.append(argfile)
ctx.actions.run(
inputs = inputs,
outputs = [ctx.outputs.dummy],
executable = ctx.executable._ClosureWorker,
arguments = ["@@" + argfile.path],
mnemonic = "Closure",
execution_requirements = {"supports-workers": "1"},
progress_message = "Checking webfiles in %s" % ctx.label,
)
params = struct(
label = str(ctx.label),
bind = "[::]:6006",
manifest = [long_path(ctx, man) for man in manifests.to_list()],
external_asset = [
struct(webpath = k, path = v)
for k, v in ctx.attr.external_assets.items()
],
)
params_file = ctx.actions.declare_file("%s_server_params.pbtxt" % ctx.label.name)
ctx.actions.write(output = params_file, content = params.to_proto())
ctx.actions.write(
is_executable = True,
output = ctx.outputs.executable,
content = "#!/bin/sh\nexec %s %s \"$@\"" % (
ctx.executable._WebfilesServer.short_path,
long_path(ctx, params_file),
),
)
transitive_runfiles = depset()
transitive_runfiles += ctx.attr._WebfilesServer.data_runfiles.files
for dep in deps:
transitive_runfiles += dep.data_runfiles.files
return struct(
files = depset([ctx.outputs.executable, ctx.outputs.dummy]),
exports = unfurl(ctx.attr.exports),
webfiles = struct(
manifest = manifest,
manifests = manifests,
webpaths = depset(transitive = webpaths),
dummy = ctx.outputs.dummy,
),
runfiles = ctx.runfiles(
files = ctx.files.srcs + ctx.files.data + [
manifest,
params_file,
ctx.outputs.executable,
ctx.outputs.dummy,
],
transitive_files = transitive_runfiles,
),
)
def _fail(ctx, message):
if ctx.attr.suppress == ["*"]:
print(message)
else:
fail(message)
def _get_path_relative_to_package(artifact):
path = artifact.path
for prefix in (
artifact.root.path,
artifact.owner.workspace_root if artifact.owner else "",
artifact.owner.package if artifact.owner else "",
):
if prefix:
prefix = prefix + "/"
if not path.startswith(prefix):
fail("Path %s doesn't start with %s" % (path, prefix))
path = path[len(prefix):]
return path
def _get_strip(ctx):
strip = ctx.attr.strip_prefix
if strip:
if strip.startswith("/"):
_fail(ctx, "strip_prefix should not end with /")
strip = strip[1:]
if strip.endswith("/"):
_fail(ctx, "strip_prefix should not end with /")
else:
strip += "/"
return strip
web_library = rule(
implementation = _web_library,
executable = True,
attrs = {
"path": attr.string(),
"srcs": attr.label_list(allow_files = True),
"deps": attr.label_list(providers = ["webfiles"]),
"exports": attr.label_list(),
"data": attr.label_list(allow_files = True),
"suppress": attr.string_list(),
"strip_prefix": attr.string(),
"external_assets": attr.string_dict(default = {"/_/runfiles": "."}),
"_ClosureWorker": attr.label(
default = Label("//java/io/bazel/rules/closure:ClosureWorker"),
executable = True,
cfg = "host",
),
"_WebfilesServer": attr.label(
default = Label(
"//java/io/bazel/rules/closure/webfiles/server:WebfilesServer",
),
executable = True,
cfg = "host",
),
},
outputs = {
"dummy": "%{name}.ignoreme",
},
)
| true
| true
|
790218de51ca51344397755629177c60815269a1
| 10,690
|
py
|
Python
|
sdk/python/pulumi_aws/timestreamwrite/outputs.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/timestreamwrite/outputs.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/timestreamwrite/outputs.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'TableMagneticStoreWriteProperties',
'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation',
'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration',
'TableRetentionProperties',
]
@pulumi.output_type
class TableMagneticStoreWriteProperties(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "enableMagneticStoreWrites":
suggest = "enable_magnetic_store_writes"
elif key == "magneticStoreRejectedDataLocation":
suggest = "magnetic_store_rejected_data_location"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWriteProperties. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableMagneticStoreWriteProperties.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableMagneticStoreWriteProperties.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enable_magnetic_store_writes: Optional[bool] = None,
magnetic_store_rejected_data_location: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation'] = None):
"""
:param bool enable_magnetic_store_writes: A flag to enable magnetic store writes.
:param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.
"""
if enable_magnetic_store_writes is not None:
pulumi.set(__self__, "enable_magnetic_store_writes", enable_magnetic_store_writes)
if magnetic_store_rejected_data_location is not None:
pulumi.set(__self__, "magnetic_store_rejected_data_location", magnetic_store_rejected_data_location)
@property
@pulumi.getter(name="enableMagneticStoreWrites")
def enable_magnetic_store_writes(self) -> Optional[bool]:
"""
A flag to enable magnetic store writes.
"""
return pulumi.get(self, "enable_magnetic_store_writes")
@property
@pulumi.getter(name="magneticStoreRejectedDataLocation")
def magnetic_store_rejected_data_location(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']:
"""
The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.
"""
return pulumi.get(self, "magnetic_store_rejected_data_location")
@pulumi.output_type
class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "s3Configuration":
suggest = "s3_configuration"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
s3_configuration: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration'] = None):
"""
:param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3ConfigurationArgs' s3_configuration: Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details.
"""
if s3_configuration is not None:
pulumi.set(__self__, "s3_configuration", s3_configuration)
@property
@pulumi.getter(name="s3Configuration")
def s3_configuration(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']:
"""
Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details.
"""
return pulumi.get(self, "s3_configuration")
@pulumi.output_type
class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "bucketName":
suggest = "bucket_name"
elif key == "encryptionOption":
suggest = "encryption_option"
elif key == "kmsKeyId":
suggest = "kms_key_id"
elif key == "objectKeyPrefix":
suggest = "object_key_prefix"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket_name: Optional[str] = None,
encryption_option: Optional[str] = None,
kms_key_id: Optional[str] = None,
object_key_prefix: Optional[str] = None):
"""
:param str bucket_name: Bucket name of the customer S3 bucket.
:param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.
:param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key.
:param str object_key_prefix: Object key prefix for the customer S3 location.
"""
if bucket_name is not None:
pulumi.set(__self__, "bucket_name", bucket_name)
if encryption_option is not None:
pulumi.set(__self__, "encryption_option", encryption_option)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if object_key_prefix is not None:
pulumi.set(__self__, "object_key_prefix", object_key_prefix)
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> Optional[str]:
"""
Bucket name of the customer S3 bucket.
"""
return pulumi.get(self, "bucket_name")
@property
@pulumi.getter(name="encryptionOption")
def encryption_option(self) -> Optional[str]:
"""
Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.
"""
return pulumi.get(self, "encryption_option")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[str]:
"""
KMS key arn for the customer s3 location when encrypting with a KMS managed key.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="objectKeyPrefix")
def object_key_prefix(self) -> Optional[str]:
"""
Object key prefix for the customer S3 location.
"""
return pulumi.get(self, "object_key_prefix")
@pulumi.output_type
class TableRetentionProperties(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "magneticStoreRetentionPeriodInDays":
suggest = "magnetic_store_retention_period_in_days"
elif key == "memoryStoreRetentionPeriodInHours":
suggest = "memory_store_retention_period_in_hours"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableRetentionProperties. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableRetentionProperties.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableRetentionProperties.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
magnetic_store_retention_period_in_days: int,
memory_store_retention_period_in_hours: int):
"""
:param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.
:param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.
"""
pulumi.set(__self__, "magnetic_store_retention_period_in_days", magnetic_store_retention_period_in_days)
pulumi.set(__self__, "memory_store_retention_period_in_hours", memory_store_retention_period_in_hours)
@property
@pulumi.getter(name="magneticStoreRetentionPeriodInDays")
def magnetic_store_retention_period_in_days(self) -> int:
"""
The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.
"""
return pulumi.get(self, "magnetic_store_retention_period_in_days")
@property
@pulumi.getter(name="memoryStoreRetentionPeriodInHours")
def memory_store_retention_period_in_hours(self) -> int:
"""
The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.
"""
return pulumi.get(self, "memory_store_retention_period_in_hours")
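# Illustrative sketch of the resource inputs these output types mirror
# (assumes the matching aws.timestreamwrite.Table resource and
# TableRetentionPropertiesArgs input class from this SDK; resource names and
# values below are invented).
#
#     import pulumi
#     import pulumi_aws as aws
#
#     table = aws.timestreamwrite.Table(
#         "example",
#         database_name="metrics",
#         table_name="sensor_readings",
#         retention_properties=aws.timestreamwrite.TableRetentionPropertiesArgs(
#             magnetic_store_retention_period_in_days=30,
#             memory_store_retention_period_in_hours=24,
#         ),
#     )
#     pulumi.export("retention", table.retention_properties)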
| 46.277056
| 294
| 0.710196
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'TableMagneticStoreWriteProperties',
'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation',
'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration',
'TableRetentionProperties',
]
@pulumi.output_type
class TableMagneticStoreWriteProperties(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "enableMagneticStoreWrites":
suggest = "enable_magnetic_store_writes"
elif key == "magneticStoreRejectedDataLocation":
suggest = "magnetic_store_rejected_data_location"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWriteProperties. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableMagneticStoreWriteProperties.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableMagneticStoreWriteProperties.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enable_magnetic_store_writes: Optional[bool] = None,
magnetic_store_rejected_data_location: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation'] = None):
if enable_magnetic_store_writes is not None:
pulumi.set(__self__, "enable_magnetic_store_writes", enable_magnetic_store_writes)
if magnetic_store_rejected_data_location is not None:
pulumi.set(__self__, "magnetic_store_rejected_data_location", magnetic_store_rejected_data_location)
@property
@pulumi.getter(name="enableMagneticStoreWrites")
def enable_magnetic_store_writes(self) -> Optional[bool]:
return pulumi.get(self, "enable_magnetic_store_writes")
@property
@pulumi.getter(name="magneticStoreRejectedDataLocation")
def magnetic_store_rejected_data_location(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']:
return pulumi.get(self, "magnetic_store_rejected_data_location")
@pulumi.output_type
class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "s3Configuration":
suggest = "s3_configuration"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
s3_configuration: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration'] = None):
if s3_configuration is not None:
pulumi.set(__self__, "s3_configuration", s3_configuration)
@property
@pulumi.getter(name="s3Configuration")
def s3_configuration(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']:
return pulumi.get(self, "s3_configuration")
@pulumi.output_type
class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "bucketName":
suggest = "bucket_name"
elif key == "encryptionOption":
suggest = "encryption_option"
elif key == "kmsKeyId":
suggest = "kms_key_id"
elif key == "objectKeyPrefix":
suggest = "object_key_prefix"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket_name: Optional[str] = None,
encryption_option: Optional[str] = None,
kms_key_id: Optional[str] = None,
object_key_prefix: Optional[str] = None):
if bucket_name is not None:
pulumi.set(__self__, "bucket_name", bucket_name)
if encryption_option is not None:
pulumi.set(__self__, "encryption_option", encryption_option)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if object_key_prefix is not None:
pulumi.set(__self__, "object_key_prefix", object_key_prefix)
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> Optional[str]:
return pulumi.get(self, "bucket_name")
@property
@pulumi.getter(name="encryptionOption")
def encryption_option(self) -> Optional[str]:
return pulumi.get(self, "encryption_option")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[str]:
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="objectKeyPrefix")
def object_key_prefix(self) -> Optional[str]:
return pulumi.get(self, "object_key_prefix")
@pulumi.output_type
class TableRetentionProperties(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "magneticStoreRetentionPeriodInDays":
suggest = "magnetic_store_retention_period_in_days"
elif key == "memoryStoreRetentionPeriodInHours":
suggest = "memory_store_retention_period_in_hours"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableRetentionProperties. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableRetentionProperties.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableRetentionProperties.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
magnetic_store_retention_period_in_days: int,
memory_store_retention_period_in_hours: int):
pulumi.set(__self__, "magnetic_store_retention_period_in_days", magnetic_store_retention_period_in_days)
pulumi.set(__self__, "memory_store_retention_period_in_hours", memory_store_retention_period_in_hours)
@property
@pulumi.getter(name="magneticStoreRetentionPeriodInDays")
def magnetic_store_retention_period_in_days(self) -> int:
return pulumi.get(self, "magnetic_store_retention_period_in_days")
@property
@pulumi.getter(name="memoryStoreRetentionPeriodInHours")
def memory_store_retention_period_in_hours(self) -> int:
return pulumi.get(self, "memory_store_retention_period_in_hours")
| true
| true
|
79021946056bd2c746c424e0c738f34b5e634ab9
| 2,926
|
py
|
Python
|
setup.py
|
RDFLib/PyRDFa
|
efc24d4940910ca1e65900c25b62047301bbdcc7
|
[
"BSD-3-Clause"
] | 8
|
2015-04-01T19:55:22.000Z
|
2020-04-25T08:50:05.000Z
|
setup.py
|
DalavanCloud/PyRDFa
|
fd5c8826fb9e5f6f5a578564b1149fdae6c40aad
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
DalavanCloud/PyRDFa
|
fd5c8826fb9e5f6f5a578564b1149fdae6c40aad
|
[
"BSD-3-Clause"
] | 1
|
2019-02-12T03:15:00.000Z
|
2019-02-12T03:15:00.000Z
|
#!/usr/bin/env python
import sys
import re
def setup_python3():
# Taken from "distribute" setup.py
from distutils.filelist import FileList
from distutils import dir_util, file_util, util, log
from os.path import join
tmp_src = join("build", "src")
log.set_verbosity(1)
fl = FileList()
for line in open("MANIFEST.in"):
if not line.strip():
continue
fl.process_template_line(line)
dir_util.create_tree(tmp_src, fl.files)
outfiles_2to3 = []
for f in fl.files:
outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
if copied and outf.endswith(".py"):
outfiles_2to3.append(outf)
util.run_2to3(outfiles_2to3)
# arrange setup to use the copy
sys.path.insert(0, tmp_src)
return tmp_src
kwargs = {}
if sys.version_info[0] >= 3:
from setuptools import setup
kwargs['use_2to3'] = True
kwargs['install_requires'] = ['html5lib', 'rdflib>3.0.0']
kwargs['src_root'] = setup_python3()
else:
try:
from setuptools import setup
kwargs['test_suite'] = "nose.collector"
kwargs['install_requires'] = ['html5lib', 'rdflib>3.0.0']
except ImportError:
from distutils.core import setup
# Find version. We have to do this because we can't import it in Python 3 until
# it's been automatically converted in the setup process.
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
version = find_version('pyRdfa/__init__.py')
setup(
name = 'pyRdfa',
version = version,
description = "",
author = "",
author_email = "",
maintainer = "",
maintainer_email = "",
url = "",
license = "LICENSE",
platforms = ["any"],
classifiers = ["Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.4",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
"Natural Language :: English",
],
long_description = \
"""
""",
download_url = "%s.tar.gz" % version,
packages = ['pyRdfa',
'pyRdfa/host',
'pyRdfa/rdfs',
'pyRdfa/serializers',
'pyRdfa/transform',
],
**kwargs
)
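# --- Editor's sketch: how the regex-based find_version() above behaves ---
# Self-contained demo against a throwaway file; the file name is produced by
# tempfile and is not part of this project.
def _demo_find_version():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
        f.write('__version__ = "1.2.3"\n')
        path = f.name
    try:
        # The same line-by-line regex match used for pyRdfa/__init__.py.
        assert find_version(path) == "1.2.3"
    finally:
        os.remove(path)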
| 30.8
| 82
| 0.570745
|
import sys
import re
def setup_python3():
from distutils.filelist import FileList
from distutils import dir_util, file_util, util, log
from os.path import join
tmp_src = join("build", "src")
log.set_verbosity(1)
fl = FileList()
for line in open("MANIFEST.in"):
if not line.strip():
continue
fl.process_template_line(line)
dir_util.create_tree(tmp_src, fl.files)
outfiles_2to3 = []
for f in fl.files:
outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
if copied and outf.endswith(".py"):
outfiles_2to3.append(outf)
util.run_2to3(outfiles_2to3)
sys.path.insert(0, tmp_src)
return tmp_src
kwargs = {}
if sys.version_info[0] >= 3:
from setuptools import setup
kwargs['use_2to3'] = True
kwargs['install_requires'] = ['html5lib', 'rdflib>3.0.0']
kwargs['src_root'] = setup_python3()
else:
try:
from setuptools import setup
kwargs['test_suite'] = "nose.collector"
kwargs['install_requires'] = ['html5lib', 'rdflib>3.0.0']
except ImportError:
from distutils.core import setup
    # it's been automatically converted in the setup process.
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
version = find_version('pyRdfa/__init__.py')
setup(
name = 'pyRdfa',
version = version,
description = "",
author = "",
author_email = "",
maintainer = "",
maintainer_email = "",
url = "",
license = "LICENSE",
platforms = ["any"],
classifiers = ["Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.4",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
"Natural Language :: English",
],
long_description = \
"""
""",
download_url = "%s.tar.gz" % version,
packages = ['pyRdfa',
'pyRdfa/host',
'pyRdfa/rdfs',
'pyRdfa/serializers',
'pyRdfa/transform',
],
**kwargs
)
| true
| true
|
79021a7712360a761d0b424194f348ac888f99de
| 8,899
|
py
|
Python
|
boids.py
|
KeithRieck/bedlam
|
ce5f87a0211b4eecd553b1aae24e3b3664b43c5e
|
[
"BSD-2-Clause"
] | null | null | null |
boids.py
|
KeithRieck/bedlam
|
ce5f87a0211b4eecd553b1aae24e3b3664b43c5e
|
[
"BSD-2-Clause"
] | null | null | null |
boids.py
|
KeithRieck/bedlam
|
ce5f87a0211b4eecd553b1aae24e3b3664b43c5e
|
[
"BSD-2-Clause"
] | null | null | null |
from bedlam import Game
from bedlam import Scene
from bedlam import Sprite
from balls import Ball
# __pragma__('skip')
document = window = Math = Date = console = 0 # Prevent complaints by optional static checker
# __pragma__('noskip')
# __pragma__('noalias', 'clear')
DEBUG = False
class PVector:
def __init__(self, xx=0, yy=0):
self.x = xx
self.y = yy
def __str__(self):
return "PVector({},{})".format(self.x, self.y)
def reset(self, xx, yy):
self.x = xx
self.y = yy
return self
def copy(self):
return PVector.Instance(self.x, self.y)
def add(self, v):
self.x = self.x + v.x
self.y = self.y + v.y
return self
def sub(self, v):
self.x = self.x - v.x
self.y = self.y - v.y
return self
def mult(self, mag):
self.x = self.x * mag
self.y = self.y * mag
return self
def div(self, mag):
self.x = self.x / mag
self.y = self.y / mag
return self
def normalize(self, mag=1.0):
d = Math.sqrt(self.x * self.x + self.y * self.y)
if d == 0 or mag == 0:
self.x = 0
self.y = 0
else:
self.x = mag * self.x / d
self.y = mag * self.y / d
return self
def limit(self, mag):
d = Math.sqrt(self.x * self.x + self.y * self.y)
if d == 0 or mag == 0:
return
if d > mag:
self.x = mag * self.x / d
self.y = mag * self.y / d
return self
def mag(self):
return Math.sqrt(self.x * self.x + self.y * self.y)
@classmethod
def Instance(cls, xx, yy):
if cls.pool is None:
cls.pool = []
cls.pool_max_size = 10
if len(cls.pool) == 0:
return PVector(xx, yy)
else:
v = cls.pool.pop()
v.x = xx
v.y = yy
return v
@classmethod
def Free(cls, pvector):
        # Return the vector to the shared pool so Instance() can reuse it.
        if len(cls.pool) < cls.pool_max_size:
            cls.pool.append(pvector)
class Boid(Sprite):
def __init__(self, game, w=10):
Sprite.__init__(self, game, w, w)
self.color = 'white'
self.x = self.game.canvas.width * Math.random()
self.y = self.game.canvas.height * Math.random()
angle = 2 * Math.PI * Math.random()
self.dx = self.game.speed * Math.cos(angle)
self.dy = self.game.speed * Math.sin(angle)
def is_close(self, sprite, dist):
return self.distance(sprite) + self.width / 2 + sprite.width / 2 <= dist
def distance(self, sprite):
vx = self.x - sprite.x
vy = self.y - sprite.y
self_radius = (self.width + self.height) / 2
sprite_radius = (sprite.width + sprite.height) / 2
dist = Math.sqrt(vx * vx + vy * vy) - (self_radius + sprite_radius)
return dist if dist >= 0 else 0
def draw(self, ctx):
global DEBUG
Sprite.draw(self, ctx)
angle = self._angle()
ctx.save()
ctx.globalCompositeOperation = 'source-over'
if DEBUG:
ctx.strokeStyle = '#808080'
ctx.beginPath()
ctx.arc(self.x, self.y, self.game.cohesion_radius, 0, 2 * Math.PI)
ctx.stroke()
ctx.strokeStyle = '#696969'
ctx.beginPath()
ctx.arc(self.x, self.y, self.game.separation_radius + self.width/2, 0, 2 * Math.PI)
ctx.stroke()
ctx.lineWidth = 2
ctx.strokeStyle = self.color
ctx.fillStyle = self.color
ctx.beginPath()
ctx.translate(self.x, self.y)
ctx.rotate(angle)
ctx.moveTo(-1 * self.width, -0.5 * self.width)
ctx.lineTo(self.width, 0)
ctx.lineTo(-1 * self.width, 0.5 * self.width)
ctx.lineTo(-1 * self.width, -0.5 * self.width)
ctx.translate(-1 * self.originX, -1 * self.originY)
ctx.fill()
ctx.stroke()
ctx.restore()
def _angle(self, a=0.0):
angle = Math.atan2(self.dy, self.dx) + a
while angle > 2 * Math.PI:
angle = angle - 2 * Math.PI
while angle < 0:
angle = angle + 2 * Math.PI
return angle
def _find(self, boid, dist, clazz=None):
return self.game.currentScene.find(boid, dist, clazz)
def update(self, delta_time):
global DEBUG
move = PVector.Instance(self.dx, self.dy)
allignment = self.__calc_allignment().mult(self.game.allignment_mult)
separation = self.__calc_separation().mult(self.game.separation_mult)
cohesion = self.__calc_cohesion().mult(self.game.cohesion_mult)
noise = self.__calc_random_noise().mult(self.game.noise_mult)
if DEBUG:
console.log('time={} : allign={} : avoid={} : noise={} : cohese={}'.format(delta_time, allignment.mag(),
separation.mag(), noise.mag(),
cohesion.mag()))
move.add(allignment)
move.add(separation)
move.add(cohesion)
move.add(noise)
move.limit(self.game.speed)
self.dx = move.x
self.dy = move.y
self.x = self.x + self.dx * delta_time / 1000.0
if self.x < 0:
self.x = self.x + self.game.canvas.width
elif self.x > self.game.canvas.width:
self.x = self.x - self.game.canvas.width
self.y = self.y + self.dy * delta_time / 1000.0
if self.y < 0:
self.y = self.y + self.game.canvas.height
elif self.y > self.game.canvas.height:
self.y = self.y - self.game.canvas.height
PVector.Free(move)
PVector.Free(allignment)
PVector.Free(separation)
PVector.Free(noise)
def __calc_allignment(self):
steer = PVector.Instance(0, 0)
for sprite in self._find(self, self.game.allignment_radius, Boid):
d = self.distance(sprite)
if d == 0:
continue
copy = PVector.Instance(sprite.dx, sprite.dy)
copy.normalize()
copy.div(d)
steer.add(copy)
return steer
def __calc_separation(self):
steer = PVector.Instance(0, 0)
for sprite in self._find(self, self.game.separation_radius, Sprite):
d = self.distance(sprite)
if d == 0:
continue
diff = PVector(self.x - sprite.x, self.y - sprite.y)
diff.normalize()
diff.div(d)
steer.add(diff)
return steer
def __calc_random_noise(self):
return PVector.Instance(Math.random() * 2 - 1, Math.random() * 2 - 1)
def __calc_cohesion(self):
steer = PVector.Instance(0, 0)
count = 0
for sprite in self._find(self, self.game.cohesion_radius, Boid):
steer.x = steer.x + sprite.x
steer.y = steer.y + sprite.y
count = count + 1
if count > 0:
steer.x = steer.x / count
steer.y = steer.y / count
steer.normalize(0.05)
return steer
class BoidsScene(Scene):
def __init__(self, game, name=None, num_boids=8, w=10):
Scene.__init__(self, game, name)
self.color = 'black'
for n in range(num_boids):
self.append(Boid(self.game, w))
for n in range(3):
self.append(Ball(self.game, 30, 10, 'green'))
for n in range(1):
self.append(Ball(self.game, 30, 20, 'red'))
def _clear_screen(self, ctx):
ctx.save()
ctx.globalCompositeOperation = 'copy'
ctx.fillStyle = self.color
ctx.fillRect(0, 0, self.game.canvas.width, self.game.canvas.height)
ctx.restore()
def find(self, boid, dist, clazz=None):
sprite_list = []
for sprite in self:
if clazz is not None and not isinstance(sprite, clazz):
continue
if sprite == boid:
continue
if boid.is_close(sprite, dist):
sprite_list.append(sprite)
return sprite_list
class BoidsGame(Game):
def __init__(self, name='Boids', loop_time=20):
Game.__init__(self, name, loop_time)
sprite_width = 5
global_scale = sprite_width / 10.0
self.speed = 100
self.allignment_radius = 180 * global_scale
self.separation_radius = 25 * global_scale
self.cohesion_radius = self.allignment_radius
self.allignment_mult = 3
self.separation_mult = 30
self.cohesion_mult = 25
self.noise_mult = 5
self.append(BoidsScene(self, 'BOIDS', 32, sprite_width))
@staticmethod
def set_debug(b):
global DEBUG
if b is not None and b == 'true':
DEBUG = True
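# --- Editor's sketch (not part of the Transcrypt module above) ---
# Boid.update() sums four weighted steering forces and clamps the result to
# the maximum speed.  The helper below restates that combination with the
# standard library so it can run outside the browser; the default weights
# mirror BoidsGame.__init__, everything else is a simplified stand-in.
import math

def _combine_steering(velocity, allignment, separation, cohesion, noise,
                      max_speed=100.0, allignment_mult=3, separation_mult=30,
                      cohesion_mult=25, noise_mult=5):
    dx = (velocity[0] + allignment[0] * allignment_mult
          + separation[0] * separation_mult
          + cohesion[0] * cohesion_mult
          + noise[0] * noise_mult)
    dy = (velocity[1] + allignment[1] * allignment_mult
          + separation[1] * separation_mult
          + cohesion[1] * cohesion_mult
          + noise[1] * noise_mult)
    speed = math.sqrt(dx * dx + dy * dy)
    if speed > max_speed:
        # Same clamping as PVector.limit(): keep direction, cap magnitude.
        dx, dy = max_speed * dx / speed, max_speed * dy / speed
    return dx, dy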
| 31.44523
| 117
| 0.539049
|
from bedlam import Game
from bedlam import Scene
from bedlam import Sprite
from balls import Ball
document = window = Math = Date = console = 0
DEBUG = False
class PVector:
def __init__(self, xx=0, yy=0):
self.x = xx
self.y = yy
def __str__(self):
return "PVector({},{})".format(self.x, self.y)
def reset(self, xx, yy):
self.x = xx
self.y = yy
return self
def copy(self):
return PVector.Instance(self.x, self.y)
def add(self, v):
self.x = self.x + v.x
self.y = self.y + v.y
return self
def sub(self, v):
self.x = self.x - v.x
self.y = self.y - v.y
return self
def mult(self, mag):
self.x = self.x * mag
self.y = self.y * mag
return self
def div(self, mag):
self.x = self.x / mag
self.y = self.y / mag
return self
def normalize(self, mag=1.0):
d = Math.sqrt(self.x * self.x + self.y * self.y)
if d == 0 or mag == 0:
self.x = 0
self.y = 0
else:
self.x = mag * self.x / d
self.y = mag * self.y / d
return self
def limit(self, mag):
d = Math.sqrt(self.x * self.x + self.y * self.y)
if d == 0 or mag == 0:
return
if d > mag:
self.x = mag * self.x / d
self.y = mag * self.y / d
return self
def mag(self):
return Math.sqrt(self.x * self.x + self.y * self.y)
@classmethod
def Instance(cls, xx, yy):
if cls.pool is None:
cls.pool = []
cls.pool_max_size = 10
if len(cls.pool) == 0:
return PVector(xx, yy)
else:
v = cls.pool.pop()
v.x = xx
v.y = yy
return v
@classmethod
def Free(cls, pvector):
if len(cls.pool) < cls.pool_max_size:
            cls.pool.append(pvector)
class Boid(Sprite):
def __init__(self, game, w=10):
Sprite.__init__(self, game, w, w)
self.color = 'white'
self.x = self.game.canvas.width * Math.random()
self.y = self.game.canvas.height * Math.random()
angle = 2 * Math.PI * Math.random()
self.dx = self.game.speed * Math.cos(angle)
self.dy = self.game.speed * Math.sin(angle)
def is_close(self, sprite, dist):
return self.distance(sprite) + self.width / 2 + sprite.width / 2 <= dist
def distance(self, sprite):
vx = self.x - sprite.x
vy = self.y - sprite.y
self_radius = (self.width + self.height) / 2
sprite_radius = (sprite.width + sprite.height) / 2
dist = Math.sqrt(vx * vx + vy * vy) - (self_radius + sprite_radius)
return dist if dist >= 0 else 0
def draw(self, ctx):
global DEBUG
Sprite.draw(self, ctx)
angle = self._angle()
ctx.save()
ctx.globalCompositeOperation = 'source-over'
if DEBUG:
ctx.strokeStyle = '#808080'
ctx.beginPath()
ctx.arc(self.x, self.y, self.game.cohesion_radius, 0, 2 * Math.PI)
ctx.stroke()
ctx.strokeStyle = '#696969'
ctx.beginPath()
ctx.arc(self.x, self.y, self.game.separation_radius + self.width/2, 0, 2 * Math.PI)
ctx.stroke()
ctx.lineWidth = 2
ctx.strokeStyle = self.color
ctx.fillStyle = self.color
ctx.beginPath()
ctx.translate(self.x, self.y)
ctx.rotate(angle)
ctx.moveTo(-1 * self.width, -0.5 * self.width)
ctx.lineTo(self.width, 0)
ctx.lineTo(-1 * self.width, 0.5 * self.width)
ctx.lineTo(-1 * self.width, -0.5 * self.width)
ctx.translate(-1 * self.originX, -1 * self.originY)
ctx.fill()
ctx.stroke()
ctx.restore()
def _angle(self, a=0.0):
angle = Math.atan2(self.dy, self.dx) + a
while angle > 2 * Math.PI:
angle = angle - 2 * Math.PI
while angle < 0:
angle = angle + 2 * Math.PI
return angle
def _find(self, boid, dist, clazz=None):
return self.game.currentScene.find(boid, dist, clazz)
def update(self, delta_time):
global DEBUG
move = PVector.Instance(self.dx, self.dy)
allignment = self.__calc_allignment().mult(self.game.allignment_mult)
separation = self.__calc_separation().mult(self.game.separation_mult)
cohesion = self.__calc_cohesion().mult(self.game.cohesion_mult)
noise = self.__calc_random_noise().mult(self.game.noise_mult)
if DEBUG:
console.log('time={} : allign={} : avoid={} : noise={} : cohese={}'.format(delta_time, allignment.mag(),
separation.mag(), noise.mag(),
cohesion.mag()))
move.add(allignment)
move.add(separation)
move.add(cohesion)
move.add(noise)
move.limit(self.game.speed)
self.dx = move.x
self.dy = move.y
self.x = self.x + self.dx * delta_time / 1000.0
if self.x < 0:
self.x = self.x + self.game.canvas.width
elif self.x > self.game.canvas.width:
self.x = self.x - self.game.canvas.width
self.y = self.y + self.dy * delta_time / 1000.0
if self.y < 0:
self.y = self.y + self.game.canvas.height
elif self.y > self.game.canvas.height:
self.y = self.y - self.game.canvas.height
PVector.Free(move)
PVector.Free(allignment)
PVector.Free(separation)
PVector.Free(noise)
def __calc_allignment(self):
steer = PVector.Instance(0, 0)
for sprite in self._find(self, self.game.allignment_radius, Boid):
d = self.distance(sprite)
if d == 0:
continue
copy = PVector.Instance(sprite.dx, sprite.dy)
copy.normalize()
copy.div(d)
steer.add(copy)
return steer
def __calc_separation(self):
steer = PVector.Instance(0, 0)
for sprite in self._find(self, self.game.separation_radius, Sprite):
d = self.distance(sprite)
if d == 0:
continue
diff = PVector(self.x - sprite.x, self.y - sprite.y)
diff.normalize()
diff.div(d)
steer.add(diff)
return steer
def __calc_random_noise(self):
return PVector.Instance(Math.random() * 2 - 1, Math.random() * 2 - 1)
def __calc_cohesion(self):
steer = PVector.Instance(0, 0)
count = 0
for sprite in self._find(self, self.game.cohesion_radius, Boid):
steer.x = steer.x + sprite.x
steer.y = steer.y + sprite.y
count = count + 1
if count > 0:
steer.x = steer.x / count
steer.y = steer.y / count
steer.normalize(0.05)
return steer
class BoidsScene(Scene):
def __init__(self, game, name=None, num_boids=8, w=10):
Scene.__init__(self, game, name)
self.color = 'black'
for n in range(num_boids):
self.append(Boid(self.game, w))
for n in range(3):
self.append(Ball(self.game, 30, 10, 'green'))
for n in range(1):
self.append(Ball(self.game, 30, 20, 'red'))
def _clear_screen(self, ctx):
ctx.save()
ctx.globalCompositeOperation = 'copy'
ctx.fillStyle = self.color
ctx.fillRect(0, 0, self.game.canvas.width, self.game.canvas.height)
ctx.restore()
def find(self, boid, dist, clazz=None):
sprite_list = []
for sprite in self:
if clazz is not None and not isinstance(sprite, clazz):
continue
if sprite == boid:
continue
if boid.is_close(sprite, dist):
sprite_list.append(sprite)
return sprite_list
class BoidsGame(Game):
def __init__(self, name='Boids', loop_time=20):
Game.__init__(self, name, loop_time)
sprite_width = 5
global_scale = sprite_width / 10.0
self.speed = 100
self.allignment_radius = 180 * global_scale
self.separation_radius = 25 * global_scale
self.cohesion_radius = self.allignment_radius
self.allignment_mult = 3
self.separation_mult = 30
self.cohesion_mult = 25
self.noise_mult = 5
self.append(BoidsScene(self, 'BOIDS', 32, sprite_width))
@staticmethod
def set_debug(b):
global DEBUG
if b is not None and b == 'true':
DEBUG = True
| true
| true
|
79021a7efcf5dac874e5515c508c1bb0d83f476d
| 1,326
|
py
|
Python
|
local_pypfilt/tests/test_io.py
|
ruarai/epifx.covid
|
be7aecbf9e86c3402f6851ea65f6705cdb59f3cf
|
[
"BSD-3-Clause"
] | null | null | null |
local_pypfilt/tests/test_io.py
|
ruarai/epifx.covid
|
be7aecbf9e86c3402f6851ea65f6705cdb59f3cf
|
[
"BSD-3-Clause"
] | null | null | null |
local_pypfilt/tests/test_io.py
|
ruarai/epifx.covid
|
be7aecbf9e86c3402f6851ea65f6705cdb59f3cf
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test cases for the pypfilt.io module."""
import datetime
import numpy as np
import os
from pypfilt.io import read_table, date_column
def test_read_datetime():
# Test data: sequential dates with Fibonacci sequence.
content = """
date count
2020-01-01 1
2020-01-02 1
2020-01-03 2
2020-01-04 3
2020-01-05 5
2020-01-06 8
2020-01-07 13
2020-01-08 21
2020-01-09 34
"""
expect_rows = 9
expect_count = [1, 1]
for i in range(expect_rows - 2):
expect_count.append(expect_count[i] + expect_count[i + 1])
# Save this data to a temporary data file.
path = "test_read_datetime.ssv"
with open(path, encoding='utf-8', mode='w') as f:
f.write(content)
# Read the data and then remove the data file.
columns = [
date_column('date'),
('count', np.int_),
]
df = read_table(path, columns)
os.remove(path)
# Check that we received the expected number of rows.
assert len(df) == expect_rows
# Check that each row has the expected content.
for ix, row in enumerate(df):
assert isinstance(row['date'], datetime.datetime)
assert row['date'].year == 2020
assert row['date'].month == 1
assert row['date'].day == ix + 1
assert row['count'] == expect_count[ix]
| 25.5
| 66
| 0.617647
|
import datetime
import numpy as np
import os
from pypfilt.io import read_table, date_column
def test_read_datetime():
content = """
date count
2020-01-01 1
2020-01-02 1
2020-01-03 2
2020-01-04 3
2020-01-05 5
2020-01-06 8
2020-01-07 13
2020-01-08 21
2020-01-09 34
"""
expect_rows = 9
expect_count = [1, 1]
for i in range(expect_rows - 2):
expect_count.append(expect_count[i] + expect_count[i + 1])
path = "test_read_datetime.ssv"
with open(path, encoding='utf-8', mode='w') as f:
f.write(content)
columns = [
date_column('date'),
('count', np.int_),
]
df = read_table(path, columns)
os.remove(path)
assert len(df) == expect_rows
for ix, row in enumerate(df):
assert isinstance(row['date'], datetime.datetime)
assert row['date'].year == 2020
assert row['date'].month == 1
assert row['date'].day == ix + 1
assert row['count'] == expect_count[ix]
| true
| true
|
79021aa75a1f91190444e8108f7c4c5a004b7aaa
| 200
|
py
|
Python
|
lightning_transformers/task/nlp/translation/datasets/__init__.py
|
zhaisilong/lightning-transformers
|
cd6843b6caa8279df86bb5e808dfccc79ca9c3d2
|
[
"Apache-2.0"
] | null | null | null |
lightning_transformers/task/nlp/translation/datasets/__init__.py
|
zhaisilong/lightning-transformers
|
cd6843b6caa8279df86bb5e808dfccc79ca9c3d2
|
[
"Apache-2.0"
] | null | null | null |
lightning_transformers/task/nlp/translation/datasets/__init__.py
|
zhaisilong/lightning-transformers
|
cd6843b6caa8279df86bb5e808dfccc79ca9c3d2
|
[
"Apache-2.0"
] | null | null | null |
from lightning_transformers.task.nlp.translation.datasets.wmt16 import WMT16TranslationDataModule
from lightning_transformers.task.nlp.translation.datasets.smiles import SMILESTranslationDataModule
| 40
| 99
| 0.9
|
from lightning_transformers.task.nlp.translation.datasets.wmt16 import WMT16TranslationDataModule
from lightning_transformers.task.nlp.translation.datasets.smiles import SMILESTranslationDataModule
| true
| true
|
79021b0d7e1462d14c84ecc119c3cb047effa787
| 4,245
|
py
|
Python
|
script.module.placenta/lib/resources/lib/sources/de/movie2z.py
|
parser4life/tantrumrepo
|
3b37145f4772409e538cbddb0b7aa23be525772a
|
[
"Beerware"
] | 1
|
2021-05-09T19:55:51.000Z
|
2021-05-09T19:55:51.000Z
|
script.module.placenta/lib/resources/lib/sources/de/movie2z.py
|
parser4life/tantrumrepo
|
3b37145f4772409e538cbddb0b7aa23be525772a
|
[
"Beerware"
] | null | null | null |
script.module.placenta/lib/resources/lib/sources/de/movie2z.py
|
parser4life/tantrumrepo
|
3b37145f4772409e538cbddb0b7aa23be525772a
|
[
"Beerware"
] | 2
|
2020-04-01T22:11:12.000Z
|
2020-05-07T23:54:52.000Z
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: MuadDib
import re
import urllib
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['movie2z.to']
self.base_link = 'https://www.movie2z.to/de/'
self.search_link = 'search-%s.html'
self.get_link = 'redirect.php?a=m&id=%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases))
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases))
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases))
if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + source_utils.aliases_to_array(aliases))
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
url = url[:-1] if url.endswith('/') else url
url += '/%d/%d/' % (int(season), int(episode))
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
query = urlparse.urljoin(self.base_link, url)
r = client.request(query)
r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'mainmenu'})
r = dom_parser.parse_dom(r, 'li')
for i in r:
i = dom_parser.parse_dom(i, 'a')
i = i[0][0]['href']
i = client.request(i)
i = dom_parser.parse_dom(i, 'select', attrs={'id': 'selecthost'})
i = dom_parser.parse_dom(i, 'option')
for x in i:
hoster = re.search('^\S*', x[1]).group().lower()
url = x[0]['value']
valid, hoster = source_utils.is_host_valid(hoster, hostDict)
if not valid: continue
sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
url = url.replace('amp;', '')
url = client.request(url, output='geturl')
return url
def __search(self, titles):
try:
query = self.search_link % (urllib.quote_plus(urllib.quote_plus(cleantitle.query(titles[0]))))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
post = urllib.urlencode({'movlang_de': '1', 'movlang': ''})
r = client.request(query, post=post)
r = dom_parser.parse_dom(r, 'table', attrs={'class': 'table'})
r = dom_parser.parse_dom(r, 'a', attrs={'class': 'PreviewImage'})
for x in r:
title = cleantitle.get(x[1])
if title in t:
return source_utils.strip_domain(x[0]['href'])
return
except:
return
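# --- Editor's sketch (not part of the addon source above) ---
# sources() reads each <option> of the host selector as an (attributes, text)
# pair and keeps the first word of the text as the hoster name.  The demo
# below replays that extraction step on hand-made pairs so it runs without
# the Kodi/resources.lib modules; the hostnames are invented for the demo.
def _demo_extract_options():
    options = [
        ({'value': 'https://example-hoster.test/abc'}, 'vivo 720p'),
        ({'value': 'https://another-hoster.test/def'}, 'openload HD'),
    ]
    results = []
    for attrs, text in options:
        hoster = re.search(r'^\S*', text).group().lower()
        results.append((hoster, attrs['value']))
    return results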
| 35.082645
| 139
| 0.521084
|
                    hoster = re.search('^\S*', x[1]).group().lower()
url = x[0]['value']
valid, hoster = source_utils.is_host_valid(hoster, hostDict)
if not valid: continue
sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
url = url.replace('amp;', '')
url = client.request(url, output='geturl')
return url
def __search(self, titles):
try:
query = self.search_link % (urllib.quote_plus(urllib.quote_plus(cleantitle.query(titles[0]))))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
post = urllib.urlencode({'movlang_de': '1', 'movlang': ''})
r = client.request(query, post=post)
r = dom_parser.parse_dom(r, 'table', attrs={'class': 'table'})
r = dom_parser.parse_dom(r, 'a', attrs={'class': 'PreviewImage'})
for x in r:
title = cleantitle.get(x[1])
if title in t:
return source_utils.strip_domain(x[0]['href'])
return
except:
return
| true
| true
|
79021c270b531509df93b42504fbc84eaea6259f
| 94,292
|
py
|
Python
|
pycherwell/api/teams_api.py
|
greenpau/pycherwell
|
2a25446d5cf86d69e6158067faf27ce250aba966
|
[
"Apache-2.0"
] | 2
|
2020-04-09T16:41:25.000Z
|
2020-08-25T21:07:53.000Z
|
pycherwell/api/teams_api.py
|
greenpau/pycherwell
|
2a25446d5cf86d69e6158067faf27ce250aba966
|
[
"Apache-2.0"
] | 15
|
2020-02-12T14:57:30.000Z
|
2020-11-27T23:34:15.000Z
|
pycherwell/api/teams_api.py
|
greenpau/pycherwell
|
2a25446d5cf86d69e6158067faf27ce250aba966
|
[
"Apache-2.0"
] | 2
|
2020-02-12T14:57:38.000Z
|
2021-07-30T11:32:28.000Z
|
# coding: utf-8
"""
Cherwell REST API
Unofficial Python Cherwell REST API library. # noqa: E501
The version of the OpenAPI document: 9.3.2
Contact: See AUTHORS.
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from pycherwell.api_client import ApiClient
from pycherwell.exceptions import (
ApiTypeError,
ApiValueError
)
class TeamsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def teams_add_user_to_team_by_batch_v1(self, add_user_to_team_by_batch_request, **kwargs): # noqa: E501
"""Add users to a team by batch # noqa: E501
Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_add_user_to_team_by_batch_v1(add_user_to_team_by_batch_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: AddUserToTeamByBatchResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, **kwargs) # noqa: E501
def teams_add_user_to_team_by_batch_v1_with_http_info(self, add_user_to_team_by_batch_request, **kwargs): # noqa: E501
"""Add users to a team by batch # noqa: E501
Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(AddUserToTeamByBatchResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['add_user_to_team_by_batch_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_add_user_to_team_by_batch_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'add_user_to_team_by_batch_request' is set
if self.api_client.client_side_validation and ('add_user_to_team_by_batch_request' not in local_var_params or # noqa: E501
local_var_params['add_user_to_team_by_batch_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `add_user_to_team_by_batch_request` when calling `teams_add_user_to_team_by_batch_v1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'add_user_to_team_by_batch_request' in local_var_params:
body_params = local_var_params['add_user_to_team_by_batch_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/addusertoteambybatch', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AddUserToTeamByBatchResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_add_user_to_team_v1(self, add_user_to_team_request, **kwargs): # noqa: E501
"""Add a user to a team # noqa: E501
Operation to add a user to a Team. To get the user's internal ID, use \"Get a user by login ID\" or \"Get a user by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_add_user_to_team_v1(add_user_to_team_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, **kwargs) # noqa: E501
def teams_add_user_to_team_v1_with_http_info(self, add_user_to_team_request, **kwargs): # noqa: E501
"""Add a user to a team # noqa: E501
Operation to add a user to a Team. To get the user's internal ID, use \"Get a user by login ID\" or \"Get a user by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['add_user_to_team_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_add_user_to_team_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'add_user_to_team_request' is set
if self.api_client.client_side_validation and ('add_user_to_team_request' not in local_var_params or # noqa: E501
local_var_params['add_user_to_team_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `add_user_to_team_request` when calling `teams_add_user_to_team_v1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'add_user_to_team_request' in local_var_params:
body_params = local_var_params['add_user_to_team_request']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/addusertoteam', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_add_user_to_team_v2(self, add_user_to_team_request, **kwargs): # noqa: E501
"""Add a user to a team # noqa: E501
Operation to add a user to a Team. To get the user's internal ID, use \"Get a user by login ID\" or \"Get a user by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_add_user_to_team_v2(add_user_to_team_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: AddUserToTeamResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, **kwargs) # noqa: E501
def teams_add_user_to_team_v2_with_http_info(self, add_user_to_team_request, **kwargs): # noqa: E501
"""Add a user to a team # noqa: E501
Operation to add a user to a Team. To get the user's internal ID, use \"Get a user by login ID\" or \"Get a user by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(AddUserToTeamResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['add_user_to_team_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_add_user_to_team_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'add_user_to_team_request' is set
if self.api_client.client_side_validation and ('add_user_to_team_request' not in local_var_params or # noqa: E501
local_var_params['add_user_to_team_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `add_user_to_team_request` when calling `teams_add_user_to_team_v2`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'add_user_to_team_request' in local_var_params:
body_params = local_var_params['add_user_to_team_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V2/addusertoteam', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AddUserToTeamResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_delete_team_v1(self, teamid, **kwargs): # noqa: E501
"""Delete a Team # noqa: E501
Operation to delete a Team by Team ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_delete_team_v1(teamid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str teamid: Specify the Team ID. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_delete_team_v1_with_http_info(teamid, **kwargs) # noqa: E501
def teams_delete_team_v1_with_http_info(self, teamid, **kwargs): # noqa: E501
"""Delete a Team # noqa: E501
Operation to delete a Team by Team ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_delete_team_v1_with_http_info(teamid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str teamid: Specify the Team ID. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['teamid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_delete_team_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'teamid' is set
if self.api_client.client_side_validation and ('teamid' not in local_var_params or # noqa: E501
local_var_params['teamid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `teamid` when calling `teams_delete_team_v1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'teamid' in local_var_params:
path_params['teamid'] = local_var_params['teamid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/deleteteam/{teamid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_team_v1(self, teamid, **kwargs): # noqa: E501
"""Get a team by its TeamId # noqa: E501
Operation to get Team Info for a single Team using its Team ID. To get a Team's internal ID, use \"Get all available Teams.\" Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_team_v1(teamid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str teamid: The Team ID of the Team to get. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TeamResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_get_team_v1_with_http_info(teamid, **kwargs) # noqa: E501
def teams_get_team_v1_with_http_info(self, teamid, **kwargs): # noqa: E501
"""Get a team by its TeamId # noqa: E501
Operation to get Team Info for a single Team using its Team ID. To get a Team's internal ID, use \"Get all available Teams.\" Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_team_v1_with_http_info(teamid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str teamid: The Team ID of the Team to get. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TeamResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['teamid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_team_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'teamid' is set
if self.api_client.client_side_validation and ('teamid' not in local_var_params or # noqa: E501
local_var_params['teamid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `teamid` when calling `teams_get_team_v1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'teamid' in local_var_params:
path_params['teamid'] = local_var_params['teamid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/getteam/{teamid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_teams_v1(self, **kwargs): # noqa: E501
"""Get all available Teams # noqa: E501
Operation to get IDs and names for all available Teams. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_teams_v1(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TeamsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_get_teams_v1_with_http_info(**kwargs) # noqa: E501
def teams_get_teams_v1_with_http_info(self, **kwargs): # noqa: E501
"""Get all available Teams # noqa: E501
Operation to get IDs and names for all available Teams. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_teams_v1_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_teams_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/getteams', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
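# Editorial sketch of the documented ``async_req`` and ``_request_timeout``
# knobs on this method; ``api`` is the configured TeamsApi from the sketch
# above, and the timeout values are illustrative only.
#
#   thread = api.teams_get_teams_v1(async_req=True,
#                                   _request_timeout=(3.05, 27))  # (connect, read)
#   teams = thread.get()  # blocks until the worker returns a TeamsResponse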
def teams_get_teams_v2(self, **kwargs): # noqa: E501
"""Get all available Teams # noqa: E501
Operation to get IDs and names for all available Teams. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_teams_v2(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TeamsV2Response
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_get_teams_v2_with_http_info(**kwargs) # noqa: E501
def teams_get_teams_v2_with_http_info(self, **kwargs): # noqa: E501
"""Get all available Teams # noqa: E501
Operation to get IDs and names for all available Teams. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_teams_v2_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_teams_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V2/getteams', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsV2Response', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_users_teams_v1(self, user_record_id, **kwargs): # noqa: E501
"""Get Team assignments for a user # noqa: E501
Operation to get Team assignments for a user. To get record IDs, use \"Get a user by login ID\" or \"Get a user by public id.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_users_teams_v1(user_record_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str user_record_id: Specify the user record ID. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TeamsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_get_users_teams_v1_with_http_info(user_record_id, **kwargs) # noqa: E501
def teams_get_users_teams_v1_with_http_info(self, user_record_id, **kwargs): # noqa: E501
"""Get Team assignments for a user # noqa: E501
Operation to get Team assignments for a user. To get record IDs, use \"Get a user by login ID\" or \"Get a user by public id.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_users_teams_v1_with_http_info(user_record_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str user_record_id: Specify the user record ID. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['user_record_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_users_teams_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'user_record_id' is set
if self.api_client.client_side_validation and ('user_record_id' not in local_var_params or # noqa: E501
local_var_params['user_record_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `user_record_id` when calling `teams_get_users_teams_v1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'user_record_id' in local_var_params:
path_params['userRecordId'] = local_var_params['user_record_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/getusersteams/userrecordid/{userRecordId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
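# Editorial usage sketch. Per the docstring, the user record ID must first be
# resolved elsewhere ("Get a user by login ID" / "Get a user by public id");
# the ID below is a hypothetical placeholder and ``api`` is a configured
# TeamsApi instance.
#
#   user_teams = api.teams_get_users_teams_v1('hypothetical-user-record-id')
#   print(user_teams)  # TeamsResponse listing the user's Team assignments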
def teams_get_users_teams_v2(self, user_record_id, **kwargs): # noqa: E501
"""Get Team assignments for a user # noqa: E501
Operation to get Team assignments for a user. To get record IDs, use \"Get a user by login ID\" or \"Get a user by public id.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_users_teams_v2(user_record_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str user_record_id: Specify the user record ID. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TeamsV2Response
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_get_users_teams_v2_with_http_info(user_record_id, **kwargs) # noqa: E501
def teams_get_users_teams_v2_with_http_info(self, user_record_id, **kwargs): # noqa: E501
"""Get Team assignments for a user # noqa: E501
Operation to get Team assignments for a user. To get record IDs, use \"Get a user by login ID\" or \"Get a user by public id.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_users_teams_v2_with_http_info(user_record_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str user_record_id: Specify the user record ID. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['user_record_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_users_teams_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'user_record_id' is set
if self.api_client.client_side_validation and ('user_record_id' not in local_var_params or # noqa: E501
local_var_params['user_record_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `user_record_id` when calling `teams_get_users_teams_v2`") # noqa: E501
collection_formats = {}
path_params = {}
if 'user_record_id' in local_var_params:
path_params['userRecordId'] = local_var_params['user_record_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V2/getusersteams/userrecordid/{userRecordId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsV2Response', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_workgroups_v1(self, **kwargs): # noqa: E501
"""Get all available Workgroups # noqa: E501
Operation to get IDs and names for all available Workgroups. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_workgroups_v1(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TeamsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_get_workgroups_v1_with_http_info(**kwargs) # noqa: E501
def teams_get_workgroups_v1_with_http_info(self, **kwargs): # noqa: E501
"""Get all available Workgroups # noqa: E501
Operation to get IDs and names for all available Workgroups. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_workgroups_v1_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_workgroups_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/getworkgroups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_workgroups_v2(self, **kwargs): # noqa: E501
"""Get all available Workgroups # noqa: E501
Operation to get IDs and names for all available Workgroups. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_workgroups_v2(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TeamsV2Response
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_get_workgroups_v2_with_http_info(**kwargs) # noqa: E501
def teams_get_workgroups_v2_with_http_info(self, **kwargs): # noqa: E501
"""Get all available Workgroups # noqa: E501
Operation to get IDs and names for all available Workgroups. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_workgroups_v2_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_workgroups_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V2/getworkgroups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsV2Response', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_remove_customer_from_workgroup_v1(self, workgroupid, customerrecordid, **kwargs): # noqa: E501
"""Remove a customer from a Workgroup # noqa: E501
Operation to remove a Customer from a Workgroup. To remove, specify the Workgroup ID and the Customer Record ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_remove_customer_from_workgroup_v1(workgroupid, customerrecordid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str workgroupid: Specify the Workgroup ID. (required)
:param str customerrecordid: Specify the Customer record ID. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: RemoveCustomerFromWorkgroupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_remove_customer_from_workgroup_v1_with_http_info(workgroupid, customerrecordid, **kwargs) # noqa: E501
def teams_remove_customer_from_workgroup_v1_with_http_info(self, workgroupid, customerrecordid, **kwargs): # noqa: E501
"""Remove a customer from a Workgroup # noqa: E501
Operation to remove a Customer from a Workgroup. To remove, specify the Workgroup ID and the Customer Record ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_remove_customer_from_workgroup_v1_with_http_info(workgroupid, customerrecordid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str workgroupid: Specify the Workgroup ID. (required)
:param str customerrecordid: Specify the Customer record ID. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(RemoveCustomerFromWorkgroupResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['workgroupid', 'customerrecordid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_remove_customer_from_workgroup_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'workgroupid' is set
if self.api_client.client_side_validation and ('workgroupid' not in local_var_params or # noqa: E501
local_var_params['workgroupid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `workgroupid` when calling `teams_remove_customer_from_workgroup_v1`") # noqa: E501
# verify the required parameter 'customerrecordid' is set
if self.api_client.client_side_validation and ('customerrecordid' not in local_var_params or # noqa: E501
local_var_params['customerrecordid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `customerrecordid` when calling `teams_remove_customer_from_workgroup_v1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'workgroupid' in local_var_params:
path_params['workgroupid'] = local_var_params['workgroupid'] # noqa: E501
if 'customerrecordid' in local_var_params:
path_params['customerrecordid'] = local_var_params['customerrecordid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/removecustomerfromworkgroup/workgroupid/{workgroupid}/customerrecordid/{customerrecordid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RemoveCustomerFromWorkgroupResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
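# Editorial sketch with error handling. ``ApiException`` is assumed to live in
# ``pycherwell.exceptions`` alongside the ApiTypeError/ApiValueError used in
# this module, as in standard OpenAPI-generated clients; both IDs are
# hypothetical placeholders.
#
#   from pycherwell.exceptions import ApiException
#   try:
#       resp = api.teams_remove_customer_from_workgroup_v1(
#           'hypothetical-workgroup-id', 'hypothetical-customer-record-id')
#   except ApiException as exc:
#       print('removal failed: %s' % exc)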
def teams_remove_user_from_team_v1(self, team_id, userrecordid, **kwargs): # noqa: E501
"""Operation to remove a User from a Team. # noqa: E501
Operation to remove a User from a Team. To get the User's record ID, use \"Get a User by login ID\" or \"Get a User by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_remove_user_from_team_v1(team_id, userrecordid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str team_id: Specify the internal ID of the Team. (required)
:param str userrecordid: Specify the record ID of the User to remove. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_remove_user_from_team_v1_with_http_info(team_id, userrecordid, **kwargs) # noqa: E501
def teams_remove_user_from_team_v1_with_http_info(self, team_id, userrecordid, **kwargs): # noqa: E501
"""Operation to remove a User from a Team. # noqa: E501
Operation to remove a User from a Team. To get the User's record ID, use \"Get a User by login ID\" or \"Get a User by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_remove_user_from_team_v1_with_http_info(team_id, userrecordid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str team_id: Specify the internal ID of the Team. (required)
:param str userrecordid: Specify the record ID of the User to remove. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['team_id', 'userrecordid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_remove_user_from_team_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'team_id' is set
if self.api_client.client_side_validation and ('team_id' not in local_var_params or # noqa: E501
local_var_params['team_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `team_id` when calling `teams_remove_user_from_team_v1`") # noqa: E501
# verify the required parameter 'userrecordid' is set
if self.api_client.client_side_validation and ('userrecordid' not in local_var_params or # noqa: E501
local_var_params['userrecordid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `userrecordid` when calling `teams_remove_user_from_team_v1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'team_id' in local_var_params:
path_params['teamId'] = local_var_params['team_id'] # noqa: E501
if 'userrecordid' in local_var_params:
path_params['userrecordid'] = local_var_params['userrecordid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/removeuserfromteam/teamid/{teamId}/userrecordid/{userrecordid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
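# Editorial sketch of the ``*_with_http_info`` variant, which returns the
# documented (data, status_code, headers) tuple when
# ``_return_http_data_only`` is not set; ``data`` is None here because this
# endpoint declares no response_type. IDs are hypothetical placeholders.
#
#   data, status, headers = api.teams_remove_user_from_team_v1_with_http_info(
#       'hypothetical-team-id', 'hypothetical-user-record-id')
#   print(status, data)  # data is None for this call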
def teams_remove_user_from_team_v2(self, team_id, userrecordid, **kwargs): # noqa: E501
"""Operation to remove a User from a Team. # noqa: E501
Operation to remove a User from a Team. To get the User's record ID, use \"Get a User by login ID\" or \"Get a User by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_remove_user_from_team_v2(team_id, userrecordid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str team_id: Specify the internal ID of the Team. (required)
:param str userrecordid: Specify the record ID of the User to remove. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: RemoveUserFromTeamResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_remove_user_from_team_v2_with_http_info(team_id, userrecordid, **kwargs) # noqa: E501
def teams_remove_user_from_team_v2_with_http_info(self, team_id, userrecordid, **kwargs): # noqa: E501
"""Operation to remove a User from a Team. # noqa: E501
Operation to remove a User from a Team. To get the User's record ID, use \"Get a User by login ID\" or \"Get a User by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_remove_user_from_team_v2_with_http_info(team_id, userrecordid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str team_id: Specify the internal ID of the Team. (required)
:param str userrecordid: Specify the record ID of the User to remove. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(RemoveUserFromTeamResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['team_id', 'userrecordid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_remove_user_from_team_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'team_id' is set
if self.api_client.client_side_validation and ('team_id' not in local_var_params or # noqa: E501
local_var_params['team_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `team_id` when calling `teams_remove_user_from_team_v2`") # noqa: E501
# verify the required parameter 'userrecordid' is set
if self.api_client.client_side_validation and ('userrecordid' not in local_var_params or # noqa: E501
local_var_params['userrecordid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `userrecordid` when calling `teams_remove_user_from_team_v2`") # noqa: E501
collection_formats = {}
path_params = {}
if 'team_id' in local_var_params:
path_params['teamId'] = local_var_params['team_id'] # noqa: E501
if 'userrecordid' in local_var_params:
path_params['userrecordid'] = local_var_params['userrecordid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V2/removeuserfromteam/teamid/{teamId}/userrecordid/{userrecordid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RemoveUserFromTeamResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_save_team_member_v1(self, save_team_member_request, **kwargs): # noqa: E501
"""Add or Update a team member # noqa: E501
Operation to add or update a Team Member. To add or update, specify the User record ID, the Team ID, and whether the User is a Team Manager. Optionally, set the Team as the User's default Team. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_save_team_member_v1(save_team_member_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param SaveTeamMemberRequest save_team_member_request: The request object to add or update a Team Member. UserRecId specifies the User to add or update. TeamId specifies the Team to update. IsTeamManager specifies whether the User is a Team Manager, and SetAsDefaultTeam specifies whether to set this Team as the User's default Team. UserRecId, TeamId, and IsTeamManager are required. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SaveTeamMemberResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_save_team_member_v1_with_http_info(save_team_member_request, **kwargs) # noqa: E501
def teams_save_team_member_v1_with_http_info(self, save_team_member_request, **kwargs): # noqa: E501
"""Add or Update a team member # noqa: E501
Operation to add or update a Team Member. To add or update, specify the User record ID, the Team ID, and whether the User is a Team Manager. Optionally, set the Team as the User's default Team. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_save_team_member_v1_with_http_info(save_team_member_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param SaveTeamMemberRequest save_team_member_request: The request object to add or update a Team Member. UserRecId specifies the User to add or update. TeamId specifies the Team to update. IsTeamManager specifies whether the User is a Team Manager, and SetAsDefaultTeam specifies whether to set this Team as the User's default Team. UserRecId, TeamId, and IsTeamManager are required. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(SaveTeamMemberResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['save_team_member_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_save_team_member_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'save_team_member_request' is set
if self.api_client.client_side_validation and ('save_team_member_request' not in local_var_params or # noqa: E501
local_var_params['save_team_member_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `save_team_member_request` when calling `teams_save_team_member_v1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'save_team_member_request' in local_var_params:
body_params = local_var_params['save_team_member_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/saveteammember', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SaveTeamMemberResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
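# Editorial usage sketch. The snake_case constructor arguments on
# ``SaveTeamMemberRequest`` are assumptions derived from the docstring's
# UserRecId/TeamId/IsTeamManager/SetAsDefaultTeam fields and the generator's
# naming convention; the package-level import is likewise assumed.
#
#   from pycherwell import SaveTeamMemberRequest
#   req = SaveTeamMemberRequest(user_rec_id='hypothetical-user-record-id',
#                               team_id='hypothetical-team-id',
#                               is_team_manager=False,
#                               set_as_default_team=True)
#   result = api.teams_save_team_member_v1(req)  # -> SaveTeamMemberResponse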
def teams_save_team_v1(self, team_save_request, **kwargs): # noqa: E501
"""Create or update a team # noqa: E501
Operation to create or update a Team or Workgroup. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_save_team_v1(team_save_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param TeamSaveRequest team_save_request: Request object to create or update Teams or Workgroups. To create a Team, use teamType and teamName. To update a Team, use teamId. Team type values must be User or CustomerWorkgroup. The teamType cannot be changed for existing Teams or Workgroups. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TeamSaveResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_save_team_v1_with_http_info(team_save_request, **kwargs) # noqa: E501
def teams_save_team_v1_with_http_info(self, team_save_request, **kwargs): # noqa: E501
"""Create or update a team # noqa: E501
Operation to create or update a Team or Workgroup. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_save_team_v1_with_http_info(team_save_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param TeamSaveRequest team_save_request: Request object to create or update Teams or Workgroups. To create a Team, use teamType and teamName. To update a Team, use teamId. Team type values must be User or CustomerWorkgroup. The teamType cannot be changed for existing Teams or Workgroups. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TeamSaveResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['team_save_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_save_team_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'team_save_request' is set
if self.api_client.client_side_validation and ('team_save_request' not in local_var_params or # noqa: E501
local_var_params['team_save_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `team_save_request` when calling `teams_save_team_v1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'team_save_request' in local_var_params:
body_params = local_var_params['team_save_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/saveteam', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamSaveResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
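# Editorial usage sketch. Per the docstring, creating a Team requires teamType
# and teamName; the snake_case constructor arguments and package-level import
# are assumptions based on the generator's conventions.
#
#   from pycherwell import TeamSaveRequest
#   req = TeamSaveRequest(team_type='User', team_name='Hypothetical Team')
#   saved = api.teams_save_team_v1(req)  # TeamSaveResponse with the new ID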
def teams_save_workgroup_member_v1(self, save_workgroup_member_request, **kwargs): # noqa: E501
"""Save the membership status of a Workgroup member. # noqa: E501
Operation to add or update a Workgroup Member. To add or update, specify the Customer record ID, the Workgroup ID, and whether the Customer is a Workgroup Manager. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_save_workgroup_member_v1(save_workgroup_member_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param SaveWorkgroupMemberRequest save_workgroup_member_request: The request object to add or update a Workgroup Member. CustomerRecordId specifies the Customer to add or update. WorkgroupId specifies the Workgroup to update. CustomerIsWorkgroupManager specifies whether the Customer is a Workgroup Manager. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SaveWorkgroupMemberResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.teams_save_workgroup_member_v1_with_http_info(save_workgroup_member_request, **kwargs) # noqa: E501
def teams_save_workgroup_member_v1_with_http_info(self, save_workgroup_member_request, **kwargs): # noqa: E501
"""Save the membership status of a Workgroup member. # noqa: E501
Operation to add or update a Workgroup Member. To add or update, specify the Customer record ID, the Workgroup ID, and whether the Customer is a Workgroup Manager. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_save_workgroup_member_v1_with_http_info(save_workgroup_member_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param SaveWorkgroupMemberRequest save_workgroup_member_request: The request object to add or update a Workgroup Member. CustomerRecordId specifies the Customer to add or update. WorkgroupId specifies the Workgroup to update. CustomerIsWorkgroupManager specifies whether the Customer is a Workgroup Manager. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(SaveWorkgroupMemberResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['save_workgroup_member_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_save_workgroup_member_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'save_workgroup_member_request' is set
if self.api_client.client_side_validation and ('save_workgroup_member_request' not in local_var_params or # noqa: E501
local_var_params['save_workgroup_member_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `save_workgroup_member_request` when calling `teams_save_workgroup_member_v1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'save_workgroup_member_request' in local_var_params:
body_params = local_var_params['save_workgroup_member_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/V1/saveworkgroupmember', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SaveWorkgroupMemberResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
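# Editorial usage sketch. Constructor arguments mirror the docstring's
# CustomerRecordId/WorkgroupId/CustomerIsWorkgroupManager fields converted to
# snake_case (an assumption), and the package-level import is likewise assumed.
#
#   from pycherwell import SaveWorkgroupMemberRequest
#   req = SaveWorkgroupMemberRequest(
#       customer_record_id='hypothetical-customer-record-id',
#       workgroup_id='hypothetical-workgroup-id',
#       customer_is_workgroup_manager=False)
#   result = api.teams_save_workgroup_member_v1(req)  # SaveWorkgroupMemberResponse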
avg_line_length: 50.182012
max_line_length: 404
alphanum_fraction: 0.62053

content_no_comment:
from __future__ import absolute_import
import re
import six
from pycherwell.api_client import ApiClient
from pycherwell.exceptions import (
ApiTypeError,
ApiValueError
)
class TeamsApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def teams_add_user_to_team_by_batch_v1(self, add_user_to_team_by_batch_request, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, **kwargs)
def teams_add_user_to_team_by_batch_v1_with_http_info(self, add_user_to_team_by_batch_request, **kwargs):
local_var_params = locals()
all_params = ['add_user_to_team_by_batch_request']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_add_user_to_team_by_batch_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('add_user_to_team_by_batch_request' not in local_var_params or
local_var_params['add_user_to_team_by_batch_request'] is None):
raise ApiValueError("Missing the required parameter `add_user_to_team_by_batch_request` when calling `teams_add_user_to_team_by_batch_v1`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'add_user_to_team_by_batch_request' in local_var_params:
body_params = local_var_params['add_user_to_team_by_batch_request']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V1/addusertoteambybatch', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AddUserToTeamByBatchResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_add_user_to_team_v1(self, add_user_to_team_request, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, **kwargs)
def teams_add_user_to_team_v1_with_http_info(self, add_user_to_team_request, **kwargs):
local_var_params = locals()
all_params = ['add_user_to_team_request']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_add_user_to_team_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('add_user_to_team_request' not in local_var_params or
local_var_params['add_user_to_team_request'] is None):
raise ApiValueError("Missing the required parameter `add_user_to_team_request` when calling `teams_add_user_to_team_v1`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'add_user_to_team_request' in local_var_params:
body_params = local_var_params['add_user_to_team_request']
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V1/addusertoteam', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_add_user_to_team_v2(self, add_user_to_team_request, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, **kwargs)
def teams_add_user_to_team_v2_with_http_info(self, add_user_to_team_request, **kwargs):
local_var_params = locals()
all_params = ['add_user_to_team_request']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_add_user_to_team_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('add_user_to_team_request' not in local_var_params or
local_var_params['add_user_to_team_request'] is None):
raise ApiValueError("Missing the required parameter `add_user_to_team_request` when calling `teams_add_user_to_team_v2`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'add_user_to_team_request' in local_var_params:
body_params = local_var_params['add_user_to_team_request']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V2/addusertoteam', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AddUserToTeamResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_delete_team_v1(self, teamid, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_delete_team_v1_with_http_info(teamid, **kwargs)
def teams_delete_team_v1_with_http_info(self, teamid, **kwargs):
local_var_params = locals()
all_params = ['teamid']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_delete_team_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('teamid' not in local_var_params or
local_var_params['teamid'] is None):
raise ApiValueError("Missing the required parameter `teamid` when calling `teams_delete_team_v1`")
collection_formats = {}
path_params = {}
if 'teamid' in local_var_params:
path_params['teamid'] = local_var_params['teamid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
auth_settings = []
return self.api_client.call_api(
'/api/V1/deleteteam/{teamid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_team_v1(self, teamid, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_get_team_v1_with_http_info(teamid, **kwargs)
def teams_get_team_v1_with_http_info(self, teamid, **kwargs):
local_var_params = locals()
all_params = ['teamid']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_team_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('teamid' not in local_var_params or
local_var_params['teamid'] is None):
raise ApiValueError("Missing the required parameter `teamid` when calling `teams_get_team_v1`")
collection_formats = {}
path_params = {}
if 'teamid' in local_var_params:
path_params['teamid'] = local_var_params['teamid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V1/getteam/{teamid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_teams_v1(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_get_teams_v1_with_http_info(**kwargs)
def teams_get_teams_v1_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_teams_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V1/getteams', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_teams_v2(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_get_teams_v2_with_http_info(**kwargs)
def teams_get_teams_v2_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_teams_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V2/getteams', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsV2Response',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_users_teams_v1(self, user_record_id, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_get_users_teams_v1_with_http_info(user_record_id, **kwargs)
def teams_get_users_teams_v1_with_http_info(self, user_record_id, **kwargs):
local_var_params = locals()
all_params = ['user_record_id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_users_teams_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('user_record_id' not in local_var_params or
local_var_params['user_record_id'] is None):
raise ApiValueError("Missing the required parameter `user_record_id` when calling `teams_get_users_teams_v1`")
collection_formats = {}
path_params = {}
if 'user_record_id' in local_var_params:
path_params['userRecordId'] = local_var_params['user_record_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V1/getusersteams/userrecordid/{userRecordId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_users_teams_v2(self, user_record_id, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_get_users_teams_v2_with_http_info(user_record_id, **kwargs)
def teams_get_users_teams_v2_with_http_info(self, user_record_id, **kwargs):
local_var_params = locals()
all_params = ['user_record_id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_users_teams_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('user_record_id' not in local_var_params or
local_var_params['user_record_id'] is None):
raise ApiValueError("Missing the required parameter `user_record_id` when calling `teams_get_users_teams_v2`")
collection_formats = {}
path_params = {}
if 'user_record_id' in local_var_params:
path_params['userRecordId'] = local_var_params['user_record_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V2/getusersteams/userrecordid/{userRecordId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsV2Response',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_workgroups_v1(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_get_workgroups_v1_with_http_info(**kwargs)
def teams_get_workgroups_v1_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_workgroups_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V1/getworkgroups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_get_workgroups_v2(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_get_workgroups_v2_with_http_info(**kwargs)
def teams_get_workgroups_v2_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_get_workgroups_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V2/getworkgroups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamsV2Response',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_remove_customer_from_workgroup_v1(self, workgroupid, customerrecordid, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_remove_customer_from_workgroup_v1_with_http_info(workgroupid, customerrecordid, **kwargs)
def teams_remove_customer_from_workgroup_v1_with_http_info(self, workgroupid, customerrecordid, **kwargs):
local_var_params = locals()
all_params = ['workgroupid', 'customerrecordid']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_remove_customer_from_workgroup_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('workgroupid' not in local_var_params or
local_var_params['workgroupid'] is None):
raise ApiValueError("Missing the required parameter `workgroupid` when calling `teams_remove_customer_from_workgroup_v1`")
if self.api_client.client_side_validation and ('customerrecordid' not in local_var_params or
local_var_params['customerrecordid'] is None):
raise ApiValueError("Missing the required parameter `customerrecordid` when calling `teams_remove_customer_from_workgroup_v1`")
collection_formats = {}
path_params = {}
if 'workgroupid' in local_var_params:
path_params['workgroupid'] = local_var_params['workgroupid']
if 'customerrecordid' in local_var_params:
path_params['customerrecordid'] = local_var_params['customerrecordid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V1/removecustomerfromworkgroup/workgroupid/{workgroupid}/customerrecordid/{customerrecordid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RemoveCustomerFromWorkgroupResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_remove_user_from_team_v1(self, team_id, userrecordid, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_remove_user_from_team_v1_with_http_info(team_id, userrecordid, **kwargs)
def teams_remove_user_from_team_v1_with_http_info(self, team_id, userrecordid, **kwargs):
local_var_params = locals()
all_params = ['team_id', 'userrecordid']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_remove_user_from_team_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('team_id' not in local_var_params or
local_var_params['team_id'] is None):
raise ApiValueError("Missing the required parameter `team_id` when calling `teams_remove_user_from_team_v1`")
if self.api_client.client_side_validation and ('userrecordid' not in local_var_params or
local_var_params['userrecordid'] is None):
raise ApiValueError("Missing the required parameter `userrecordid` when calling `teams_remove_user_from_team_v1`")
collection_formats = {}
path_params = {}
if 'team_id' in local_var_params:
path_params['teamId'] = local_var_params['team_id']
if 'userrecordid' in local_var_params:
path_params['userrecordid'] = local_var_params['userrecordid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
auth_settings = []
return self.api_client.call_api(
'/api/V1/removeuserfromteam/teamid/{teamId}/userrecordid/{userrecordid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_remove_user_from_team_v2(self, team_id, userrecordid, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_remove_user_from_team_v2_with_http_info(team_id, userrecordid, **kwargs)
def teams_remove_user_from_team_v2_with_http_info(self, team_id, userrecordid, **kwargs):
local_var_params = locals()
all_params = ['team_id', 'userrecordid']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_remove_user_from_team_v2" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('team_id' not in local_var_params or
local_var_params['team_id'] is None):
raise ApiValueError("Missing the required parameter `team_id` when calling `teams_remove_user_from_team_v2`")
if self.api_client.client_side_validation and ('userrecordid' not in local_var_params or
local_var_params['userrecordid'] is None):
raise ApiValueError("Missing the required parameter `userrecordid` when calling `teams_remove_user_from_team_v2`")
collection_formats = {}
path_params = {}
if 'team_id' in local_var_params:
path_params['teamId'] = local_var_params['team_id']
if 'userrecordid' in local_var_params:
path_params['userrecordid'] = local_var_params['userrecordid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V2/removeuserfromteam/teamid/{teamId}/userrecordid/{userrecordid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RemoveUserFromTeamResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_save_team_member_v1(self, save_team_member_request, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_save_team_member_v1_with_http_info(save_team_member_request, **kwargs)
def teams_save_team_member_v1_with_http_info(self, save_team_member_request, **kwargs):
local_var_params = locals()
all_params = ['save_team_member_request']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_save_team_member_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('save_team_member_request' not in local_var_params or
local_var_params['save_team_member_request'] is None):
raise ApiValueError("Missing the required parameter `save_team_member_request` when calling `teams_save_team_member_v1`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'save_team_member_request' in local_var_params:
body_params = local_var_params['save_team_member_request']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V1/saveteammember', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SaveTeamMemberResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_save_team_v1(self, team_save_request, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_save_team_v1_with_http_info(team_save_request, **kwargs)
def teams_save_team_v1_with_http_info(self, team_save_request, **kwargs):
local_var_params = locals()
all_params = ['team_save_request']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_save_team_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('team_save_request' not in local_var_params or
local_var_params['team_save_request'] is None):
raise ApiValueError("Missing the required parameter `team_save_request` when calling `teams_save_team_v1`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'team_save_request' in local_var_params:
body_params = local_var_params['team_save_request']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V1/saveteam', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamSaveResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def teams_save_workgroup_member_v1(self, save_workgroup_member_request, **kwargs):
kwargs['_return_http_data_only'] = True
return self.teams_save_workgroup_member_v1_with_http_info(save_workgroup_member_request, **kwargs)
def teams_save_workgroup_member_v1_with_http_info(self, save_workgroup_member_request, **kwargs):
local_var_params = locals()
all_params = ['save_workgroup_member_request']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method teams_save_workgroup_member_v1" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('save_workgroup_member_request' not in local_var_params or
local_var_params['save_workgroup_member_request'] is None):
raise ApiValueError("Missing the required parameter `save_workgroup_member_request` when calling `teams_save_workgroup_member_v1`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'save_workgroup_member_request' in local_var_params:
body_params = local_var_params['save_workgroup_member_request']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/V1/saveworkgroupmember', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SaveWorkgroupMemberResponse',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
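# The generated methods above all share the swagger-codegen calling
# convention: a thin wrapper sets _return_http_data_only and delegates to a
# *_with_http_info sibling that validates kwargs and calls api_client.call_api.
# A minimal usage sketch follows; the class and import names (TeamsApi,
# ApiClient, cherwell_client) are assumptions based on codegen conventions,
# not confirmed by this file.
from cherwell_client import ApiClient, TeamsApi  # hypothetical import path

api_client = ApiClient()           # configure host/authentication as required
teams_api = TeamsApi(api_client)

# Synchronous call: returns the deserialized TeamResponse model
team = teams_api.teams_get_team_v1("team-id-123")  # hypothetical team id
print(team)

# Asynchronous variant: async_req=True returns a handle whose .get()
# blocks until the HTTP request completes
handle = teams_api.teams_get_teams_v2(async_req=True)
all_teams = handle.get()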
| true
| true
|
79021ca66f3988922ffca3051dc7caa3018d166e
| 636
|
py
|
Python
|
setup.py
|
planetceres/avenue
|
ed1369dd759f9ec389d240f624c36e3607583219
|
[
"MIT"
] | 11
|
2019-11-13T00:05:07.000Z
|
2021-04-28T20:45:27.000Z
|
setup.py
|
planetceres/avenue
|
ed1369dd759f9ec389d240f624c36e3607583219
|
[
"MIT"
] | null | null | null |
setup.py
|
planetceres/avenue
|
ed1369dd759f9ec389d240f624c36e3607583219
|
[
"MIT"
] | 1
|
2021-03-01T09:19:32.000Z
|
2021-03-01T09:19:32.000Z
|
from setuptools import setup, find_packages
import os
setup(name='avenue',
      version='0.1',
description='Element AI car Simulator',
url='https://github.com/cyrilibrahim/Avenue',
author='ElementAI',
author_email='cyril.ibrahim@elementai.com',
license='',
zip_safe=False,
install_requires=[
"gdown",
# "mlagents==0.5.0",
"gym",
# "mlagents_frozen",
"mlagents @ git+https://git@github.com/rmst/ml-agents-frozen@fd10e3544472b365701da2526a8262e0c8a15784#egg=mlagents",
],
extras_require={},
packages=find_packages()
)
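# A quick post-install sanity check for the dependencies declared above
# (a sketch, not part of the package): assumes `pip install .` already ran
# in a Python 3.8+ environment so importlib.metadata is available.
from importlib import metadata

for dist in ("gdown", "gym", "mlagents"):
    try:
        print(dist, metadata.version(dist))
    except metadata.PackageNotFoundError:
        print(dist, "not installed")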
| 28.909091
| 128
| 0.613208
|
from setuptools import setup, find_packages
import os
setup(name='avenue',
      version='0.1',
description='Element AI car Simulator',
url='https://github.com/cyrilibrahim/Avenue',
author='ElementAI',
author_email='cyril.ibrahim@elementai.com',
license='',
zip_safe=False,
install_requires=[
"gdown",
"gym",
"mlagents @ git+https://git@github.com/rmst/ml-agents-frozen@fd10e3544472b365701da2526a8262e0c8a15784#egg=mlagents",
],
extras_require={},
packages=find_packages()
)
| true
| true
|
79021cd12d603381c47e7b41a7d3141cddbee4f4
| 3,820
|
py
|
Python
|
framework/SupervisedLearning/pickledROM.py
|
alptezbasaran/raven
|
fd6fe8fe90b59d6dd3615cfea929722f3e04b2ca
|
[
"Apache-2.0"
] | 1
|
2018-07-02T21:12:48.000Z
|
2018-07-02T21:12:48.000Z
|
framework/SupervisedLearning/pickledROM.py
|
alptezbasaran/raven
|
fd6fe8fe90b59d6dd3615cfea929722f3e04b2ca
|
[
"Apache-2.0"
] | null | null | null |
framework/SupervisedLearning/pickledROM.py
|
alptezbasaran/raven
|
fd6fe8fe90b59d6dd3615cfea929722f3e04b2ca
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on May 8, 2018
@author: talbpaul
Originally from SupervisedLearning.py, split in PR #650 in July 2018
Specific ROM implementation for pickledROM
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .SupervisedLearning import supervisedLearning
#Internal Modules End--------------------------------------------------------------------------------
class pickledROM(supervisedLearning):
"""
Placeholder for ROMs that will be generated by unpickling from file.
"""
def __init__(self,messageHandler,**kwargs):
"""
A constructor that will appropriately intialize a supervised learning object
@ In, messageHandler, MessageHandler object, it is in charge of raising errors, and printing messages
@ In, kwargs, dict, an arbitrary list of kwargs
@ Out, None
"""
self.printTag = 'pickledROM'
self.messageHandler = messageHandler
self._dynamicHandling = False
self.initOptionDict = {}
self.features = ['PlaceHolder']
self.target = 'PlaceHolder'
def __confidenceLocal__(self,featureVals):
"""
This should return an estimation of the quality of the prediction.
@ In, featureVals, 2-D numpy array, [n_samples,n_features]
@ Out, confidence, float, the confidence
"""
pass
def __resetLocal__(self):
"""
Reset ROM. After this method the ROM should be described only by the initial parameter settings
@ In, None
@ Out, None
"""
pass
def __returnCurrentSettingLocal__(self):
"""
Returns a dictionary with the parameters and their current values
@ In, None
@ Out, params, dict, dictionary of parameter names and current values
"""
pass
def __returnInitialParametersLocal__(self):
"""
Returns a dictionary with the parameters and their initial values
@ In, None
@ Out, params, dict, dictionary of parameter names and initial values
"""
params = {}
return params
def __evaluateLocal__(self,featureVals):
"""
Evaluates a point.
@ In, featureVals, list, of values at which to evaluate the ROM
@ Out, returnDict, dict, the evaluated point for each target
"""
self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.')
def __trainLocal__(self,featureVals,targetVals):
"""
Trains ROM.
@ In, featureVals, np.ndarray, feature values
@ In, targetVals, np.ndarray, target values
"""
self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.')
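# pickledROM is only a placeholder until an IO step unpickles a trained ROM
# over it. A minimal sketch of the pickle round-trip it anticipates; the
# dictionary stands in for a real trained ROM object and the path is
# illustrative only.
import pickle

trained_rom = {"targets": ["ans"], "weights": [0.1, 0.9]}  # stand-in object

with open("rom.pk", "wb") as f:
    pickle.dump(trained_rom, f)

with open("rom.pk", "rb") as f:
    restored = pickle.load(f)

assert restored == trained_rom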
| 37.821782
| 132
| 0.620942
|
from __future__ import division, print_function, unicode_literals, absolute_import
from .SupervisedLearning import supervisedLearning
class pickledROM(supervisedLearning):
def __init__(self,messageHandler,**kwargs):
self.printTag = 'pickledROM'
self.messageHandler = messageHandler
self._dynamicHandling = False
self.initOptionDict = {}
self.features = ['PlaceHolder']
self.target = 'PlaceHolder'
def __confidenceLocal__(self,featureVals):
pass
def __resetLocal__(self):
pass
def __returnCurrentSettingLocal__(self):
pass
def __returnInitialParametersLocal__(self):
params = {}
return params
def __evaluateLocal__(self,featureVals):
self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.')
def __trainLocal__(self,featureVals,targetVals):
self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.')
| true
| true
|
79021cd32bc08de71246244cc8084ac9adc0fbaf
| 2,197
|
py
|
Python
|
ctsimu/test.py
|
BAMresearch/ctsimu-toolbox
|
2329fe0bba8a89061430649c043c70c58835a435
|
[
"Apache-2.0"
] | null | null | null |
ctsimu/test.py
|
BAMresearch/ctsimu-toolbox
|
2329fe0bba8a89061430649c043c70c58835a435
|
[
"Apache-2.0"
] | null | null | null |
ctsimu/test.py
|
BAMresearch/ctsimu-toolbox
|
2329fe0bba8a89061430649c043c70c58835a435
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
import os # File and path handling
import numpy
import copy # for deepcopy
import math
from .image import ImageFile, Image, ImageROI, ImageStack
from .geometry import Geometry
from .processing.pipeline import Pipeline
from .processing.step import Step
from .helpers import *
def touchDirectory(folder):
if not os.path.exists(folder):
os.makedirs(folder)
class generalTest(Step):
""" General class for test scenario evaluations: get image(s), run and store evaluation. """
def __init__(self, testName="General Test", name=None, nExpectedRuns=1, resultFileDirectory=".", rawOutput=False):
Step.__init__(self, testName)
self.testName = testName
self.subtests = []
self.prepared = False
self.currentRun = 0
self.nExpectedRuns = None # usually, number of projections to evaluate
self.resultFileDirectory = None
self.name = None
self.rawOutput = None
self.setName(name)
self.setExpectedRuns(nExpectedRuns)
self.setResultFileDirectory(resultFileDirectory)
self.setRawOutput(rawOutput)
self.reset()
def reset(self):
self.currentRun = 0
self.prepared = False
def addSubtest(self, subt):
self.subtests.append(subt)
def setName(self, name=None):
""" Set an individual name for the (sub) test. """
		if name is not None:
self.name = name
else:
self.name = self.testName
def setExpectedRuns(self, n=1):
self.nExpectedRuns = n
def setResultFileDirectory(self, resultFileDirectory="."):
""" Set the location where test results should be saved. """
self.resultFileDirectory = resultFileDirectory
touchDirectory(self.resultFileDirectory)
def setRawOutput(self, rawOutput=False):
""" Save intermediate projections as RAW instead of TIFF? """
self.rawOutput = rawOutput
def plotResults(self):
""" Plot results of evaluation. """
# Should be called by step's followUp() function, if needed.
pass
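# A hedged sketch of how a concrete test could build on generalTest; the
# subclass name, scenario identifier and run body are invented for
# illustration and assume the generalTest class above is importable.
class resolutionTest(generalTest):
	def __init__(self, resultFileDirectory="."):
		generalTest.__init__(
			self,
			testName="2D-SW-1",   # hypothetical scenario identifier
			nExpectedRuns=1,
			resultFileDirectory=resultFileDirectory,
		)

	def run(self, image):
		# evaluate one projection; the body is a placeholder
		self.currentRun += 1
		return image

demo = resolutionTest(resultFileDirectory="results")
demo.setRawOutput(False)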
| 30.943662
| 119
| 0.631771
|
import os
import numpy
import copy
import math
from .image import ImageFile, Image, ImageROI, ImageStack
from .geometry import Geometry
from .processing.pipeline import Pipeline
from .processing.step import Step
from .helpers import *
def touchDirectory(folder):
if not os.path.exists(folder):
os.makedirs(folder)
class generalTest(Step):
def __init__(self, testName="General Test", name=None, nExpectedRuns=1, resultFileDirectory=".", rawOutput=False):
Step.__init__(self, testName)
self.testName = testName
self.subtests = []
self.prepared = False
self.currentRun = 0
self.nExpectedRuns = None
self.resultFileDirectory = None
self.name = None
self.rawOutput = None
self.setName(name)
self.setExpectedRuns(nExpectedRuns)
self.setResultFileDirectory(resultFileDirectory)
self.setRawOutput(rawOutput)
self.reset()
def reset(self):
self.currentRun = 0
self.prepared = False
def addSubtest(self, subt):
self.subtests.append(subt)
def setName(self, name=None):
		if name is not None:
self.name = name
else:
self.name = self.testName
def setExpectedRuns(self, n=1):
self.nExpectedRuns = n
def setResultFileDirectory(self, resultFileDirectory="."):
self.resultFileDirectory = resultFileDirectory
touchDirectory(self.resultFileDirectory)
def setRawOutput(self, rawOutput=False):
self.rawOutput = rawOutput
def plotResults(self):
pass
| true
| true
|
79021d1fbf174a4318e0c00c0590c81b1657a665
| 7,150
|
py
|
Python
|
openpyxlzip/formatting/tests/test_formatting.py
|
ankitJoshi03/openpyxlzip
|
f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647
|
[
"MIT"
] | null | null | null |
openpyxlzip/formatting/tests/test_formatting.py
|
ankitJoshi03/openpyxlzip
|
f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647
|
[
"MIT"
] | null | null | null |
openpyxlzip/formatting/tests/test_formatting.py
|
ankitJoshi03/openpyxlzip
|
f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2010-2020 openpyxlzip
# package imports
from openpyxlzip.reader.excel import load_workbook
from openpyxlzip.xml.functions import tostring, fromstring
from openpyxlzip.styles import Border, Side, PatternFill, Color, Font, fills, borders, colors
from openpyxlzip.styles.differential import DifferentialStyle, DifferentialStyleList
from openpyxlzip.formatting.formatting import ConditionalFormattingList
from openpyxlzip.formatting.rule import CellIsRule, FormulaRule, Rule
# test imports
import pytest
from openpyxlzip.tests.helper import compare_xml
class DummyWorkbook():
def __init__(self):
self._differential_styles = DifferentialStyleList()
self.worksheets = []
class DummyWorksheet():
def __init__(self):
self.conditional_formatting = ConditionalFormattingList()
self.parent = DummyWorkbook()
def test_conditional_formatting_read(datadir):
datadir.chdir()
reference_file = 'conditional-formatting.xlsx'
wb = load_workbook(reference_file)
ws = wb.active
rules = ws.conditional_formatting
assert len(rules) == 30
# First test the conditional formatting rules read
rule = rules['A1:A1048576'][0]
assert dict(rule) == {'priority':'30', 'type': 'colorScale', }
rule = rules['B1:B10'][0]
assert dict(rule) == {'priority': '29', 'type': 'colorScale'}
rule = rules['C1:C10'][0]
assert dict(rule) == {'priority': '28', 'type': 'colorScale'}
rule = rules['D1:D10'][0]
assert dict(rule) == {'priority': '27', 'type': 'colorScale', }
rule = rules['E1:E10'][0]
assert dict(rule) == {'priority': '26', 'type': 'colorScale', }
rule = rules['F1:F10'][0]
assert dict(rule) == {'priority': '25', 'type': 'colorScale', }
rule = rules['G1:G10'][0]
assert dict(rule) == {'priority': '24', 'type': 'colorScale', }
rule = rules['H1:H10'][0]
assert dict(rule) == {'priority': '23', 'type': 'colorScale', }
rule = rules['I1:I10'][0]
assert dict(rule) == {'priority': '22', 'type': 'colorScale', }
rule = rules['J1:J10'][0]
assert dict(rule) == {'priority': '21', 'type': 'colorScale', }
rule = rules['K1:K10'][0]
assert dict(rule) == {'priority': '20', 'type': 'dataBar'}
rule = rules['L1:L10'][0]
assert dict(rule) == {'priority': '19', 'type': 'dataBar'}
rule = rules['M1:M10'][0]
assert dict(rule) == {'priority': '18', 'type': 'dataBar'}
rule = rules['N1:N10'][0]
assert dict(rule) == {'priority': '17', 'type': 'iconSet'}
rule = rules['O1:O10'][0]
assert dict(rule) == {'priority': '16', 'type': 'iconSet'}
rule = rules['P1:P10'][0]
assert dict(rule) == {'priority': '15', 'type': 'iconSet'}
rule = rules['Q1:Q10'][0]
assert dict(rule) == {'text': '3', 'priority': '14', 'dxfId': '27',
'operator': 'containsText', 'type': 'containsText'}
assert rule.dxf == DifferentialStyle(font=Font(color='FF9C0006'),
fill=PatternFill(bgColor='FFFFC7CE')
)
rule = rules['R1:R10'][0]
assert dict(rule) == {'operator': 'between', 'dxfId': '26', 'type':
'cellIs', 'priority': '13'}
assert rule.dxf == DifferentialStyle(font=Font(color='FF9C6500'),
fill=PatternFill(bgColor='FFFFEB9C'))
rule = rules['S1:S10'][0]
assert dict(rule) == {'priority': '12', 'dxfId': '25', 'percent': '1',
'type': 'top10', 'rank': '10'}
rule = rules['T1:T10'][0]
assert dict(rule) == {'priority': '11', 'dxfId': '24', 'type': 'top10',
'rank': '4', 'bottom': '1'}
rule = rules['U1:U10'][0]
assert dict(rule) == {'priority': '10', 'dxfId': '23', 'type':
'aboveAverage'}
rule = rules['V1:V10'][0]
assert dict(rule) == {'aboveAverage': '0', 'dxfId': '22', 'type':
'aboveAverage', 'priority': '9'}
rule = rules['W1:W10'][0]
assert dict(rule) == {'priority': '8', 'dxfId': '21', 'type':
'aboveAverage', 'equalAverage': '1'}
rule = rules['X1:X10'][0]
assert dict(rule) == {'aboveAverage': '0', 'dxfId': '20', 'priority': '7',
'type': 'aboveAverage', 'equalAverage': '1'}
rule = rules['Y1:Y10'][0]
assert dict(rule) == {'priority': '6', 'dxfId': '19', 'type':
'aboveAverage', 'stdDev': '1'}
rule = rules['Z1:Z10'][0]
    assert dict(rule) == {'aboveAverage': '0', 'dxfId': '18', 'type':
'aboveAverage', 'stdDev': '1', 'priority': '5'}
assert rule.dxf == DifferentialStyle(font=Font(b=True, i=True, color='FF9C0006'),
fill=PatternFill(bgColor='FFFFC7CE'),
border=Border(
left=Side(style='thin', color=Color(theme=5)),
right=Side(style='thin', color=Color(theme=5)),
top=Side(style='thin', color=Color(theme=5)),
bottom=Side(style='thin', color=Color(theme=5))
)
)
rule = rules['AA1:AA10'][0]
assert dict(rule) == {'priority': '4', 'dxfId': '17', 'type':
'aboveAverage', 'stdDev': '2'}
rule = rules['AB1:AB10'][0]
assert dict(rule) == {'priority': '3', 'dxfId': '16', 'type':
'duplicateValues'}
rule = rules['AC1:AC10'][0]
assert dict(rule) == {'priority': '2', 'dxfId': '15', 'type':
'uniqueValues'}
rule = rules['AD1:AD10'][0]
assert dict(rule) == {'priority': '1', 'dxfId': '14', 'type': 'expression',}
@pytest.fixture
def ConditionalFormatting():
from ..formatting import ConditionalFormatting
return ConditionalFormatting
class TestConditionalFormatting:
def test_ctor(self, ConditionalFormatting):
cf = ConditionalFormatting(sqref="A1:B5")
xml = tostring(cf.to_tree())
expected = """
<conditionalFormatting sqref="A1:B5" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_tree(self, ConditionalFormatting):
src = """
<conditionalFormatting sqref="A1:B5" />
"""
tree = fromstring(src)
cf = ConditionalFormatting.from_tree(tree)
assert cf.sqref == "A1:B5"
def test_eq(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
c2 = ConditionalFormatting("A1:B5", pivot=True)
assert c1 == c2
def test_hash(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
assert hash(c1) == hash("A1:B5")
def test_repr(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
assert repr(c1) == "<ConditionalFormatting A1:B5>"
def test_contains(self, ConditionalFormatting):
c2 = ConditionalFormatting("A1:A5 B1:B5")
assert "B2" in c2
| 35.221675
| 93
| 0.554685
|
from openpyxlzip.reader.excel import load_workbook
from openpyxlzip.xml.functions import tostring, fromstring
from openpyxlzip.styles import Border, Side, PatternFill, Color, Font, fills, borders, colors
from openpyxlzip.styles.differential import DifferentialStyle, DifferentialStyleList
from openpyxlzip.formatting.formatting import ConditionalFormattingList
from openpyxlzip.formatting.rule import CellIsRule, FormulaRule, Rule
import pytest
from openpyxlzip.tests.helper import compare_xml
class DummyWorkbook():
def __init__(self):
self._differential_styles = DifferentialStyleList()
self.worksheets = []
class DummyWorksheet():
def __init__(self):
self.conditional_formatting = ConditionalFormattingList()
self.parent = DummyWorkbook()
def test_conditional_formatting_read(datadir):
datadir.chdir()
reference_file = 'conditional-formatting.xlsx'
wb = load_workbook(reference_file)
ws = wb.active
rules = ws.conditional_formatting
assert len(rules) == 30
rule = rules['A1:A1048576'][0]
assert dict(rule) == {'priority':'30', 'type': 'colorScale', }
rule = rules['B1:B10'][0]
assert dict(rule) == {'priority': '29', 'type': 'colorScale'}
rule = rules['C1:C10'][0]
assert dict(rule) == {'priority': '28', 'type': 'colorScale'}
rule = rules['D1:D10'][0]
assert dict(rule) == {'priority': '27', 'type': 'colorScale', }
rule = rules['E1:E10'][0]
assert dict(rule) == {'priority': '26', 'type': 'colorScale', }
rule = rules['F1:F10'][0]
assert dict(rule) == {'priority': '25', 'type': 'colorScale', }
rule = rules['G1:G10'][0]
assert dict(rule) == {'priority': '24', 'type': 'colorScale', }
rule = rules['H1:H10'][0]
assert dict(rule) == {'priority': '23', 'type': 'colorScale', }
rule = rules['I1:I10'][0]
assert dict(rule) == {'priority': '22', 'type': 'colorScale', }
rule = rules['J1:J10'][0]
assert dict(rule) == {'priority': '21', 'type': 'colorScale', }
rule = rules['K1:K10'][0]
assert dict(rule) == {'priority': '20', 'type': 'dataBar'}
rule = rules['L1:L10'][0]
assert dict(rule) == {'priority': '19', 'type': 'dataBar'}
rule = rules['M1:M10'][0]
assert dict(rule) == {'priority': '18', 'type': 'dataBar'}
rule = rules['N1:N10'][0]
assert dict(rule) == {'priority': '17', 'type': 'iconSet'}
rule = rules['O1:O10'][0]
assert dict(rule) == {'priority': '16', 'type': 'iconSet'}
rule = rules['P1:P10'][0]
assert dict(rule) == {'priority': '15', 'type': 'iconSet'}
rule = rules['Q1:Q10'][0]
assert dict(rule) == {'text': '3', 'priority': '14', 'dxfId': '27',
'operator': 'containsText', 'type': 'containsText'}
assert rule.dxf == DifferentialStyle(font=Font(color='FF9C0006'),
fill=PatternFill(bgColor='FFFFC7CE')
)
rule = rules['R1:R10'][0]
assert dict(rule) == {'operator': 'between', 'dxfId': '26', 'type':
'cellIs', 'priority': '13'}
assert rule.dxf == DifferentialStyle(font=Font(color='FF9C6500'),
fill=PatternFill(bgColor='FFFFEB9C'))
rule = rules['S1:S10'][0]
assert dict(rule) == {'priority': '12', 'dxfId': '25', 'percent': '1',
'type': 'top10', 'rank': '10'}
rule = rules['T1:T10'][0]
assert dict(rule) == {'priority': '11', 'dxfId': '24', 'type': 'top10',
'rank': '4', 'bottom': '1'}
rule = rules['U1:U10'][0]
assert dict(rule) == {'priority': '10', 'dxfId': '23', 'type':
'aboveAverage'}
rule = rules['V1:V10'][0]
assert dict(rule) == {'aboveAverage': '0', 'dxfId': '22', 'type':
'aboveAverage', 'priority': '9'}
rule = rules['W1:W10'][0]
assert dict(rule) == {'priority': '8', 'dxfId': '21', 'type':
'aboveAverage', 'equalAverage': '1'}
rule = rules['X1:X10'][0]
assert dict(rule) == {'aboveAverage': '0', 'dxfId': '20', 'priority': '7',
'type': 'aboveAverage', 'equalAverage': '1'}
rule = rules['Y1:Y10'][0]
assert dict(rule) == {'priority': '6', 'dxfId': '19', 'type':
'aboveAverage', 'stdDev': '1'}
rule = rules['Z1:Z10'][0]
    assert dict(rule) == {'aboveAverage': '0', 'dxfId': '18', 'type':
'aboveAverage', 'stdDev': '1', 'priority': '5'}
assert rule.dxf == DifferentialStyle(font=Font(b=True, i=True, color='FF9C0006'),
fill=PatternFill(bgColor='FFFFC7CE'),
border=Border(
left=Side(style='thin', color=Color(theme=5)),
right=Side(style='thin', color=Color(theme=5)),
top=Side(style='thin', color=Color(theme=5)),
bottom=Side(style='thin', color=Color(theme=5))
)
)
rule = rules['AA1:AA10'][0]
assert dict(rule) == {'priority': '4', 'dxfId': '17', 'type':
'aboveAverage', 'stdDev': '2'}
rule = rules['AB1:AB10'][0]
assert dict(rule) == {'priority': '3', 'dxfId': '16', 'type':
'duplicateValues'}
rule = rules['AC1:AC10'][0]
assert dict(rule) == {'priority': '2', 'dxfId': '15', 'type':
'uniqueValues'}
rule = rules['AD1:AD10'][0]
assert dict(rule) == {'priority': '1', 'dxfId': '14', 'type': 'expression',}
@pytest.fixture
def ConditionalFormatting():
from ..formatting import ConditionalFormatting
return ConditionalFormatting
class TestConditionalFormatting:
def test_ctor(self, ConditionalFormatting):
cf = ConditionalFormatting(sqref="A1:B5")
xml = tostring(cf.to_tree())
expected = """
<conditionalFormatting sqref="A1:B5" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_tree(self, ConditionalFormatting):
src = """
<conditionalFormatting sqref="A1:B5" />
"""
tree = fromstring(src)
cf = ConditionalFormatting.from_tree(tree)
assert cf.sqref == "A1:B5"
def test_eq(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
c2 = ConditionalFormatting("A1:B5", pivot=True)
assert c1 == c2
def test_hash(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
assert hash(c1) == hash("A1:B5")
def test_repr(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
assert repr(c1) == "<ConditionalFormatting A1:B5>"
def test_contains(self, ConditionalFormatting):
c2 = ConditionalFormatting("A1:A5 B1:B5")
assert "B2" in c2
| true
| true
|
79021dbcba2e186f6f4b712e41dcad4f4c8434a7
| 371
|
py
|
Python
|
raachem/__init__.py
|
ricalmang/raachem
|
d00d634957a27e43e706c7faa565fb15b3cf154c
|
[
"MIT"
] | null | null | null |
raachem/__init__.py
|
ricalmang/raachem
|
d00d634957a27e43e706c7faa565fb15b3cf154c
|
[
"MIT"
] | null | null | null |
raachem/__init__.py
|
ricalmang/raachem
|
d00d634957a27e43e706c7faa565fb15b3cf154c
|
[
"MIT"
] | null | null | null |
from raachem.file_class.gjf import *
from raachem.file_class.inp import *
from raachem.file_class.xyz import *
from raachem.file_class.log import *
from raachem.file_creator.e_analysis import *
from raachem.file_creator.input import *
from raachem.file_creator.xyz import *
from raachem.file_creator.deploy_scripts import *
from raachem.util.gen_purp import *
| 33.727273
| 50
| 0.800539
|
from raachem.file_class.gjf import *
from raachem.file_class.inp import *
from raachem.file_class.xyz import *
from raachem.file_class.log import *
from raachem.file_creator.e_analysis import *
from raachem.file_creator.input import *
from raachem.file_creator.xyz import *
from raachem.file_creator.deploy_scripts import *
from raachem.util.gen_purp import *
| true
| true
|
79021dbf4a6b5d266f763ea1d317aeecc3dde47c
| 5,719
|
py
|
Python
|
torchero/utils/defaults.py
|
juancruzsosa/torchero
|
d1440b7a9c3ab2c1d3abbb282abb9ee1ea240797
|
[
"MIT"
] | 10
|
2020-07-06T13:35:26.000Z
|
2021-08-10T09:46:53.000Z
|
torchero/utils/defaults.py
|
juancruzsosa/torchero
|
d1440b7a9c3ab2c1d3abbb282abb9ee1ea240797
|
[
"MIT"
] | 6
|
2020-07-07T20:52:16.000Z
|
2020-07-14T04:05:02.000Z
|
torchero/utils/defaults.py
|
juancruzsosa/torchero
|
d1440b7a9c3ab2c1d3abbb282abb9ee1ea240797
|
[
"MIT"
] | 1
|
2021-06-28T17:56:11.000Z
|
2021-06-28T17:56:11.000Z
|
from collections.abc import Iterable  # moved out of `collections` in Python 3.3; removed in 3.10
from torch import nn
from torch import optim
from torchero import meters
from functools import partial
INVALID_MODE_INFERENCE_MESSAGE = (
"Could not infer mode from meter {meter}"
)
def get_default_mode(meter):
if hasattr(meter.__class__, 'DEFAULT_MODE'):
return getattr(meter.__class__, 'DEFAULT_MODE')
else:
raise Exception(INVALID_MODE_INFERENCE_MESSAGE
.format(meter=getattr(meter, 'name', meter.__class__.__name__)))
optimizers = {
'asgd': optim.ASGD,
'adadelta': optim.Adadelta,
'adagrad': optim.Adagrad,
'adam': optim.Adam,
'adamax': optim.Adamax,
'lbfgs': optim.LBFGS,
'rmsprop': optim.RMSprop,
'rprop': optim.Rprop,
'sgd': lambda params: optim.SGD(params, lr=1e-2),
'sparseadam': optim.SparseAdam
}
def get_optimizer_by_name(name, model):
if name not in optimizers:
raise KeyError("Optimizer {} not found. "
"Optimizer availables: {}"
.format(repr(name),
', '.join(map(repr, optimizers.keys()))))
return optimizers[name](model.parameters())
losses = {
'l1': nn.L1Loss,
'mse': nn.MSELoss,
'cross_entropy': nn.CrossEntropyLoss,
'nll': nn.NLLLoss,
'poisson_nll': nn.PoissonNLLLoss,
'kl_div': nn.KLDivLoss,
'binary_cross_entropy': nn.BCELoss,
'binary_cross_entropy_wl': nn.BCEWithLogitsLoss,
'margin_ranking': nn.MarginRankingLoss,
'hinge': nn.HingeEmbeddingLoss,
'multi_label_hinge': nn.MultiLabelMarginLoss,
'smooth': nn.SmoothL1Loss,
'soft_margin': nn.SoftMarginLoss,
'multilabel_soft_margin': nn.MultiLabelSoftMarginLoss,
'cosine': nn.CosineEmbeddingLoss,
'multi_hinge': nn.MultiMarginLoss,
'triplet_margin': nn.TripletMarginLoss
}
def get_loss_by_name(name):
if name not in losses:
raise KeyError("Loss {} not found. Losses available: {}"
.format(repr(name),
', '.join(map(repr, losses.keys()))))
return losses[name]()
meters_by_name = {
'mse': meters.MSE,
'rmse': meters.RMSE,
'msle': meters.MSLE,
'rmsle': meters.RMSLE,
'categorical_accuracy': meters.CategoricalAccuracy,
'categorical_accuracy_percentage': lambda: meters.CategoricalAccuracy() * 100.0,
'binary_accuracy': meters.BinaryAccuracy,
'binary_accuracy_percentage': lambda: meters.BinaryAccuracy() * 100,
'binary_accuracy_wl': meters.BinaryWithLogitsAccuracy,
'binary_accuracy_wl_percentage': lambda: meters.BinaryWithLogitsAccuracy() * 100,
'confusion_matrix': meters.ConfusionMatrix,
'confusion_matrix_percentage': lambda: meters.ConfusionMatrix() * 100,
'balanced_accuracy': meters.BalancedAccuracy,
}
for name, metric in (('recall', meters.Recall),
('precision', meters.Precision),
('npv', meters.NPV),
('specificity', meters.Specificity),
('f1', meters.F1Score),
('f2', meters.F2Score)):
meters_by_name.update({
name: metric,
name + '_wl': partial(metric, with_logits=True)
})
for agg_name in ('micro', 'macro', 'weighted'):
meters_by_name.update({
agg_name + '_' + name: partial(metric, with_logits=False, agg=agg_name),
agg_name + '_' + name + '_wl': partial(metric, with_logits=True, agg=agg_name)
})
for name, speed_metric, pace_metric in (('batches', meters.BatchSpeed, meters.BatchPace),
('it', meters.IterSpeed, meters.IterPace)):
for unit_abbr, unit in (('sec', 'second'),
('min', 'minute')):
meters_by_name.update({name + '/' + unit_abbr: partial(speed_metric, time_unit=unit),
unit_abbr + '/' + name.replace('batches', 'batch'): partial(pace_metric, time_unit=unit)})
def get_meters_by_name(name):
if name not in meters_by_name:
raise KeyError("Meter {} not found. Meters available: {}"
.format(repr(name),
', '.join(map(repr, meters_by_name.keys()))))
return meters_by_name[name]()
def parse_meters(meters):
def to_small_case(obj):
if hasattr(obj, 'name'):
s = str(obj.name)
else:
name = obj.__class__.__name__
s = ''
for i in range(len(name)-1):
s += name[i].lower()
if name[i].islower() and not name[i+1].islower():
s += '_'
s += name[-1].lower()
return s
def parse(obj):
if isinstance(obj, str):
return get_meters_by_name(obj)
else:
return obj
def parse_name(obj):
if isinstance(obj, str):
obj = get_meters_by_name(obj)
return to_small_case(obj)
if isinstance(meters, dict):
return {k: parse(v) for k, v in meters.items()}
elif isinstance(meters, Iterable):
return {parse_name(v): parse(v) for v in meters}
else:
raise Exception("Expected iterable meters")
time_units = {'hour': 60*60,
'hours': 60*60,
'minute': 60,
'minutes': 60,
'second': 1,
'seconds': 1}
def parse_time_unit(time_unit):
if isinstance(time_unit, (int, float)):
return time_unit
elif isinstance(time_unit, str) and time_unit in time_units:
return time_units[time_unit]
elif isinstance(time_unit, str):
raise ValueError("Invalid time_unit reference!")
else:
raise TypeError("Invalid type for time_unit")
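# A short sketch of the string-based lookups defined above; the tiny model
# is illustrative and assumes torch plus these helpers are importable.
import torch.nn as nn

model = nn.Linear(4, 2)

optimizer = get_optimizer_by_name("adam", model)         # optim.Adam over model.parameters()
criterion = get_loss_by_name("cross_entropy")            # nn.CrossEntropyLoss instance
metrics = parse_meters(["categorical_accuracy", "mse"])  # dict keyed by snake_case names
print(optimizer, criterion, sorted(metrics))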
| 33.641176
| 121
| 0.600979
|
from collections.abc import Iterable
from torch import nn
from torch import optim
from torchero import meters
from functools import partial
INVALID_MODE_INFERENCE_MESSAGE = (
"Could not infer mode from meter {meter}"
)
def get_default_mode(meter):
if hasattr(meter.__class__, 'DEFAULT_MODE'):
return getattr(meter.__class__, 'DEFAULT_MODE')
else:
raise Exception(INVALID_MODE_INFERENCE_MESSAGE
.format(meter=getattr(meter, 'name', meter.__class__.__name__)))
optimizers = {
'asgd': optim.ASGD,
'adadelta': optim.Adadelta,
'adagrad': optim.Adagrad,
'adam': optim.Adam,
'adamax': optim.Adamax,
'lbfgs': optim.LBFGS,
'rmsprop': optim.RMSprop,
'rprop': optim.Rprop,
'sgd': lambda params: optim.SGD(params, lr=1e-2),
'sparseadam': optim.SparseAdam
}
def get_optimizer_by_name(name, model):
if name not in optimizers:
raise KeyError("Optimizer {} not found. "
"Optimizer availables: {}"
.format(repr(name),
', '.join(map(repr, optimizers.keys()))))
return optimizers[name](model.parameters())
losses = {
'l1': nn.L1Loss,
'mse': nn.MSELoss,
'cross_entropy': nn.CrossEntropyLoss,
'nll': nn.NLLLoss,
'poisson_nll': nn.PoissonNLLLoss,
'kl_div': nn.KLDivLoss,
'binary_cross_entropy': nn.BCELoss,
'binary_cross_entropy_wl': nn.BCEWithLogitsLoss,
'margin_ranking': nn.MarginRankingLoss,
'hinge': nn.HingeEmbeddingLoss,
'multi_label_hinge': nn.MultiLabelMarginLoss,
'smooth': nn.SmoothL1Loss,
'soft_margin': nn.SoftMarginLoss,
'multilabel_soft_margin': nn.MultiLabelSoftMarginLoss,
'cosine': nn.CosineEmbeddingLoss,
'multi_hinge': nn.MultiMarginLoss,
'triplet_margin': nn.TripletMarginLoss
}
def get_loss_by_name(name):
if name not in losses:
raise KeyError("Loss {} not found. Losses available: {}"
.format(repr(name),
', '.join(map(repr, losses.keys()))))
return losses[name]()
meters_by_name = {
'mse': meters.MSE,
'rmse': meters.RMSE,
'msle': meters.MSLE,
'rmsle': meters.RMSLE,
'categorical_accuracy': meters.CategoricalAccuracy,
'categorical_accuracy_percentage': lambda: meters.CategoricalAccuracy() * 100.0,
'binary_accuracy': meters.BinaryAccuracy,
'binary_accuracy_percentage': lambda: meters.BinaryAccuracy() * 100,
'binary_accuracy_wl': meters.BinaryWithLogitsAccuracy,
'binary_accuracy_wl_percentage': lambda: meters.BinaryWithLogitsAccuracy() * 100,
'confusion_matrix': meters.ConfusionMatrix,
'confusion_matrix_percentage': lambda: meters.ConfusionMatrix() * 100,
'balanced_accuracy': meters.BalancedAccuracy,
}
for name, metric in (('recall', meters.Recall),
('precision', meters.Precision),
('npv', meters.NPV),
('specificity', meters.Specificity),
('f1', meters.F1Score),
('f2', meters.F2Score)):
meters_by_name.update({
name: metric,
name + '_wl': partial(metric, with_logits=True)
})
for agg_name in ('micro', 'macro', 'weighted'):
meters_by_name.update({
agg_name + '_' + name: partial(metric, with_logits=False, agg=agg_name),
agg_name + '_' + name + '_wl': partial(metric, with_logits=True, agg=agg_name)
})
for name, speed_metric, pace_metric in (('batches', meters.BatchSpeed, meters.BatchPace),
('it', meters.IterSpeed, meters.IterPace)):
for unit_abbr, unit in (('sec', 'second'),
('min', 'minute')):
meters_by_name.update({name + '/' + unit_abbr: partial(speed_metric, time_unit=unit),
unit_abbr + '/' + name.replace('batches', 'batch'): partial(pace_metric, time_unit=unit)})
def get_meters_by_name(name):
if name not in meters_by_name:
raise KeyError("Meter {} not found. Meters available: {}"
.format(repr(name),
', '.join(map(repr, meters_by_name.keys()))))
return meters_by_name[name]()
def parse_meters(meters):
def to_small_case(obj):
if hasattr(obj, 'name'):
s = str(obj.name)
else:
name = obj.__class__.__name__
s = ''
for i in range(len(name)-1):
s += name[i].lower()
if name[i].islower() and not name[i+1].islower():
s += '_'
s += name[-1].lower()
return s
def parse(obj):
if isinstance(obj, str):
return get_meters_by_name(obj)
else:
return obj
def parse_name(obj):
if isinstance(obj, str):
obj = get_meters_by_name(obj)
return to_small_case(obj)
if isinstance(meters, dict):
return {k: parse(v) for k, v in meters.items()}
elif isinstance(meters, Iterable):
return {parse_name(v): parse(v) for v in meters}
else:
raise Exception("Expected iterable meters")
time_units = {'hour': 60*60,
'hours': 60*60,
'minute': 60,
'minutes': 60,
'second': 1,
'seconds': 1}
def parse_time_unit(time_unit):
if isinstance(time_unit, (int, float)):
return time_unit
elif isinstance(time_unit, str) and time_unit in time_units:
return time_units[time_unit]
elif isinstance(time_unit, str):
raise ValueError("Invalid time_unit reference!")
else:
raise TypeError("Invalid type for time_unit")
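# A minimal usage sketch, assuming torch and the local `meters` module are
# importable; the looked-up names below are illustrative:
if __name__ == '__main__':
    loss = get_loss_by_name('mse')                      # nn.MSELoss()
    meter = get_meters_by_name('categorical_accuracy')  # meters.CategoricalAccuracy()
    parsed = parse_meters(['mse', 'rmse'])              # {'mse': ..., 'rmse': ...}
    assert parse_time_unit('minute') == 60
    print(type(loss).__name__, type(meter).__name__, sorted(parsed))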
| true
| true
|
79021f169b28382834d2d33f1ae6c402b7b264ac
| 1,025
|
py
|
Python
|
py/umpire/server/migrations/0010.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 3
|
2022-01-06T16:52:52.000Z
|
2022-03-07T11:30:47.000Z
|
py/umpire/server/migrations/0010.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | null | null | null |
py/umpire/server/migrations/0010.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 1
|
2021-10-24T01:47:22.000Z
|
2021-10-24T01:47:22.000Z
|
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import json
import os
_ENV_DIR = '/var/db/factory/umpire'
_CONFIG_PATH = os.path.join(_ENV_DIR, 'active_umpire.json')
def SaveNewActiveConfig(config):
"""Serialize and saves the configuration as new active config file."""
json_config = json.dumps(
config, indent=2, separators=(',', ': '), sort_keys=True) + '\n'
json_name = 'umpire.%s.json' % (
hashlib.md5(json_config.encode('utf-8')).hexdigest())
json_path = os.path.join('resources', json_name)
with open(os.path.join(_ENV_DIR, json_path), 'w') as f:
f.write(json_config)
os.unlink(_CONFIG_PATH)
os.symlink(json_path, _CONFIG_PATH)
def Migrate():
with open('/var/db/factory/umpire/active_umpire.json') as f:
config = json.load(f)
if 'rulesets' in config:
for r in config['rulesets']:
r.pop('match', None)
SaveNewActiveConfig(config)
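# A minimal sketch of the transformation this migration performs, with
# hypothetical field values:
#     {'rulesets': [{'bundle_id': 'b1', 'match': {...}}]}
#  -> {'rulesets': [{'bundle_id': 'b1'}]}
# The rewritten config is then stored under a content-addressed name
# (umpire.<md5>.json) and the active-config symlink is repointed to it.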
| 29.285714
| 72
| 0.702439
|
import hashlib
import json
import os
_ENV_DIR = '/var/db/factory/umpire'
_CONFIG_PATH = os.path.join(_ENV_DIR, 'active_umpire.json')
def SaveNewActiveConfig(config):
json_config = json.dumps(
config, indent=2, separators=(',', ': '), sort_keys=True) + '\n'
json_name = 'umpire.%s.json' % (
hashlib.md5(json_config.encode('utf-8')).hexdigest())
json_path = os.path.join('resources', json_name)
with open(os.path.join(_ENV_DIR, json_path), 'w') as f:
f.write(json_config)
os.unlink(_CONFIG_PATH)
os.symlink(json_path, _CONFIG_PATH)
def Migrate():
with open('/var/db/factory/umpire/active_umpire.json') as f:
config = json.load(f)
if 'rulesets' in config:
for r in config['rulesets']:
r.pop('match', None)
SaveNewActiveConfig(config)
| true
| true
|
790220292734acc000c5c1f43fcdc72877f94430
| 3,583
|
py
|
Python
|
tests/mock.py
|
sahithyaravi1493/modAL
|
39336f21cd872974cf2f34c1c79012ca30a96819
|
[
"MIT"
] | 1,460
|
2018-10-18T18:40:59.000Z
|
2022-03-30T18:00:12.000Z
|
tests/mock.py
|
sahithyaravi1493/modAL
|
39336f21cd872974cf2f34c1c79012ca30a96819
|
[
"MIT"
] | 124
|
2018-10-31T06:48:18.000Z
|
2022-03-25T06:09:25.000Z
|
tests/mock.py
|
sahithyaravi1493/modAL
|
39336f21cd872974cf2f34c1c79012ca30a96819
|
[
"MIT"
] | 236
|
2018-10-19T01:16:21.000Z
|
2022-03-05T02:05:31.000Z
|
from sklearn.exceptions import NotFittedError
class MockFunction:
"""
Mock utility function for testing.
"""
def __init__(self, return_val):
self.return_val = return_val
def __call__(self, *args):
return self.return_val
class MockEstimator:
"""
Mock classifier object for testing.
"""
def __init__(
self, predict_proba_return=None, predict_return=None, score_return=None,
classes_=None, fitted=True
):
self.fitted = fitted
if fitted:
self.classes_ = classes_
self.predict_return = predict_return
self.predict_proba_return = predict_proba_return
self.score_return = score_return
def fit(self, *args, **kwargs):
pass
def predict(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_return
def predict_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_proba_return
def score(self, *args, **kwargs):
return self.score_return
class MockActiveLearner:
"""
Mock ActiveLearner for testing.
"""
def __init__(
self, predictor=None, query_strategy=None,
predict_proba_return=None, calculate_utility_return=None, predict_return=None, score_return=None,
_X_initial=None, _y_initial=None
):
self.estimator = predictor
self.query_strategy = query_strategy
self.predict_proba_return = predict_proba_return
self.calculate_utility_return = calculate_utility_return
self.predict_return = predict_return
self.score_return = score_return
def fit(self, *args, **kwargs):
pass
def predict(self, *args, **kwargs):
return self.predict_return
def predict_proba(self, *args, **kwargs):
return self.predict_proba_return
def score(self, *args, **kwargs):
return self.score_return
class MockCommittee:
"""
Mock Committee for testing.
"""
def __init__(
self, n_learners=1, classes_=None, fitted=True,
calculate_disagreement_return=None,
predict_return=None, predict_proba_return=None,
vote_return=None, vote_proba_return=None
):
self.fitted = fitted
self.n_learners = n_learners
if fitted:
self.classes_ = classes_
else:
self.classes_ = None
self.calculate_disagreement_return = calculate_disagreement_return
self.predict_return = predict_return
self.predict_proba_return = predict_proba_return
self.vote_return = vote_return
self.vote_proba_return = vote_proba_return
def __len__(self):
return self.n_learners
def __iter__(self):
for x in range(self.n_learners):
yield x
def _calculate_disagreement(self, *args, **kwargs):
return self.calculate_disagreement_return
def predict(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_return
def predict_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_proba_return
def vote(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.vote_return
def vote_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.vote_proba_return
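# A minimal usage sketch (return values below are illustrative placeholders):
if __name__ == '__main__':
    est = MockEstimator(predict_return=[1, 0], classes_=[0, 1])
    assert est.predict() == [1, 0]
    unfitted = MockEstimator(fitted=False)
    try:
        unfitted.predict()
    except NotFittedError:
        print('unfitted mock raises NotFittedError, like a real estimator')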
| 25.963768
| 109
| 0.640246
|
from sklearn.exceptions import NotFittedError
class MockFunction:
def __init__(self, return_val):
self.return_val = return_val
def __call__(self, *args):
return self.return_val
class MockEstimator:
def __init__(
self, predict_proba_return=None, predict_return=None, score_return=None,
classes_=None, fitted=True
):
self.fitted = fitted
if fitted:
self.classes_ = classes_
self.predict_return = predict_return
self.predict_proba_return = predict_proba_return
self.score_return = score_return
def fit(self, *args, **kwargs):
pass
def predict(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_return
def predict_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_proba_return
def score(self, *args, **kwargs):
return self.score_return
class MockActiveLearner:
def __init__(
self, predictor=None, query_strategy=None,
predict_proba_return=None, calculate_utility_return=None, predict_return=None, score_return=None,
_X_initial=None, _y_initial=None
):
self.estimator = predictor
self.query_strategy = query_strategy
self.predict_proba_return = predict_proba_return
self.calculate_utility_return = calculate_utility_return
self.predict_return = predict_return
self.score_return = score_return
def fit(self, *args, **kwargs):
pass
def predict(self, *args, **kwargs):
return self.predict_return
def predict_proba(self, *args, **kwargs):
return self.predict_proba_return
def score(self, *args, **kwargs):
return self.score_return
class MockCommittee:
def __init__(
self, n_learners=1, classes_=None, fitted=True,
calculate_disagreement_return=None,
predict_return=None, predict_proba_return=None,
vote_return=None, vote_proba_return=None
):
self.fitted = fitted
self.n_learners = n_learners
if fitted:
self.classes_ = classes_
else:
self.classes_ = None
self.calculate_disagreement_return = calculate_disagreement_return
self.predict_return = predict_return
self.predict_proba_return = predict_proba_return
self.vote_return = vote_return
self.vote_proba_return = vote_proba_return
def __len__(self):
return self.n_learners
def __iter__(self):
for x in range(self.n_learners):
yield x
def _calculate_disagreement(self, *args, **kwargs):
return self.calculate_disagreement_return
def predict(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_return
def predict_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_proba_return
def vote(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.vote_return
def vote_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.vote_proba_return
| true
| true
|
790220338e52fd95ded98a8563e8b02afe740b69
| 1,373
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/connection_monitor_query_result.py
|
bgsky/azure-sdk-for-python
|
ec18d0b25be10fddbde416b901b905dfb0896430
|
[
"MIT"
] | null | null | null |
azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/connection_monitor_query_result.py
|
bgsky/azure-sdk-for-python
|
ec18d0b25be10fddbde416b901b905dfb0896430
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/connection_monitor_query_result.py
|
bgsky/azure-sdk-for-python
|
ec18d0b25be10fddbde416b901b905dfb0896430
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectionMonitorQueryResult(Model):
"""List of connection states snaphots.
:param source_status: Status of connection monitor source. Possible values
include: 'Uknown', 'Active', 'Inactive'
:type source_status: str or
~azure.mgmt.network.v2018_10_01.models.ConnectionMonitorSourceStatus
:param states: Information about connection states.
:type states:
list[~azure.mgmt.network.v2018_10_01.models.ConnectionStateSnapshot]
"""
_attribute_map = {
'source_status': {'key': 'sourceStatus', 'type': 'str'},
'states': {'key': 'states', 'type': '[ConnectionStateSnapshot]'},
}
def __init__(self, **kwargs):
super(ConnectionMonitorQueryResult, self).__init__(**kwargs)
self.source_status = kwargs.get('source_status', None)
self.states = kwargs.get('states', None)
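# A minimal construction sketch, assuming msrest is installed; the field
# values are illustrative:
#     result = ConnectionMonitorQueryResult(source_status='Active', states=[])
#     result.source_status  # -> 'Active'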
| 38.138889
| 78
| 0.633649
|
from msrest.serialization import Model
class ConnectionMonitorQueryResult(Model):
_attribute_map = {
'source_status': {'key': 'sourceStatus', 'type': 'str'},
'states': {'key': 'states', 'type': '[ConnectionStateSnapshot]'},
}
def __init__(self, **kwargs):
super(ConnectionMonitorQueryResult, self).__init__(**kwargs)
self.source_status = kwargs.get('source_status', None)
self.states = kwargs.get('states', None)
| true
| true
|
790221c7c259c055129b58e15300b854208937b1
| 2,775
|
py
|
Python
|
test/data_check/check_linearity_fft.py
|
kjdavidson/NoisePy
|
a7445dd2f68f64cb562d6a87096e5f12a2c3b612
|
[
"MIT"
] | 74
|
2019-11-08T18:32:36.000Z
|
2022-03-27T11:26:53.000Z
|
test/data_check/check_linearity_fft.py
|
kjdavidson/NoisePy
|
a7445dd2f68f64cb562d6a87096e5f12a2c3b612
|
[
"MIT"
] | 23
|
2019-11-10T01:30:04.000Z
|
2022-03-24T10:23:19.000Z
|
test/data_check/check_linearity_fft.py
|
kjdavidson/NoisePy
|
a7445dd2f68f64cb562d6a87096e5f12a2c3b612
|
[
"MIT"
] | 36
|
2019-11-08T19:36:28.000Z
|
2022-02-17T06:31:42.000Z
|
import pyasdf
import numpy as np
import scipy.fftpack
import matplotlib.pyplot as plt
'''
this script takes a chunk of noise spectra for a station pair and
compares their cross-correlation functions computed with two schemes:
one averages the cross-spectra in the frequency domain and the other
averages the correlations in the time domain
'''
def cross_correlation1(fft1,fft2,maxlag,dt,Nfft):
    #------cross-correlate each window, then average in the time domain------
corr = np.zeros(fft1.shape,dtype=np.complex64)
corr = np.conj(fft1) * fft2
ncorr = np.zeros((fft1.shape[0],Nfft),dtype=np.complex64)
ncorr[:,:Nfft//2] = corr[:,:]
ncorr[:,-(Nfft//2)+1:]=np.flip(np.conj(ncorr[:,1:(Nfft//2)]),axis=1)
ncorr[:,0]=complex(0,0)
ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=1)))
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
ncorr = ncorr[:,ind]
ncorr = np.mean(ncorr,axis=0)
return ncorr
def cross_correlation2(fft1,fft2,maxlag,dt,Nfft):
    #------average the cross-spectra first, then one inverse FFT------
corr = np.zeros(fft1.shape,dtype=np.complex64)
corr = np.conj(fft1) * fft2
ncorr = np.zeros(shape=Nfft,dtype=np.complex64)
ncorr[:Nfft//2] = np.mean(corr,axis=0)
ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)
ncorr[0]=complex(0,0)
ncorr = np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0))
print(ncorr.real,ncorr.imag)
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
ncorr = ncorr[ind]
return ncorr
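# note: the inverse FFT is linear, so averaging per-window correlations in the
# time domain (cross_correlation1) and averaging the cross-spectra before a
# single inverse FFT (cross_correlation2) should agree up to numerical precision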
#-----common parameters------
iday = '2010_01_10'
icomp = 'EHZ'
dt = 0.05
maxlag = 800
sfile1 = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/FFT/N.AC2H.h5'
sfile2 = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/FFT/N.CHHH.h5'
#-----------reading the data------------
ds1 = pyasdf.ASDFDataSet(sfile1,mode='r')
ds2 = pyasdf.ASDFDataSet(sfile2,mode='r')
spect1 = ds1.auxiliary_data[icomp][iday].data[:]
spect2 = ds2.auxiliary_data[icomp][iday].data[:]
std1 = ds1.auxiliary_data[icomp][iday].parameters['std']
std2 = ds2.auxiliary_data[icomp][iday].parameters['std']
nwin = spect1.shape[0]
nfft = spect1.shape[1]*2
print('data dimension for spect1 and spect2 are %d and %d' % (spect1.ndim,spect2.ndim))
#------select the sections-------
indx1 = np.where(std1<10)[0]
indx2 = np.where(std2<10)[0]
bb=np.intersect1d(indx1,indx2)
print(spect1[bb,:],spect2[bb,:])
corr1=cross_correlation1(spect1[bb,:],spect2[bb,:],np.round(maxlag),dt,nfft)
corr2=cross_correlation2(spect1[bb,:],spect2[bb,:],np.round(maxlag),dt,nfft)
#---plotting----
plt.subplot(311)
plt.plot(corr1)
plt.subplot(312)
plt.plot(corr2)
plt.subplot(313)
plt.plot(corr2)
plt.plot(corr1)
plt.show()
| 31.896552
| 87
| 0.670991
|
import pyasdf
import numpy as np
import scipy.fftpack
import matplotlib.pyplot as plt
def cross_correlation1(fft1,fft2,maxlag,dt,Nfft):
corr = np.zeros(fft1.shape,dtype=np.complex64)
corr = np.conj(fft1) * fft2
ncorr = np.zeros((fft1.shape[0],Nfft),dtype=np.complex64)
ncorr[:,:Nfft//2] = corr[:,:]
ncorr[:,-(Nfft//2)+1:]=np.flip(np.conj(ncorr[:,1:(Nfft//2)]),axis=1)
ncorr[:,0]=complex(0,0)
ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=1)))
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
ncorr = ncorr[:,ind]
ncorr = np.mean(ncorr,axis=0)
return ncorr
def cross_correlation2(fft1,fft2,maxlag,dt,Nfft):
corr = np.zeros(fft1.shape,dtype=np.complex64)
corr = np.conj(fft1) * fft2
ncorr = np.zeros(shape=Nfft,dtype=np.complex64)
ncorr[:Nfft//2] = np.mean(corr,axis=0)
ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)
ncorr[0]=complex(0,0)
ncorr = np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0))
print(ncorr.real,ncorr.imag)
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
ncorr = ncorr[ind]
return ncorr
iday = '2010_01_10'
icomp = 'EHZ'
dt = 0.05
maxlag = 800
sfile1 = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/FFT/N.AC2H.h5'
sfile2 = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/FFT/N.CHHH.h5'
ds1 = pyasdf.ASDFDataSet(sfile1,mode='r')
ds2 = pyasdf.ASDFDataSet(sfile2,mode='r')
spect1 = ds1.auxiliary_data[icomp][iday].data[:]
spect2 = ds2.auxiliary_data[icomp][iday].data[:]
std1 = ds1.auxiliary_data[icomp][iday].parameters['std']
std2 = ds2.auxiliary_data[icomp][iday].parameters['std']
nwin = spect1.shape[0]
nfft = spect1.shape[1]*2
print('data dimension for spect1 and spect2 are %d and %d' % (spect1.ndim,spect2.ndim))
indx1 = np.where(std1<10)[0]
indx2 = np.where(std2<10)[0]
bb=np.intersect1d(indx1,indx2)
print(spect1[bb,:],spect2[bb,:])
corr1=cross_correlation1(spect1[bb,:],spect2[bb,:],np.round(maxlag),dt,nfft)
corr2=cross_correlation2(spect1[bb,:],spect2[bb,:],np.round(maxlag),dt,nfft)
plt.subplot(311)
plt.plot(corr1)
plt.subplot(312)
plt.plot(corr2)
plt.subplot(313)
plt.plot(corr2)
plt.plot(corr1)
plt.show()
| true
| true
|
7902223521790ef3aa633aeb20fc1246cb025f63
| 178
|
py
|
Python
|
GodwillOnyewuchi/Phase 1/Python Basic 2/day 9 task/task 9.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
GodwillOnyewuchi/Phase 1/Python Basic 2/day 9 task/task 9.py
|
GREENFONTS/python-challenge-solutions
|
a9aad85a250892fe41961a7d5e77f67b8d14fc1b
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
GodwillOnyewuchi/Phase 1/Python Basic 2/day 9 task/task 9.py
|
GREENFONTS/python-challenge-solutions
|
a9aad85a250892fe41961a7d5e77f67b8d14fc1b
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
#Python program to get the size of an object in bytes
import sys
Object = input("Enter any object: ")
print(f'The size of the object {Object} is {sys.getsizeof(Object)} bytes')
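# e.g. typing abc at the prompt measures the string 'abc'; on 64-bit
# CPython 3 this prints roughly 52 bytes (the exact figure is
# implementation-dependent)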
| 29.666667
| 74
| 0.735955
|
import sys
Object = input("Enter any object: ")
print(f'The size of the object {Object} is {sys.getsizeof(Object)} bytes')
| true
| true
|
79022247169ae3c9e16acb9d5f54fe737a46879d
| 25,303
|
py
|
Python
|
WebKit/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
|
JavaScriptTesting/LJS
|
9818dbdb421036569fff93124ac2385d45d01c3a
|
[
"Apache-2.0"
] | 1
|
2019-06-18T06:52:54.000Z
|
2019-06-18T06:52:54.000Z
|
WebKit/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
|
JavaScriptTesting/LJS
|
9818dbdb421036569fff93124ac2385d45d01c3a
|
[
"Apache-2.0"
] | null | null | null |
WebKit/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
|
JavaScriptTesting/LJS
|
9818dbdb421036569fff93124ac2385d45d01c3a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import subprocess
import sys
import time
import urllib2
import xml.dom.minidom
from webkitpy.common.net.file_uploader import FileUploader
try:
import json
except ImportError:
# python 2.5 compatibility
import webkitpy.thirdparty.simplejson as json
# A JSON results generator for generic tests.
# FIXME: move this code out of the layout_package directory.
_log = logging.getLogger(__name__)
_JSON_PREFIX = "ADD_RESULTS("
_JSON_SUFFIX = ");"
def has_json_wrapper(string):
return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX)
def strip_json_wrapper(json_content):
# FIXME: Kill this code once the server returns json instead of jsonp.
if has_json_wrapper(json_content):
return json_content[len(_JSON_PREFIX):len(json_content) - len(_JSON_SUFFIX)]
return json_content
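# e.g. strip_json_wrapper('ADD_RESULTS({"tests":{}});') -> '{"tests":{}}'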
def load_json(filesystem, file_path):
content = filesystem.read_text_file(file_path)
content = strip_json_wrapper(content)
return json.loads(content)
def write_json(filesystem, json_object, file_path, callback=None):
# Specify separators in order to get compact encoding.
json_string = json.dumps(json_object, separators=(',', ':'))
if callback:
json_string = callback + "(" + json_string + ");"
filesystem.write_text_file(file_path, json_string)
def convert_trie_to_flat_paths(trie, prefix=None):
"""Converts the directory structure in the given trie to flat paths, prepending a prefix to each."""
result = {}
for name, data in trie.iteritems():
if prefix:
name = prefix + "/" + name
if len(data) and not "results" in data:
result.update(convert_trie_to_flat_paths(data, name))
else:
result[name] = data
return result
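# e.g. {'foo': {'bar.html': {'results': [...]}}} flattens to
# {'foo/bar.html': {'results': [...]}}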
def add_path_to_trie(path, value, trie):
"""Inserts a single flat directory path and associated value into a directory trie structure."""
if not "/" in path:
trie[path] = value
return
directory, slash, rest = path.partition("/")
if not directory in trie:
trie[directory] = {}
add_path_to_trie(rest, value, trie[directory])
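# e.g. starting from trie = {}, add_path_to_trie('foo/bar.html', 7, trie)
# leaves trie == {'foo': {'bar.html': 7}}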
def test_timings_trie(port, individual_test_timings):
"""Breaks a test name into chunks by directory and puts the test time as a value in the lowest part, e.g.
foo/bar/baz.html: 1ms
foo/bar/baz1.html: 3ms
becomes
foo: {
bar: {
baz.html: 1,
baz1.html: 3
}
}
"""
trie = {}
for test_result in individual_test_timings:
test = test_result.test_name
add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)
return trie
# FIXME: We already have a TestResult class in test_results.py
class TestResult(object):
"""A simple class that represents a single test result."""
# Test modifier constants.
(NONE, FAILS, FLAKY, DISABLED) = range(4)
def __init__(self, test, failed=False, elapsed_time=0):
self.test_name = test
self.failed = failed
self.test_run_time = elapsed_time
test_name = test
try:
test_name = test.split('.')[1]
except IndexError:
_log.warn("Invalid test name: %s.", test)
pass
if test_name.startswith('FAILS_'):
self.modifier = self.FAILS
elif test_name.startswith('FLAKY_'):
self.modifier = self.FLAKY
elif test_name.startswith('DISABLED_'):
self.modifier = self.DISABLED
else:
self.modifier = self.NONE
def fixable(self):
return self.failed or self.modifier == self.DISABLED
class JSONResultsGeneratorBase(object):
"""A JSON results generator for generic tests."""
MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
# Min time (seconds) that will be added to the JSON.
MIN_TIME = 1
# Note that in non-chromium tests those chars are used to indicate
# test modifiers (FAILS, FLAKY, etc) but not actual test results.
PASS_RESULT = "P"
SKIP_RESULT = "X"
FAIL_RESULT = "F"
FLAKY_RESULT = "L"
NO_DATA_RESULT = "N"
MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
TestResult.DISABLED: SKIP_RESULT,
TestResult.FAILS: FAIL_RESULT,
TestResult.FLAKY: FLAKY_RESULT}
VERSION = 4
VERSION_KEY = "version"
RESULTS = "results"
TIMES = "times"
BUILD_NUMBERS = "buildNumbers"
TIME = "secondsSinceEpoch"
TESTS = "tests"
FIXABLE_COUNT = "fixableCount"
FIXABLE = "fixableCounts"
ALL_FIXABLE_COUNT = "allFixableCount"
RESULTS_FILENAME = "results.json"
TIMES_MS_FILENAME = "times_ms.json"
INCREMENTAL_RESULTS_FILENAME = "incremental_results.json"
URL_FOR_TEST_LIST_JSON = "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&master=%s"
# FIXME: Remove generate_incremental_results once the reference to it in
# http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/gtest_slave_utils.py
# has been removed.
def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_results_map, svn_repositories=None,
test_results_server=None,
test_type="",
master_name="",
generate_incremental_results=None):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
        Args:
port: port-specific wrapper
builder_name: the builder name (e.g. Webkit).
build_name: the build name (e.g. webkit-rel).
build_number: the build number.
results_file_base_path: Absolute path to the directory containing the
results json file.
builder_base_url: the URL where we have the archived test results.
If this is None no archived results will be retrieved.
test_results_map: A dictionary that maps test_name to TestResult.
svn_repositories: A (json_field_name, svn_path) pair for SVN
repositories that tests rely on. The SVN revision will be
included in the JSON with the given json_field_name.
test_results_server: server that hosts test results json.
test_type: test type string (e.g. 'layout-tests').
master_name: the name of the buildbot master.
"""
self._port = port
self._filesystem = port._filesystem
self._builder_name = builder_name
self._build_name = build_name
self._build_number = build_number
self._builder_base_url = builder_base_url
self._results_directory = results_file_base_path
self._test_results_map = test_results_map
self._test_results = test_results_map.values()
self._svn_repositories = svn_repositories
if not self._svn_repositories:
self._svn_repositories = {}
self._test_results_server = test_results_server
self._test_type = test_type
self._master_name = master_name
self._archived_results = None
def generate_json_output(self):
json_object = self.get_json()
if json_object:
file_path = self._filesystem.join(self._results_directory, self.INCREMENTAL_RESULTS_FILENAME)
write_json(self._filesystem, json_object, file_path)
def generate_times_ms_file(self):
# FIXME: rename to generate_times_ms_file. This needs to be coordinated with
# changing the calls to this on the chromium build slaves.
times = test_timings_trie(self._port, self._test_results_map.values())
file_path = self._filesystem.join(self._results_directory, self.TIMES_MS_FILENAME)
write_json(self._filesystem, times, file_path)
def get_json(self):
"""Gets the results for the results.json file."""
results_json = {}
if not results_json:
results_json, error = self._get_archived_json_results()
if error:
# If there was an error don't write a results.json
# file at all as it would lose all the information on the
# bot.
_log.error("Archive directory is inaccessible. Not "
"modifying or clobbering the results.json "
"file: " + str(error))
return None
builder_name = self._builder_name
if results_json and builder_name not in results_json:
_log.debug("Builder name (%s) is not in the results.json file."
% builder_name)
self._convert_json_to_current_version(results_json)
if builder_name not in results_json:
results_json[builder_name] = (
self._create_results_for_builder_json())
results_for_builder = results_json[builder_name]
self._insert_generic_metadata(results_for_builder)
self._insert_failure_summaries(results_for_builder)
# Update the all failing tests with result type and time.
tests = results_for_builder[self.TESTS]
all_failing_tests = self._get_failed_test_names()
all_failing_tests.update(convert_trie_to_flat_paths(tests))
for test in all_failing_tests:
self._insert_test_time_and_result(test, tests)
return results_json
def set_archived_results(self, archived_results):
self._archived_results = archived_results
def upload_json_files(self, json_files):
"""Uploads the given json_files to the test_results_server (if the
test_results_server is given)."""
if not self._test_results_server:
return
if not self._master_name:
_log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
return
_log.info("Uploading JSON files for builder: %s", self._builder_name)
attrs = [("builder", self._builder_name),
("testtype", self._test_type),
("master", self._master_name)]
files = [(file, self._filesystem.join(self._results_directory, file))
for file in json_files]
url = "http://%s/testfile/upload" % self._test_results_server
uploader = FileUploader(url)
try:
# Set uploading timeout in case appengine server is having problem.
# 120 seconds are more than enough to upload test results.
uploader.upload(attrs, files, 120)
except Exception, err:
_log.error("Upload failed: %s" % err)
return
_log.info("JSON files uploaded.")
def _get_test_timing(self, test_name):
"""Returns test timing data (elapsed time) in second
for the given test_name."""
if test_name in self._test_results_map:
# Floor for now to get time in seconds.
return int(self._test_results_map[test_name].test_run_time)
return 0
def _get_failed_test_names(self):
"""Returns a set of failed test names."""
return set([r.test_name for r in self._test_results if r.failed])
def _get_modifier_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
for the given test_name.
"""
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
if test_result.modifier in self.MODIFIER_TO_CHAR.keys():
return self.MODIFIER_TO_CHAR[test_result.modifier]
return self.__class__.PASS_RESULT
def _get_result_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
for the given test_name.
"""
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
if test_result.modifier == TestResult.DISABLED:
return self.__class__.SKIP_RESULT
if test_result.failed:
return self.__class__.FAIL_RESULT
return self.__class__.PASS_RESULT
# FIXME: Callers should use scm.py instead.
# FIXME: Identify and fix the run-time errors that were observed on Windows
# chromium buildbot when we had updated this code to use scm.py once before.
def _get_svn_revision(self, in_directory):
"""Returns the svn revision for the given directory.
Args:
in_directory: The directory where svn is to be run.
"""
if self._filesystem.exists(self._filesystem.join(in_directory, '.svn')):
# Note: Not thread safe: http://bugs.python.org/issue2320
output = subprocess.Popen(["svn", "info", "--xml"],
cwd=in_directory,
shell=(sys.platform == 'win32'),
stdout=subprocess.PIPE).communicate()[0]
try:
dom = xml.dom.minidom.parseString(output)
return dom.getElementsByTagName('entry')[0].getAttribute(
'revision')
except xml.parsers.expat.ExpatError:
return ""
return ""
def _get_archived_json_results(self):
"""Download JSON file that only contains test
name list from test-results server. This is for generating incremental
        JSON so the generated file has info for tests that failed before but
        pass or are skipped in the current run.
Returns (archived_results, error) tuple where error is None if results
were successfully read.
"""
results_json = {}
old_results = None
error = None
if not self._test_results_server:
return {}, None
results_file_url = (self.URL_FOR_TEST_LIST_JSON %
(urllib2.quote(self._test_results_server),
urllib2.quote(self._builder_name),
self.RESULTS_FILENAME,
urllib2.quote(self._test_type),
urllib2.quote(self._master_name)))
try:
# FIXME: We should talk to the network via a Host object.
results_file = urllib2.urlopen(results_file_url)
info = results_file.info()
old_results = results_file.read()
except urllib2.HTTPError, http_error:
# A non-4xx status code means the bot is hosed for some reason
# and we can't grab the results.json file off of it.
            if (http_error.code < 400 or http_error.code >= 500):
error = http_error
except urllib2.URLError, url_error:
error = url_error
if old_results:
# Strip the prefix and suffix so we can get the actual JSON object.
old_results = strip_json_wrapper(old_results)
try:
results_json = json.loads(old_results)
except:
_log.debug("results.json was not valid JSON. Clobbering.")
# The JSON file is not valid JSON. Just clobber the results.
results_json = {}
else:
_log.debug('Old JSON results do not exist. Starting fresh.')
results_json = {}
return results_json, error
def _insert_failure_summaries(self, results_for_builder):
"""Inserts aggregate pass/failure statistics into the JSON.
This method reads self._test_results and generates
FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.
Args:
results_for_builder: Dictionary containing the test results for a
single builder.
"""
# Insert the number of tests that failed or skipped.
fixable_count = len([r for r in self._test_results if r.fixable()])
self._insert_item_into_raw_list(results_for_builder,
fixable_count, self.FIXABLE_COUNT)
# Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
entry = {}
for test_name in self._test_results_map.iterkeys():
result_char = self._get_modifier_char(test_name)
entry[result_char] = entry.get(result_char, 0) + 1
# Insert the pass/skip/failure summary dictionary.
self._insert_item_into_raw_list(results_for_builder, entry,
self.FIXABLE)
# Insert the number of all the tests that are supposed to pass.
all_test_count = len(self._test_results)
self._insert_item_into_raw_list(results_for_builder,
all_test_count, self.ALL_FIXABLE_COUNT)
def _insert_item_into_raw_list(self, results_for_builder, item, key):
"""Inserts the item into the list with the given key in the results for
this builder. Creates the list if no such list exists.
Args:
results_for_builder: Dictionary containing the test results for a
single builder.
item: Number or string to insert into the list.
key: Key in results_for_builder for the list to insert into.
"""
if key in results_for_builder:
raw_list = results_for_builder[key]
else:
raw_list = []
raw_list.insert(0, item)
raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
results_for_builder[key] = raw_list
def _insert_item_run_length_encoded(self, item, encoded_results):
"""Inserts the item into the run-length encoded results.
Args:
item: String or number to insert.
encoded_results: run-length encoded results. An array of arrays, e.g.
[[3,'A'],[1,'Q']] encodes AAAQ.
"""
if len(encoded_results) and item == encoded_results[0][1]:
num_results = encoded_results[0][0]
if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
encoded_results[0][0] = num_results + 1
else:
# Use a list instead of a class for the run-length encoding since
# we want the serialized form to be concise.
encoded_results.insert(0, [1, item])
def _insert_generic_metadata(self, results_for_builder):
""" Inserts generic metadata (such as version number, current time etc)
into the JSON.
Args:
results_for_builder: Dictionary containing the test results for
a single builder.
"""
self._insert_item_into_raw_list(results_for_builder,
self._build_number, self.BUILD_NUMBERS)
# Include SVN revisions for the given repositories.
for (name, path) in self._svn_repositories:
self._insert_item_into_raw_list(results_for_builder,
self._get_svn_revision(path),
name + 'Revision')
self._insert_item_into_raw_list(results_for_builder,
int(time.time()),
self.TIME)
def _insert_test_time_and_result(self, test_name, tests):
""" Insert a test item with its results to the given tests dictionary.
Args:
            test_name: Name of the test.
            tests: Dictionary containing test result entries.
"""
result = self._get_result_char(test_name)
time = self._get_test_timing(test_name)
this_test = tests
for segment in test_name.split("/"):
if segment not in this_test:
this_test[segment] = {}
this_test = this_test[segment]
if not len(this_test):
self._populate_results_and_times_json(this_test)
if self.RESULTS in this_test:
self._insert_item_run_length_encoded(result, this_test[self.RESULTS])
else:
this_test[self.RESULTS] = [[1, result]]
if self.TIMES in this_test:
self._insert_item_run_length_encoded(time, this_test[self.TIMES])
else:
this_test[self.TIMES] = [[1, time]]
def _convert_json_to_current_version(self, results_json):
"""If the JSON does not match the current version, converts it to the
current version and adds in the new version number.
"""
if self.VERSION_KEY in results_json:
archive_version = results_json[self.VERSION_KEY]
if archive_version == self.VERSION:
return
else:
archive_version = 3
# version 3->4
if archive_version == 3:
num_results = len(results_json.values())
for builder, results in results_json.iteritems():
self._convert_tests_to_trie(results)
results_json[self.VERSION_KEY] = self.VERSION
def _convert_tests_to_trie(self, results):
if not self.TESTS in results:
return
test_results = results[self.TESTS]
test_results_trie = {}
for test in test_results.iterkeys():
single_test_result = test_results[test]
add_path_to_trie(test, single_test_result, test_results_trie)
results[self.TESTS] = test_results_trie
def _populate_results_and_times_json(self, results_and_times):
results_and_times[self.RESULTS] = []
results_and_times[self.TIMES] = []
return results_and_times
def _create_results_for_builder_json(self):
results_for_builder = {}
results_for_builder[self.TESTS] = {}
return results_for_builder
def _remove_items_over_max_number_of_builds(self, encoded_list):
"""Removes items from the run-length encoded list after the final
item that exceeds the max number of builds to track.
Args:
            encoded_list: run-length encoded results. An array of arrays, e.g.
[[3,'A'],[1,'Q']] encodes AAAQ.
"""
num_builds = 0
index = 0
for result in encoded_list:
num_builds = num_builds + result[0]
index = index + 1
if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
return encoded_list[:index]
return encoded_list
def _normalize_results_json(self, test, test_name, tests):
""" Prune tests where all runs pass or tests that no longer exist and
truncate all results to maxNumberOfBuilds.
Args:
test: ResultsAndTimes object for this test.
test_name: Name of the test.
tests: The JSON object with all the test results for this builder.
"""
test[self.RESULTS] = self._remove_items_over_max_number_of_builds(
test[self.RESULTS])
test[self.TIMES] = self._remove_items_over_max_number_of_builds(
test[self.TIMES])
is_all_pass = self._is_results_all_of_type(test[self.RESULTS],
self.PASS_RESULT)
is_all_no_data = self._is_results_all_of_type(test[self.RESULTS],
self.NO_DATA_RESULT)
max_time = max([time[1] for time in test[self.TIMES]])
# Remove all passes/no-data from the results to reduce noise and
# filesize. If a test passes every run, but takes > MIN_TIME to run,
# don't throw away the data.
if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
del tests[test_name]
def _is_results_all_of_type(self, results, type):
"""Returns whether all the results are of the given type
(e.g. all passes)."""
return len(results) == 1 and results[0][1] == type
# Left here not to break anything.
class JSONResultsGenerator(JSONResultsGeneratorBase):
pass
| 38.279879
| 110
| 0.651306
|
import logging
import os
import subprocess
import sys
import time
import urllib2
import xml.dom.minidom
from webkitpy.common.net.file_uploader import FileUploader
try:
import json
except ImportError:
import webkitpy.thirdparty.simplejson as json
_log = logging.getLogger(__name__)
_JSON_PREFIX = "ADD_RESULTS("
_JSON_SUFFIX = ");"
def has_json_wrapper(string):
return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX)
def strip_json_wrapper(json_content):
if has_json_wrapper(json_content):
return json_content[len(_JSON_PREFIX):len(json_content) - len(_JSON_SUFFIX)]
return json_content
def load_json(filesystem, file_path):
content = filesystem.read_text_file(file_path)
content = strip_json_wrapper(content)
return json.loads(content)
def write_json(filesystem, json_object, file_path, callback=None):
json_string = json.dumps(json_object, separators=(',', ':'))
if callback:
json_string = callback + "(" + json_string + ");"
filesystem.write_text_file(file_path, json_string)
def convert_trie_to_flat_paths(trie, prefix=None):
"""Converts the directory structure in the given trie to flat paths, prepending a prefix to each."""
result = {}
for name, data in trie.iteritems():
if prefix:
name = prefix + "/" + name
if len(data) and not "results" in data:
result.update(convert_trie_to_flat_paths(data, name))
else:
result[name] = data
return result
def add_path_to_trie(path, value, trie):
"""Inserts a single flat directory path and associated value into a directory trie structure."""
if not "/" in path:
trie[path] = value
return
directory, slash, rest = path.partition("/")
if not directory in trie:
trie[directory] = {}
add_path_to_trie(rest, value, trie[directory])
def test_timings_trie(port, individual_test_timings):
"""Breaks a test name into chunks by directory and puts the test time as a value in the lowest part, e.g.
foo/bar/baz.html: 1ms
foo/bar/baz1.html: 3ms
becomes
foo: {
bar: {
baz.html: 1,
baz1.html: 3
}
}
"""
trie = {}
for test_result in individual_test_timings:
test = test_result.test_name
add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)
return trie
class TestResult(object):
"""A simple class that represents a single test result."""
(NONE, FAILS, FLAKY, DISABLED) = range(4)
def __init__(self, test, failed=False, elapsed_time=0):
self.test_name = test
self.failed = failed
self.test_run_time = elapsed_time
test_name = test
try:
test_name = test.split('.')[1]
except IndexError:
_log.warn("Invalid test name: %s.", test)
pass
if test_name.startswith('FAILS_'):
self.modifier = self.FAILS
elif test_name.startswith('FLAKY_'):
self.modifier = self.FLAKY
elif test_name.startswith('DISABLED_'):
self.modifier = self.DISABLED
else:
self.modifier = self.NONE
def fixable(self):
return self.failed or self.modifier == self.DISABLED
class JSONResultsGeneratorBase(object):
"""A JSON results generator for generic tests."""
MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
MIN_TIME = 1
PASS_RESULT = "P"
SKIP_RESULT = "X"
FAIL_RESULT = "F"
FLAKY_RESULT = "L"
NO_DATA_RESULT = "N"
MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
TestResult.DISABLED: SKIP_RESULT,
TestResult.FAILS: FAIL_RESULT,
TestResult.FLAKY: FLAKY_RESULT}
VERSION = 4
VERSION_KEY = "version"
RESULTS = "results"
TIMES = "times"
BUILD_NUMBERS = "buildNumbers"
TIME = "secondsSinceEpoch"
TESTS = "tests"
FIXABLE_COUNT = "fixableCount"
FIXABLE = "fixableCounts"
ALL_FIXABLE_COUNT = "allFixableCount"
RESULTS_FILENAME = "results.json"
TIMES_MS_FILENAME = "times_ms.json"
INCREMENTAL_RESULTS_FILENAME = "incremental_results.json"
URL_FOR_TEST_LIST_JSON = "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&master=%s"
def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_results_map, svn_repositories=None,
test_results_server=None,
test_type="",
master_name="",
generate_incremental_results=None):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
        Args:
port: port-specific wrapper
builder_name: the builder name (e.g. Webkit).
build_name: the build name (e.g. webkit-rel).
build_number: the build number.
results_file_base_path: Absolute path to the directory containing the
results json file.
builder_base_url: the URL where we have the archived test results.
If this is None no archived results will be retrieved.
test_results_map: A dictionary that maps test_name to TestResult.
svn_repositories: A (json_field_name, svn_path) pair for SVN
repositories that tests rely on. The SVN revision will be
included in the JSON with the given json_field_name.
test_results_server: server that hosts test results json.
test_type: test type string (e.g. 'layout-tests').
master_name: the name of the buildbot master.
"""
self._port = port
self._filesystem = port._filesystem
self._builder_name = builder_name
self._build_name = build_name
self._build_number = build_number
self._builder_base_url = builder_base_url
self._results_directory = results_file_base_path
self._test_results_map = test_results_map
self._test_results = test_results_map.values()
self._svn_repositories = svn_repositories
if not self._svn_repositories:
self._svn_repositories = {}
self._test_results_server = test_results_server
self._test_type = test_type
self._master_name = master_name
self._archived_results = None
def generate_json_output(self):
json_object = self.get_json()
if json_object:
file_path = self._filesystem.join(self._results_directory, self.INCREMENTAL_RESULTS_FILENAME)
write_json(self._filesystem, json_object, file_path)
def generate_times_ms_file(self):
times = test_timings_trie(self._port, self._test_results_map.values())
file_path = self._filesystem.join(self._results_directory, self.TIMES_MS_FILENAME)
write_json(self._filesystem, times, file_path)
def get_json(self):
"""Gets the results for the results.json file."""
results_json = {}
if not results_json:
results_json, error = self._get_archived_json_results()
if error:
# file at all as it would lose all the information on the
# bot.
_log.error("Archive directory is inaccessible. Not "
"modifying or clobbering the results.json "
"file: " + str(error))
return None
builder_name = self._builder_name
if results_json and builder_name not in results_json:
_log.debug("Builder name (%s) is not in the results.json file."
% builder_name)
self._convert_json_to_current_version(results_json)
if builder_name not in results_json:
results_json[builder_name] = (
self._create_results_for_builder_json())
results_for_builder = results_json[builder_name]
self._insert_generic_metadata(results_for_builder)
self._insert_failure_summaries(results_for_builder)
# Update the all failing tests with result type and time.
tests = results_for_builder[self.TESTS]
all_failing_tests = self._get_failed_test_names()
all_failing_tests.update(convert_trie_to_flat_paths(tests))
for test in all_failing_tests:
self._insert_test_time_and_result(test, tests)
return results_json
def set_archived_results(self, archived_results):
self._archived_results = archived_results
def upload_json_files(self, json_files):
"""Uploads the given json_files to the test_results_server (if the
test_results_server is given)."""
if not self._test_results_server:
return
if not self._master_name:
_log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
return
_log.info("Uploading JSON files for builder: %s", self._builder_name)
attrs = [("builder", self._builder_name),
("testtype", self._test_type),
("master", self._master_name)]
files = [(file, self._filesystem.join(self._results_directory, file))
for file in json_files]
url = "http://%s/testfile/upload" % self._test_results_server
uploader = FileUploader(url)
try:
# Set uploading timeout in case appengine server is having problem.
# 120 seconds are more than enough to upload test results.
uploader.upload(attrs, files, 120)
except Exception, err:
_log.error("Upload failed: %s" % err)
return
_log.info("JSON files uploaded.")
def _get_test_timing(self, test_name):
"""Returns test timing data (elapsed time) in second
for the given test_name."""
if test_name in self._test_results_map:
# Floor for now to get time in seconds.
return int(self._test_results_map[test_name].test_run_time)
return 0
def _get_failed_test_names(self):
"""Returns a set of failed test names."""
return set([r.test_name for r in self._test_results if r.failed])
def _get_modifier_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
for the given test_name.
"""
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
if test_result.modifier in self.MODIFIER_TO_CHAR.keys():
return self.MODIFIER_TO_CHAR[test_result.modifier]
return self.__class__.PASS_RESULT
def _get_result_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
for the given test_name.
"""
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
if test_result.modifier == TestResult.DISABLED:
return self.__class__.SKIP_RESULT
if test_result.failed:
return self.__class__.FAIL_RESULT
return self.__class__.PASS_RESULT
# FIXME: Callers should use scm.py instead.
# FIXME: Identify and fix the run-time errors that were observed on Windows
# chromium buildbot when we had updated this code to use scm.py once before.
def _get_svn_revision(self, in_directory):
"""Returns the svn revision for the given directory.
Args:
in_directory: The directory where svn is to be run.
"""
if self._filesystem.exists(self._filesystem.join(in_directory, '.svn')):
# Note: Not thread safe: http://bugs.python.org/issue2320
output = subprocess.Popen(["svn", "info", "--xml"],
cwd=in_directory,
shell=(sys.platform == 'win32'),
stdout=subprocess.PIPE).communicate()[0]
try:
dom = xml.dom.minidom.parseString(output)
return dom.getElementsByTagName('entry')[0].getAttribute(
'revision')
except xml.parsers.expat.ExpatError:
return ""
return ""
def _get_archived_json_results(self):
"""Download JSON file that only contains test
name list from test-results server. This is for generating incremental
        JSON so the generated file has info for tests that failed before but
        pass or are skipped in the current run.
Returns (archived_results, error) tuple where error is None if results
were successfully read.
"""
results_json = {}
old_results = None
error = None
if not self._test_results_server:
return {}, None
results_file_url = (self.URL_FOR_TEST_LIST_JSON %
(urllib2.quote(self._test_results_server),
urllib2.quote(self._builder_name),
self.RESULTS_FILENAME,
urllib2.quote(self._test_type),
urllib2.quote(self._master_name)))
try:
# FIXME: We should talk to the network via a Host object.
results_file = urllib2.urlopen(results_file_url)
info = results_file.info()
old_results = results_file.read()
except urllib2.HTTPError, http_error:
# A non-4xx status code means the bot is hosed for some reason
# and we can't grab the results.json file off of it.
            if (http_error.code < 400 or http_error.code >= 500):
error = http_error
except urllib2.URLError, url_error:
error = url_error
if old_results:
old_results = strip_json_wrapper(old_results)
try:
results_json = json.loads(old_results)
except:
_log.debug("results.json was not valid JSON. Clobbering.")
results_json = {}
else:
_log.debug('Old JSON results do not exist. Starting fresh.')
results_json = {}
return results_json, error
def _insert_failure_summaries(self, results_for_builder):
"""Inserts aggregate pass/failure statistics into the JSON.
This method reads self._test_results and generates
FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.
Args:
results_for_builder: Dictionary containing the test results for a
single builder.
"""
fixable_count = len([r for r in self._test_results if r.fixable()])
self._insert_item_into_raw_list(results_for_builder,
fixable_count, self.FIXABLE_COUNT)
entry = {}
for test_name in self._test_results_map.iterkeys():
result_char = self._get_modifier_char(test_name)
entry[result_char] = entry.get(result_char, 0) + 1
self._insert_item_into_raw_list(results_for_builder, entry,
self.FIXABLE)
all_test_count = len(self._test_results)
self._insert_item_into_raw_list(results_for_builder,
all_test_count, self.ALL_FIXABLE_COUNT)
def _insert_item_into_raw_list(self, results_for_builder, item, key):
"""Inserts the item into the list with the given key in the results for
this builder. Creates the list if no such list exists.
Args:
results_for_builder: Dictionary containing the test results for a
single builder.
item: Number or string to insert into the list.
key: Key in results_for_builder for the list to insert into.
"""
if key in results_for_builder:
raw_list = results_for_builder[key]
else:
raw_list = []
raw_list.insert(0, item)
raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
results_for_builder[key] = raw_list
def _insert_item_run_length_encoded(self, item, encoded_results):
"""Inserts the item into the run-length encoded results.
Args:
item: String or number to insert.
encoded_results: run-length encoded results. An array of arrays, e.g.
[[3,'A'],[1,'Q']] encodes AAAQ.
"""
if len(encoded_results) and item == encoded_results[0][1]:
num_results = encoded_results[0][0]
if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
encoded_results[0][0] = num_results + 1
else:
encoded_results.insert(0, [1, item])
def _insert_generic_metadata(self, results_for_builder):
""" Inserts generic metadata (such as version number, current time etc)
into the JSON.
Args:
results_for_builder: Dictionary containing the test results for
a single builder.
"""
self._insert_item_into_raw_list(results_for_builder,
self._build_number, self.BUILD_NUMBERS)
for (name, path) in self._svn_repositories:
self._insert_item_into_raw_list(results_for_builder,
self._get_svn_revision(path),
name + 'Revision')
self._insert_item_into_raw_list(results_for_builder,
int(time.time()),
self.TIME)
def _insert_test_time_and_result(self, test_name, tests):
""" Insert a test item with its results to the given tests dictionary.
Args:
            test_name: Name of the test.
            tests: Dictionary containing test result entries.
"""
result = self._get_result_char(test_name)
time = self._get_test_timing(test_name)
this_test = tests
for segment in test_name.split("/"):
if segment not in this_test:
this_test[segment] = {}
this_test = this_test[segment]
if not len(this_test):
self._populate_results_and_times_json(this_test)
if self.RESULTS in this_test:
self._insert_item_run_length_encoded(result, this_test[self.RESULTS])
else:
this_test[self.RESULTS] = [[1, result]]
if self.TIMES in this_test:
self._insert_item_run_length_encoded(time, this_test[self.TIMES])
else:
this_test[self.TIMES] = [[1, time]]
def _convert_json_to_current_version(self, results_json):
"""If the JSON does not match the current version, converts it to the
current version and adds in the new version number.
"""
if self.VERSION_KEY in results_json:
archive_version = results_json[self.VERSION_KEY]
if archive_version == self.VERSION:
return
else:
archive_version = 3
if archive_version == 3:
num_results = len(results_json.values())
for builder, results in results_json.iteritems():
self._convert_tests_to_trie(results)
results_json[self.VERSION_KEY] = self.VERSION
def _convert_tests_to_trie(self, results):
if not self.TESTS in results:
return
test_results = results[self.TESTS]
test_results_trie = {}
for test in test_results.iterkeys():
single_test_result = test_results[test]
add_path_to_trie(test, single_test_result, test_results_trie)
results[self.TESTS] = test_results_trie
def _populate_results_and_times_json(self, results_and_times):
results_and_times[self.RESULTS] = []
results_and_times[self.TIMES] = []
return results_and_times
def _create_results_for_builder_json(self):
results_for_builder = {}
results_for_builder[self.TESTS] = {}
return results_for_builder
def _remove_items_over_max_number_of_builds(self, encoded_list):
"""Removes items from the run-length encoded list after the final
item that exceeds the max number of builds to track.
Args:
            encoded_list: run-length encoded results. An array of arrays, e.g.
[[3,'A'],[1,'Q']] encodes AAAQ.
"""
num_builds = 0
index = 0
for result in encoded_list:
num_builds = num_builds + result[0]
index = index + 1
if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
return encoded_list[:index]
return encoded_list
def _normalize_results_json(self, test, test_name, tests):
""" Prune tests where all runs pass or tests that no longer exist and
truncate all results to maxNumberOfBuilds.
Args:
test: ResultsAndTimes object for this test.
test_name: Name of the test.
tests: The JSON object with all the test results for this builder.
"""
test[self.RESULTS] = self._remove_items_over_max_number_of_builds(
test[self.RESULTS])
test[self.TIMES] = self._remove_items_over_max_number_of_builds(
test[self.TIMES])
is_all_pass = self._is_results_all_of_type(test[self.RESULTS],
self.PASS_RESULT)
is_all_no_data = self._is_results_all_of_type(test[self.RESULTS],
self.NO_DATA_RESULT)
max_time = max([time[1] for time in test[self.TIMES]])
if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
del tests[test_name]
def _is_results_all_of_type(self, results, type):
"""Returns whether all the results are of the given type
(e.g. all passes)."""
return len(results) == 1 and results[0][1] == type
# Left here not to break anything.
class JSONResultsGenerator(JSONResultsGeneratorBase):
pass
| false
| true
|
790222b163067014c484f87531fe0ebfc7a295fa
| 5,094
|
py
|
Python
|
tests/components/stream/test_hls.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 3
|
2021-04-27T16:37:48.000Z
|
2022-02-23T02:47:33.000Z
|
tests/components/stream/test_hls.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 48
|
2019-02-06T22:08:09.000Z
|
2022-03-31T06:02:22.000Z
|
tests/components/stream/test_hls.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 4
|
2019-02-04T15:56:36.000Z
|
2020-12-03T02:03:45.000Z
|
"""The tests for hls streams."""
from datetime import timedelta
from unittest.mock import patch
from urllib.parse import urlparse
import av
from homeassistant.components.stream import request_stream
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.stream.common import generate_h264_video, preload_stream
async def test_hls_stream(hass, hass_client, stream_worker_sync):
"""
Test hls stream.
Purposefully not mocking anything here to test full
integration with the stream component.
"""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Fetch init
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
init_url = playlist_url + "/init.mp4"
init_response = await http_client.get(init_url)
assert init_response.status == 200
# Fetch segment
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
segment_url = playlist_url + "/" + playlist.splitlines()[-1]
segment_response = await http_client.get(segment_url)
assert segment_response.status == 200
stream_worker_sync.resume()
# Stop stream, if it hasn't quit already
stream.stop()
# Ensure playlist not accessible after stream ends
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
async def test_stream_timeout(hass, hass_client, stream_worker_sync):
"""Test hls stream timeout."""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
# Fetch again to reset timer
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
stream_worker_sync.resume()
# Wait 5 minutes
future = dt_util.utcnow() + timedelta(minutes=5)
async_fire_time_changed(hass, future)
# Ensure playlist not accessible
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
async def test_stream_ended(hass, stream_worker_sync):
"""Test hls stream packets ended."""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
track = stream.add_provider("hls")
# Request stream
request_stream(hass, source)
# Run it dead
while True:
segment = await track.recv()
if segment is None:
break
segments = segment.sequence
        # Allow worker to finalize once enough of the stream has been consumed
if segments > 1:
stream_worker_sync.resume()
assert segments > 1
assert not track.get_segment()
# Stop stream, if it hasn't quit already
stream.stop()
async def test_stream_keepalive(hass):
"""Test hls stream retries the stream when keepalive=True."""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo HLS track
source = "test_stream_keepalive_source"
stream = preload_stream(hass, source)
track = stream.add_provider("hls")
track.num_segments = 2
cur_time = 0
def time_side_effect():
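        # Simulated clock: each call advances time by 40 seconds; once it passes 80 seconds, keepalive is disabled so the worker thread exits.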
nonlocal cur_time
if cur_time >= 80:
stream.keepalive = False # Thread should exit and be joinable.
cur_time += 40
return cur_time
with patch("av.open") as av_open, patch(
"homeassistant.components.stream.worker.time"
) as mock_time, patch(
"homeassistant.components.stream.worker.STREAM_RESTART_INCREMENT", 0
):
av_open.side_effect = av.error.InvalidDataError(-2, "error")
mock_time.time.side_effect = time_side_effect
# Request stream
request_stream(hass, source, keepalive=True)
stream._thread.join()
stream._thread = None
assert av_open.call_count == 2
# Stop stream, if it hasn't quit already
stream.stop()
| 29.616279
| 78
| 0.698469
|
from datetime import timedelta
from unittest.mock import patch
from urllib.parse import urlparse
import av
from homeassistant.components.stream import request_stream
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.stream.common import generate_h264_video, preload_stream
async def test_hls_stream(hass, hass_client, stream_worker_sync):
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
url = request_stream(hass, source)
http_client = await hass_client()
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
init_url = playlist_url + "/init.mp4"
init_response = await http_client.get(init_url)
assert init_response.status == 200
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
segment_url = playlist_url + "/" + playlist.splitlines()[-1]
segment_response = await http_client.get(segment_url)
assert segment_response.status == 200
stream_worker_sync.resume()
stream.stop()
# Ensure playlist not accessible after stream ends
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
async def test_stream_timeout(hass, hass_client, stream_worker_sync):
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
# Fetch again to reset timer
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
stream_worker_sync.resume()
# Wait 5 minutes
future = dt_util.utcnow() + timedelta(minutes=5)
async_fire_time_changed(hass, future)
# Ensure playlist not accessible
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
async def test_stream_ended(hass, stream_worker_sync):
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
track = stream.add_provider("hls")
# Request stream
request_stream(hass, source)
# Run it dead
while True:
segment = await track.recv()
if segment is None:
break
segments = segment.sequence
        # Allow worker to finalize once enough of the stream has been consumed
if segments > 1:
stream_worker_sync.resume()
assert segments > 1
assert not track.get_segment()
# Stop stream, if it hasn't quit already
stream.stop()
async def test_stream_keepalive(hass):
await async_setup_component(hass, "stream", {"stream": {}})
source = "test_stream_keepalive_source"
stream = preload_stream(hass, source)
track = stream.add_provider("hls")
track.num_segments = 2
cur_time = 0
def time_side_effect():
nonlocal cur_time
if cur_time >= 80:
stream.keepalive = False
cur_time += 40
return cur_time
with patch("av.open") as av_open, patch(
"homeassistant.components.stream.worker.time"
) as mock_time, patch(
"homeassistant.components.stream.worker.STREAM_RESTART_INCREMENT", 0
):
av_open.side_effect = av.error.InvalidDataError(-2, "error")
mock_time.time.side_effect = time_side_effect
request_stream(hass, source, keepalive=True)
stream._thread.join()
stream._thread = None
assert av_open.call_count == 2
stream.stop()
| true
| true
|
79022355a0d53b3d1c4f3382dd17d1dccb6b5867
| 2,065
|
py
|
Python
|
tools/mo/openvino/tools/mo/front/tf/reduce_ext.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 1
|
2019-09-22T01:05:07.000Z
|
2019-09-22T01:05:07.000Z
|
tools/mo/openvino/tools/mo/front/tf/reduce_ext.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 58
|
2020-11-06T12:13:45.000Z
|
2022-03-28T13:20:11.000Z
|
tools/mo/openvino/tools/mo/front/tf/reduce_ext.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 2
|
2021-07-14T07:40:50.000Z
|
2021-07-27T01:40:03.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.ReduceOps import ReduceProd, ReduceAnd, ReduceMax, ReduceMean, ReduceSum, ReduceL2, ReduceMin
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.graph.graph import Node
class AllFrontExtractor(FrontExtractorOp):
op = 'All'
enabled = True
@classmethod
def extract(cls, node: Node):
keep_dims = node.pb.attr['keep_dims'].b
ReduceAnd.update_node_stat(node, {'keep_dims': keep_dims})
return cls.enabled
class MaxFrontExtractor(FrontExtractorOp):
op = 'Max'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceMax.update_node_stat(node, {'keep_dims': node.pb.attr['keep_dims'].b})
return cls.enabled
class MinFrontExtractor(FrontExtractorOp):
op = 'Min'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceMin.update_node_stat(node, {'keep_dims': node.pb.attr['keep_dims'].b})
return cls.enabled
class MeanExtractor(FrontExtractorOp):
op = 'Mean'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceMean.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
class ProdFrontExtractor(FrontExtractorOp):
op = 'Prod'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceProd.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
class SumFrontExtractor(FrontExtractorOp):
op = 'Sum'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceSum.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
class EuclideanNormFrontExtractor(FrontExtractorOp):
op = 'EuclideanNorm'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceL2.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
| 26.474359
| 120
| 0.685714
|
from openvino.tools.mo.ops.ReduceOps import ReduceProd, ReduceAnd, ReduceMax, ReduceMean, ReduceSum, ReduceL2, ReduceMin
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.graph.graph import Node
class AllFrontExtractor(FrontExtractorOp):
op = 'All'
enabled = True
@classmethod
def extract(cls, node: Node):
keep_dims = node.pb.attr['keep_dims'].b
ReduceAnd.update_node_stat(node, {'keep_dims': keep_dims})
return cls.enabled
class MaxFrontExtractor(FrontExtractorOp):
op = 'Max'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceMax.update_node_stat(node, {'keep_dims': node.pb.attr['keep_dims'].b})
return cls.enabled
class MinFrontExtractor(FrontExtractorOp):
op = 'Min'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceMin.update_node_stat(node, {'keep_dims': node.pb.attr['keep_dims'].b})
return cls.enabled
class MeanExtractor(FrontExtractorOp):
op = 'Mean'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceMean.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
class ProdFrontExtractor(FrontExtractorOp):
op = 'Prod'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceProd.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
class SumFrontExtractor(FrontExtractorOp):
op = 'Sum'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceSum.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
class EuclideanNormFrontExtractor(FrontExtractorOp):
op = 'EuclideanNorm'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceL2.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
| true
| true
|
79022408e15b78bf1ccfa1fa8a176ba4db1d9b7f
| 38,012
|
py
|
Python
|
sonic_installer/main.py
|
sumukhatv/sonic-utilities
|
2b12aadeed3a86ca4ede3aa30b451914c4acb00b
|
[
"Apache-2.0"
] | null | null | null |
sonic_installer/main.py
|
sumukhatv/sonic-utilities
|
2b12aadeed3a86ca4ede3aa30b451914c4acb00b
|
[
"Apache-2.0"
] | null | null | null |
sonic_installer/main.py
|
sumukhatv/sonic-utilities
|
2b12aadeed3a86ca4ede3aa30b451914c4acb00b
|
[
"Apache-2.0"
] | null | null | null |
import configparser
import os
import re
import subprocess
import sys
import time
import utilities_common.cli as clicommon
from urllib.request import urlopen, urlretrieve
import click
from sonic_py_common import logger
from swsscommon.swsscommon import SonicV2Connector
from .bootloader import get_bootloader
from .common import (
run_command, run_command_or_raise,
IMAGE_PREFIX,
UPPERDIR_NAME,
WORKDIR_NAME,
DOCKERDIR_NAME,
)
from .exception import SonicRuntimeException
SYSLOG_IDENTIFIER = "sonic-installer"
LOG_ERR = logger.Logger.LOG_PRIORITY_ERROR
LOG_WARN = logger.Logger.LOG_PRIORITY_WARNING
LOG_NOTICE = logger.Logger.LOG_PRIORITY_NOTICE
# Global Config object
_config = None
# Global logger instance
log = logger.Logger(SYSLOG_IDENTIFIER)
# This is from the aliases example:
# https://github.com/pallets/click/blob/57c6f09611fc47ca80db0bd010f05998b3c0aa95/examples/aliases/aliases.py
class Config(object):
"""Object to hold CLI config"""
def __init__(self):
self.path = os.getcwd()
self.aliases = {}
def read_config(self, filename):
parser = configparser.RawConfigParser()
parser.read([filename])
try:
self.aliases.update(parser.items('aliases'))
except configparser.NoSectionError:
pass
class AliasedGroup(click.Group):
"""This subclass of click.Group supports abbreviations and
looking up aliases in a config file with a bit of magic.
"""
def get_command(self, ctx, cmd_name):
global _config
# If we haven't instantiated our global config, do it now and load current config
if _config is None:
_config = Config()
# Load our config file
cfg_file = os.path.join(os.path.dirname(__file__), 'aliases.ini')
_config.read_config(cfg_file)
# Try to get builtin commands as normal
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
# No builtin found. Look up an explicit command alias in the config
if cmd_name in _config.aliases:
actual_cmd = _config.aliases[cmd_name]
return click.Group.get_command(self, ctx, actual_cmd)
# Alternative option: if we did not find an explicit alias we
# allow automatic abbreviation of the command. "status" for
# instance will match "st". We only allow that however if
# there is only one command.
matches = [x for x in self.list_commands(ctx)
if x.lower().startswith(cmd_name.lower())]
if not matches:
return None
elif len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
#
# Helper functions
#
_start_time = None
_last_time = None
def reporthook(count, block_size, total_size):
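    """Progress callback for urlretrieve: prints percent complete, downloaded size, speed and estimated time left."""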
global _start_time, _last_time
cur_time = int(time.time())
if count == 0:
_start_time = cur_time
_last_time = cur_time
return
if cur_time == _last_time:
return
_last_time = cur_time
duration = cur_time - _start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
time_left = (total_size - progress_size) / speed / 1024
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds left... " %
(percent, progress_size / (1024 * 1024), speed, time_left))
sys.stdout.flush()
# TODO: Embed tag name info into docker image meta data at build time,
# and extract tag name from docker image file.
def get_docker_tag_name(image):
# Try to get tag name from label metadata
cmd = "docker inspect --format '{{.ContainerConfig.Labels.Tag}}' " + image
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
(out, _) = proc.communicate()
if proc.returncode != 0:
return "unknown"
tag = out.rstrip()
if tag == "<no value>":
return "unknown"
return tag
def echo_and_log(msg, priority=LOG_NOTICE, fg=None):
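    """Echoes msg to the console (stderr when priority is error or higher) and logs it to syslog at the given priority."""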
if priority >= LOG_ERR:
# Print to stderr if priority is error
click.secho(msg, fg=fg, err=True)
else:
click.secho(msg, fg=fg)
log.log(priority, msg, False)
# Function which validates whether a given URL specifies an existent file
# on a reachable remote machine. Will abort the current operation if not
def validate_url_or_abort(url):
# Attempt to retrieve HTTP response code
try:
urlfile = urlopen(url)
response_code = urlfile.getcode()
urlfile.close()
except IOError:
response_code = None
if not response_code:
echo_and_log("Did not receive a response from remote machine. Aborting...", LOG_ERR)
raise click.Abort()
else:
# Check for a 4xx response code which indicates a nonexistent URL
if response_code / 100 == 4:
echo_and_log("Image file not found on remote machine. Aborting...", LOG_ERR)
raise click.Abort()
# Callback for confirmation prompt. Aborts if user enters "n"
def abort_if_false(ctx, param, value):
if not value:
ctx.abort()
def get_container_image_name(container_name):
# example image: docker-lldp-sv2:latest
cmd = "docker inspect --format '{{.Config.Image}}' " + container_name
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
(out, _) = proc.communicate()
if proc.returncode != 0:
sys.exit(proc.returncode)
image_latest = out.rstrip()
# example image_name: docker-lldp-sv2
cmd = "echo " + image_latest + " | cut -d ':' -f 1"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
image_name = proc.stdout.read().rstrip()
return image_name
def get_container_image_id(image_tag):
    # TODO: extract common docker info fetching functions
# this is image_id for image with tag, like 'docker-teamd:latest'
cmd = "docker images --format '{{.ID}}' " + image_tag
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
image_id = proc.stdout.read().rstrip()
return image_id
def get_container_image_id_all(image_name):
    # All image IDs under the image name, like 'docker-teamd'
cmd = "docker images --format '{{.ID}}' " + image_name
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
image_id_all = proc.stdout.read()
image_id_all = image_id_all.splitlines()
image_id_all = set(image_id_all)
return image_id_all
def hget_warm_restart_table(db_name, table_name, warm_app_name, key):
db = SonicV2Connector()
db.connect(db_name, False)
_hash = table_name + db.get_db_separator(db_name) + warm_app_name
client = db.get_redis_client(db_name)
return client.hget(_hash, key)
def hdel_warm_restart_table(db_name, table_name, warm_app_name, key):
db = SonicV2Connector()
db.connect(db_name, False)
_hash = table_name + db.get_db_separator(db_name) + warm_app_name
client = db.get_redis_client(db_name)
return client.hdel(_hash, key)
def print_deprecation_warning(deprecated_cmd_or_subcmd, new_cmd_or_subcmd):
click.secho("Warning: '{}' {}command is deprecated and will be removed in the future"
.format(deprecated_cmd_or_subcmd, "" if deprecated_cmd_or_subcmd == "sonic_installer" else "sub"),
fg="red", err=True)
click.secho("Please use '{}' instead".format(new_cmd_or_subcmd), fg="red", err=True)
def mount_squash_fs(squashfs_path, mount_point):
run_command_or_raise(["mkdir", "-p", mount_point])
run_command_or_raise(["mount", "-t", "squashfs", squashfs_path, mount_point])
def umount(mount_point, read_only=True, recursive=False, force=True, remove_dir=True, raise_exception=True):
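    """Unmounts mount_point (optionally forced and/or recursive) and removes the mount directory if requested."""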
flags = []
if read_only:
flags.append("-r")
if force:
flags.append("-f")
if recursive:
flags.append("-R")
run_command_or_raise(["umount", *flags, mount_point], raise_exception=raise_exception)
if remove_dir:
run_command_or_raise(["rm", "-rf", mount_point], raise_exception=raise_exception)
def mount_overlay_fs(lowerdir, upperdir, workdir, mount_point):
run_command_or_raise(["mkdir", "-p", mount_point])
overlay_options = "rw,relatime,lowerdir={},upperdir={},workdir={}".format(lowerdir, upperdir, workdir)
run_command_or_raise(["mount", "overlay", "-t", "overlay", "-o", overlay_options, mount_point])
def mount_bind(source, mount_point):
run_command_or_raise(["mkdir", "-p", mount_point])
run_command_or_raise(["mount", "--bind", source, mount_point])
def mount_procfs_chroot(root):
run_command_or_raise(["chroot", root, "mount", "proc", "/proc", "-t", "proc"])
def mount_sysfs_chroot(root):
run_command_or_raise(["chroot", root, "mount", "sysfs", "/sys", "-t", "sysfs"])
def update_sonic_environment(bootloader, binary_image_version):
"""Prepare sonic environment variable using incoming image template file. If incoming image template does not exist
use current image template file.
"""
SONIC_ENV_TEMPLATE_FILE = os.path.join("usr", "share", "sonic", "templates", "sonic-environment.j2")
SONIC_VERSION_YML_FILE = os.path.join("etc", "sonic", "sonic_version.yml")
sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version)
new_image_dir = bootloader.get_image_path(binary_image_version)
new_image_mount = os.path.join('/', "tmp", "image-{0}-fs".format(sonic_version))
env_dir = os.path.join(new_image_dir, "sonic-config")
env_file = os.path.join(env_dir, "sonic-environment")
with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path:
try:
mount_squash_fs(new_image_squashfs_path, new_image_mount)
next_sonic_env_template_file = os.path.join(new_image_mount, SONIC_ENV_TEMPLATE_FILE)
next_sonic_version_yml_file = os.path.join(new_image_mount, SONIC_VERSION_YML_FILE)
sonic_env = run_command_or_raise([
"sonic-cfggen",
"-d",
"-y",
next_sonic_version_yml_file,
"-t",
next_sonic_env_template_file,
])
os.mkdir(env_dir, 0o755)
with open(env_file, "w+") as ef:
print(sonic_env, file=ef)
os.chmod(env_file, 0o644)
except SonicRuntimeException as ex:
echo_and_log("Warning: SONiC environment variables are not supported for this image: {0}".format(str(ex)), LOG_ERR, fg="red")
if os.path.exists(env_file):
os.remove(env_file)
os.rmdir(env_dir)
finally:
umount(new_image_mount)
def migrate_sonic_packages(bootloader, binary_image_version):
""" Migrate SONiC packages to new SONiC image. """
SONIC_PACKAGE_MANAGER = "sonic-package-manager"
PACKAGE_MANAGER_DIR = "/var/lib/sonic-package-manager/"
DOCKER_CTL_SCRIPT = "/usr/lib/docker/docker.sh"
DOCKERD_SOCK = "docker.sock"
VAR_RUN_PATH = "/var/run/"
tmp_dir = "tmp"
packages_file = "packages.json"
packages_path = os.path.join(PACKAGE_MANAGER_DIR, packages_file)
sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version)
new_image_dir = bootloader.get_image_path(binary_image_version)
new_image_upper_dir = os.path.join(new_image_dir, UPPERDIR_NAME)
new_image_work_dir = os.path.join(new_image_dir, WORKDIR_NAME)
new_image_docker_dir = os.path.join(new_image_dir, DOCKERDIR_NAME)
new_image_mount = os.path.join("/", tmp_dir, "image-{0}-fs".format(sonic_version))
new_image_docker_mount = os.path.join(new_image_mount, "var", "lib", "docker")
if not os.path.isdir(new_image_docker_dir):
# NOTE: This codepath can be reached if the installation process did not
# extract the default dockerfs. This can happen with docker_inram
# though the bootloader class should have disabled the package
# migration which is why this message is a non fatal error message.
echo_and_log("Error: SONiC package migration cannot proceed due to missing docker folder", LOG_ERR, fg="red")
return
docker_started = False
with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path:
try:
mount_squash_fs(new_image_squashfs_path, new_image_mount)
# make sure upper dir and work dir exist
run_command_or_raise(["mkdir", "-p", new_image_upper_dir])
run_command_or_raise(["mkdir", "-p", new_image_work_dir])
mount_overlay_fs(new_image_mount, new_image_upper_dir, new_image_work_dir, new_image_mount)
mount_bind(new_image_docker_dir, new_image_docker_mount)
mount_procfs_chroot(new_image_mount)
mount_sysfs_chroot(new_image_mount)
            # Assume that if the docker.sh script exists we are installing an Application Extension compatible image.
if not os.path.exists(os.path.join(new_image_mount, os.path.relpath(DOCKER_CTL_SCRIPT, os.path.abspath(os.sep)))):
echo_and_log("Warning: SONiC Application Extension is not supported in this image", LOG_WARN, fg="yellow")
return
run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "start"])
docker_started = True
run_command_or_raise(["cp", packages_path, os.path.join(new_image_mount, tmp_dir, packages_file)])
run_command_or_raise(["touch", os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)])
run_command_or_raise(["mount", "--bind",
os.path.join(VAR_RUN_PATH, DOCKERD_SOCK),
os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)])
run_command_or_raise(["chroot", new_image_mount, "sh", "-c", "command -v {}".format(SONIC_PACKAGE_MANAGER)])
run_command_or_raise(["chroot", new_image_mount, SONIC_PACKAGE_MANAGER, "migrate",
os.path.join("/", tmp_dir, packages_file),
"--dockerd-socket", os.path.join("/", tmp_dir, DOCKERD_SOCK),
"-y"])
finally:
if docker_started:
run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "stop"], raise_exception=False)
umount(new_image_mount, recursive=True, read_only=False, remove_dir=False, raise_exception=False)
umount(new_image_mount, raise_exception=False)
class SWAPAllocator(object):
"""Context class to allocate SWAP memory."""
SWAP_MEM_SIZE = 1024
DISK_FREESPACE_THRESHOLD = 4 * 1024
TOTAL_MEM_THRESHOLD = 2048
AVAILABLE_MEM_THRESHOLD = 1200
SWAP_FILE_PATH = '/host/swapfile'
KiB_TO_BYTES_FACTOR = 1024
MiB_TO_BYTES_FACTOR = 1024 * 1024
def __init__(self, allocate, swap_mem_size=None, total_mem_threshold=None, available_mem_threshold=None):
"""
Initialize the SWAP memory allocator.
The allocator will try to setup SWAP memory only if all the below conditions are met:
- allocate evaluates to True
        - disk has enough space (> DISK_FREESPACE_THRESHOLD)
- either system total memory < total_mem_threshold or system available memory < available_mem_threshold
        @param allocate: True to allocate SWAP memory if necessary
        @param swap_mem_size: the size of SWAP memory to allocate (in MiB)
        @param total_mem_threshold: the system total memory threshold (in MiB)
        @param available_mem_threshold: the system available memory threshold (in MiB)
"""
self.allocate = allocate
self.swap_mem_size = SWAPAllocator.SWAP_MEM_SIZE if swap_mem_size is None else swap_mem_size
self.total_mem_threshold = SWAPAllocator.TOTAL_MEM_THRESHOLD if total_mem_threshold is None else total_mem_threshold
self.available_mem_threshold = SWAPAllocator.AVAILABLE_MEM_THRESHOLD if available_mem_threshold is None else available_mem_threshold
self.is_allocated = False
@staticmethod
def get_disk_freespace(path):
"""Return free disk space in bytes."""
fs_stats = os.statvfs(path)
return fs_stats.f_bsize * fs_stats.f_bavail
@staticmethod
def read_from_meminfo():
"""Read information from /proc/meminfo."""
meminfo = {}
with open("/proc/meminfo") as fd:
for line in fd.readlines():
if line:
fields = line.split()
if len(fields) >= 2 and fields[1].isdigit():
meminfo[fields[0].rstrip(":")] = int(fields[1])
return meminfo
def setup_swapmem(self):
swapfile = SWAPAllocator.SWAP_FILE_PATH
with open(swapfile, 'wb') as fd:
os.posix_fallocate(fd.fileno(), 0, self.swap_mem_size * SWAPAllocator.MiB_TO_BYTES_FACTOR)
os.chmod(swapfile, 0o600)
run_command(f'mkswap {swapfile}; swapon {swapfile}')
def remove_swapmem(self):
swapfile = SWAPAllocator.SWAP_FILE_PATH
run_command_or_raise(['swapoff', swapfile], raise_exception=False)
try:
os.unlink(swapfile)
finally:
pass
def __enter__(self):
if self.allocate:
if self.get_disk_freespace('/host') < max(SWAPAllocator.DISK_FREESPACE_THRESHOLD, self.swap_mem_size) * SWAPAllocator.MiB_TO_BYTES_FACTOR:
echo_and_log("Failed to setup SWAP memory due to insufficient disk free space...", LOG_ERR)
return
meminfo = self.read_from_meminfo()
mem_total_in_bytes = meminfo["MemTotal"] * SWAPAllocator.KiB_TO_BYTES_FACTOR
mem_avail_in_bytes = meminfo["MemAvailable"] * SWAPAllocator.KiB_TO_BYTES_FACTOR
if (mem_total_in_bytes < self.total_mem_threshold * SWAPAllocator.MiB_TO_BYTES_FACTOR
or mem_avail_in_bytes < self.available_mem_threshold * SWAPAllocator.MiB_TO_BYTES_FACTOR):
echo_and_log("Setup SWAP memory")
swapfile = SWAPAllocator.SWAP_FILE_PATH
if os.path.exists(swapfile):
self.remove_swapmem()
try:
self.setup_swapmem()
except Exception:
self.remove_swapmem()
raise
self.is_allocated = True
def __exit__(self, *exc_info):
if self.is_allocated:
self.remove_swapmem()
def validate_positive_int(ctx, param, value):
"""Callback to validate param passed is a positive integer."""
if isinstance(value, int) and value > 0:
return value
raise click.BadParameter("Must be a positive integer")
# Main entrypoint
@click.group(cls=AliasedGroup)
def sonic_installer():
""" SONiC image installation manager """
if os.geteuid() != 0:
exit("Root privileges required for this operation")
# Warn the user if they are calling the deprecated version of the command (with an underscore instead of a hyphen)
if os.path.basename(sys.argv[0]) == "sonic_installer":
print_deprecation_warning("sonic_installer", "sonic-installer")
# Install image
@sonic_installer.command('install')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='New image will be installed, continue?')
@click.option('-f', '--force', is_flag=True,
help="Force installation of an image of a type which differs from that of the current running image")
@click.option('--skip_migration', is_flag=True,
help="Do not migrate current configuration to the newly installed image")
@click.option('--skip-package-migration', is_flag=True,
help="Do not migrate current packages to the newly installed image")
@click.option('--skip-setup-swap', is_flag=True,
help='Skip setup temporary SWAP memory used for installation')
@click.option('--swap-mem-size', default=1024, type=int, show_default='1024 MiB',
help='SWAP memory space size', callback=validate_positive_int,
cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'])
@click.option('--total-mem-threshold', default=2048, type=int, show_default='2048 MiB',
help='If system total memory is lower than threshold, setup SWAP memory',
cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'],
callback=validate_positive_int)
@click.option('--available-mem-threshold', default=1200, type=int, show_default='1200 MiB',
              help='If system available memory is lower than threshold, setup SWAP memory',
cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'],
callback=validate_positive_int)
@click.argument('url')
def install(url, force, skip_migration=False, skip_package_migration=False,
skip_setup_swap=False, swap_mem_size=None, total_mem_threshold=None, available_mem_threshold=None):
""" Install image from local binary or URL"""
bootloader = get_bootloader()
if url.startswith('http://') or url.startswith('https://'):
echo_and_log('Downloading image...')
validate_url_or_abort(url)
try:
urlretrieve(url, bootloader.DEFAULT_IMAGE_PATH, reporthook)
click.echo('')
except Exception as e:
            echo_and_log("Download error: {}".format(e), LOG_ERR)
raise click.Abort()
image_path = bootloader.DEFAULT_IMAGE_PATH
else:
image_path = os.path.join("./", url)
binary_image_version = bootloader.get_binary_image_version(image_path)
if not binary_image_version:
echo_and_log("Image file does not exist or is not a valid SONiC image file", LOG_ERR)
raise click.Abort()
# Is this version already installed?
if binary_image_version in bootloader.get_installed_images():
echo_and_log("Image {} is already installed. Setting it as default...".format(binary_image_version))
if not bootloader.set_default_image(binary_image_version):
echo_and_log('Error: Failed to set image as default', LOG_ERR)
raise click.Abort()
else:
# Verify that the binary image is of the same type as the running image
if not bootloader.verify_binary_image(image_path) and not force:
echo_and_log("Image file '{}' is of a different type than running image.\n".format(url) +
"If you are sure you want to install this image, use -f|--force.\n" +
"Aborting...", LOG_ERR)
raise click.Abort()
echo_and_log("Installing image {} and setting it as default...".format(binary_image_version))
with SWAPAllocator(not skip_setup_swap, swap_mem_size, total_mem_threshold, available_mem_threshold):
bootloader.install_image(image_path)
# Take a backup of current configuration
if skip_migration:
echo_and_log("Skipping configuration migration as requested in the command option.")
else:
run_command('config-setup backup')
update_sonic_environment(bootloader, binary_image_version)
if not bootloader.supports_package_migration(binary_image_version) and not skip_package_migration:
echo_and_log("Warning: SONiC package migration is not supported for this bootloader/image", fg="yellow")
skip_package_migration = True
if not skip_package_migration:
migrate_sonic_packages(bootloader, binary_image_version)
# Finally, sync filesystem
run_command("sync;sync;sync")
run_command("sleep 3") # wait 3 seconds after sync
echo_and_log('Done')
# List installed images
@sonic_installer.command('list')
def list_command():
""" Print installed images """
bootloader = get_bootloader()
images = bootloader.get_installed_images()
curimage = bootloader.get_current_image()
nextimage = bootloader.get_next_image()
click.echo("Current: " + curimage)
click.echo("Next: " + nextimage)
click.echo("Available: ")
for image in images:
click.echo(image)
# Set default image for boot
@sonic_installer.command('set-default')
@click.argument('image')
def set_default(image):
""" Choose image to boot from by default """
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "set_default" in sys.argv:
print_deprecation_warning("set_default", "set-default")
bootloader = get_bootloader()
if image not in bootloader.get_installed_images():
echo_and_log('Error: Image does not exist', LOG_ERR)
raise click.Abort()
bootloader.set_default_image(image)
# Set image for next boot
@sonic_installer.command('set-next-boot')
@click.argument('image')
def set_next_boot(image):
""" Choose image for next reboot (one time action) """
# Warn the user if they are calling the deprecated version of the subcommand (with underscores instead of hyphens)
if "set_next_boot" in sys.argv:
print_deprecation_warning("set_next_boot", "set-next-boot")
bootloader = get_bootloader()
if image not in bootloader.get_installed_images():
echo_and_log('Error: Image does not exist', LOG_ERR)
sys.exit(1)
bootloader.set_next_image(image)
# Uninstall image
@sonic_installer.command('remove')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='Image will be removed, continue?')
@click.argument('image')
def remove(image):
""" Uninstall image """
bootloader = get_bootloader()
images = bootloader.get_installed_images()
current = bootloader.get_current_image()
if image not in images:
echo_and_log('Image does not exist', LOG_ERR)
sys.exit(1)
if image == current:
echo_and_log('Cannot remove current image', LOG_ERR)
sys.exit(1)
# TODO: check if image is next boot or default boot and fix these
bootloader.remove_image(image)
# Retrieve version from binary image file and print to screen
@sonic_installer.command('binary-version')
@click.argument('binary_image_path')
def binary_version(binary_image_path):
""" Get version from local binary image file """
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "binary_version" in sys.argv:
print_deprecation_warning("binary_version", "binary-version")
bootloader = get_bootloader()
version = bootloader.get_binary_image_version(binary_image_path)
if not version:
click.echo("Image file does not exist or is not a valid SONiC image file")
sys.exit(1)
else:
click.echo(version)
# Remove installed images which are not current and next
@sonic_installer.command('cleanup')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='Remove images which are not current and next, continue?')
def cleanup():
""" Remove installed images which are not current and next """
bootloader = get_bootloader()
images = bootloader.get_installed_images()
curimage = bootloader.get_current_image()
nextimage = bootloader.get_next_image()
image_removed = 0
for image in images:
if image != curimage and image != nextimage:
echo_and_log("Removing image %s" % image)
bootloader.remove_image(image)
image_removed += 1
if image_removed == 0:
echo_and_log("No image(s) to remove")
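# Containers that can be targeted by the upgrade-docker and rollback-docker commands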
DOCKER_CONTAINER_LIST = [
"bgp",
"dhcp_relay",
"lldp",
"macsec",
"nat",
"pmon",
"radv",
"restapi",
"sflow",
"snmp",
"swss",
"syncd",
"teamd",
"telemetry"
]
# Upgrade docker image
@sonic_installer.command('upgrade-docker')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='New docker image will be installed, continue?')
@click.option('--cleanup_image', is_flag=True, help="Clean up old docker image")
@click.option('--skip_check', is_flag=True, help="Skip task check for docker upgrade")
@click.option('--tag', type=str, help="Tag for the new docker image")
@click.option('--warm', is_flag=True, help="Perform warm upgrade")
@click.argument('container_name', metavar='<container_name>', required=True,
type=click.Choice(DOCKER_CONTAINER_LIST))
@click.argument('url')
def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm):
""" Upgrade docker image from local binary or URL"""
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "upgrade_docker" in sys.argv:
print_deprecation_warning("upgrade_docker", "upgrade-docker")
image_name = get_container_image_name(container_name)
image_latest = image_name + ":latest"
image_id_previous = get_container_image_id(image_latest)
DEFAULT_IMAGE_PATH = os.path.join("/tmp/", image_name)
if url.startswith('http://') or url.startswith('https://'):
echo_and_log('Downloading image...')
validate_url_or_abort(url)
try:
urlretrieve(url, DEFAULT_IMAGE_PATH, reporthook)
except Exception as e:
echo_and_log("Download error: {}".format(e), LOG_ERR)
raise click.Abort()
image_path = DEFAULT_IMAGE_PATH
else:
image_path = os.path.join("./", url)
# Verify that the local file exists and is a regular file
# TODO: Verify the file is a *proper Docker image file*
if not os.path.isfile(image_path):
echo_and_log("Image file '{}' does not exist or is not a regular file. Aborting...".format(image_path), LOG_ERR)
raise click.Abort()
warm_configured = False
    # Warm restart enable/disable config is stored in the STATE_DB; it is not persistent across cold reboot and is not saved to the config_DB.json file
state_db = SonicV2Connector(host='127.0.0.1')
state_db.connect(state_db.STATE_DB, False)
TABLE_NAME_SEPARATOR = '|'
prefix = 'WARM_RESTART_ENABLE_TABLE' + TABLE_NAME_SEPARATOR
_hash = '{}{}'.format(prefix, container_name)
if state_db.get(state_db.STATE_DB, _hash, "enable") == "true":
warm_configured = True
state_db.close(state_db.STATE_DB)
if container_name == "swss" or container_name == "bgp" or container_name == "teamd":
if warm_configured is False and warm:
run_command("config warm_restart enable %s" % container_name)
# Fetch tag of current running image
tag_previous = get_docker_tag_name(image_latest)
# Load the new image beforehand to shorten disruption time
run_command("docker load < %s" % image_path)
warm_app_names = []
    # warm restart specific processing for swss, bgp and teamd dockers.
if warm_configured is True or warm:
# make sure orchagent is in clean state if swss is to be upgraded
if container_name == "swss":
skipPendingTaskCheck = ""
if skip_check:
skipPendingTaskCheck = " -s"
cmd = "docker exec -i swss orchagent_restart_check -w 2000 -r 5 " + skipPendingTaskCheck
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
(out, err) = proc.communicate()
if proc.returncode != 0:
if not skip_check:
echo_and_log("Orchagent is not in clean state, RESTARTCHECK failed", LOG_ERR)
                    # Restore original config before exit
if warm_configured is False and warm:
run_command("config warm_restart disable %s" % container_name)
# Clean the image loaded earlier
image_id_latest = get_container_image_id(image_latest)
run_command("docker rmi -f %s" % image_id_latest)
# Re-point latest tag to previous tag
run_command("docker tag %s:%s %s" % (image_name, tag_previous, image_latest))
sys.exit(proc.returncode)
else:
echo_and_log("Orchagent is not in clean state, upgrading it anyway")
else:
echo_and_log("Orchagent is in clean state and frozen for warm upgrade")
warm_app_names = ["orchagent", "neighsyncd"]
elif container_name == "bgp":
# Kill bgpd to restart the bgp graceful restart procedure
echo_and_log("Stopping bgp ...")
run_command("docker exec -i bgp pkill -9 zebra")
run_command("docker exec -i bgp pkill -9 bgpd")
warm_app_names = ["bgp"]
echo_and_log("Stopped bgp ...")
elif container_name == "teamd":
echo_and_log("Stopping teamd ...")
# Send USR1 signal to all teamd instances to stop them
# It will prepare teamd for warm-reboot
run_command("docker exec -i teamd pkill -USR1 teamd > /dev/null")
warm_app_names = ["teamsyncd"]
echo_and_log("Stopped teamd ...")
    # clean app reconciliation state from the last warm start, if it exists
for warm_app_name in warm_app_names:
hdel_warm_restart_table("STATE_DB", "WARM_RESTART_TABLE", warm_app_name, "state")
run_command("docker kill %s > /dev/null" % container_name)
run_command("docker rm %s " % container_name)
if tag is None:
# example image: docker-lldp-sv2:latest
tag = get_docker_tag_name(image_latest)
run_command("docker tag %s:latest %s:%s" % (image_name, image_name, tag))
run_command("systemctl restart %s" % container_name)
    # All image IDs under the image name
image_id_all = get_container_image_id_all(image_name)
# this is image_id for image with "latest" tag
image_id_latest = get_container_image_id(image_latest)
for id in image_id_all:
if id != image_id_latest:
            # Unless requested, the previous docker image will be preserved
if not cleanup_image and id == image_id_previous:
continue
run_command("docker rmi -f %s" % id)
exp_state = "reconciled"
state = ""
    # post warm restart specific processing for swss, bgp and teamd dockers; wait for reconciliation state.
if warm_configured is True or warm:
count = 0
for warm_app_name in warm_app_names:
state = ""
# Wait up to 180 seconds for reconciled state
while state != exp_state and count < 90:
sys.stdout.write("\r {}: ".format(warm_app_name))
sys.stdout.write("[%-s" % ('='*count))
sys.stdout.flush()
count += 1
time.sleep(2)
state = hget_warm_restart_table("STATE_DB", "WARM_RESTART_TABLE", warm_app_name, "state")
log.log_notice("%s reached %s state" % (warm_app_name, state))
sys.stdout.write("]\n\r")
if state != exp_state:
echo_and_log("%s failed to reach %s state" % (warm_app_name, exp_state), LOG_ERR)
else:
exp_state = "" # this is cold upgrade
# Restore to previous cold restart setting
if warm_configured is False and warm:
if container_name == "swss" or container_name == "bgp" or container_name == "teamd":
run_command("config warm_restart disable %s" % container_name)
if state == exp_state:
echo_and_log('Done')
else:
echo_and_log('Failed', LOG_ERR)
sys.exit(1)
# rollback docker image
@sonic_installer.command('rollback-docker')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='Docker image will be rolled back, continue?')
@click.argument('container_name', metavar='<container_name>', required=True,
type=click.Choice(DOCKER_CONTAINER_LIST))
def rollback_docker(container_name):
""" Rollback docker image to previous version"""
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "rollback_docker" in sys.argv:
print_deprecation_warning("rollback_docker", "rollback-docker")
image_name = get_container_image_name(container_name)
    # All image IDs under the image name
image_id_all = get_container_image_id_all(image_name)
if len(image_id_all) != 2:
echo_and_log("Two images required, but there are '{}' images for '{}'. Aborting...".format(len(image_id_all), image_name), LOG_ERR)
raise click.Abort()
image_latest = image_name + ":latest"
image_id_previous = get_container_image_id(image_latest)
version_tag = ""
for id in image_id_all:
if id != image_id_previous:
version_tag = get_docker_tag_name(id)
            # tag the previous image as latest
run_command("docker tag %s:%s %s:latest" % (image_name, version_tag, image_name))
if container_name == "swss" or container_name == "bgp" or container_name == "teamd":
echo_and_log("Cold reboot is required to restore system state after '{}' rollback !!".format(container_name), LOG_ERR)
else:
run_command("systemctl restart %s" % container_name)
echo_and_log('Done')
# verify the next image
@sonic_installer.command('verify-next-image')
def verify_next_image():
""" Verify the next image for reboot"""
bootloader = get_bootloader()
if not bootloader.verify_next_image():
echo_and_log('Image verification failed', LOG_ERR)
sys.exit(1)
click.echo('Image successfully verified')
if __name__ == '__main__':
sonic_installer()
| 42.141907
| 150
| 0.669631
|
import configparser
import os
import re
import subprocess
import sys
import time
import utilities_common.cli as clicommon
from urllib.request import urlopen, urlretrieve
import click
from sonic_py_common import logger
from swsscommon.swsscommon import SonicV2Connector
from .bootloader import get_bootloader
from .common import (
run_command, run_command_or_raise,
IMAGE_PREFIX,
UPPERDIR_NAME,
WORKDIR_NAME,
DOCKERDIR_NAME,
)
from .exception import SonicRuntimeException
SYSLOG_IDENTIFIER = "sonic-installer"
LOG_ERR = logger.Logger.LOG_PRIORITY_ERROR
LOG_WARN = logger.Logger.LOG_PRIORITY_WARNING
LOG_NOTICE = logger.Logger.LOG_PRIORITY_NOTICE
_config = None
log = logger.Logger(SYSLOG_IDENTIFIER)
class Config(object):
def __init__(self):
self.path = os.getcwd()
self.aliases = {}
def read_config(self, filename):
parser = configparser.RawConfigParser()
parser.read([filename])
try:
self.aliases.update(parser.items('aliases'))
except configparser.NoSectionError:
pass
class AliasedGroup(click.Group):
def get_command(self, ctx, cmd_name):
global _config
if _config is None:
_config = Config()
# Load our config file
cfg_file = os.path.join(os.path.dirname(__file__), 'aliases.ini')
_config.read_config(cfg_file)
# Try to get builtin commands as normal
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
# No builtin found. Look up an explicit command alias in the config
if cmd_name in _config.aliases:
actual_cmd = _config.aliases[cmd_name]
return click.Group.get_command(self, ctx, actual_cmd)
# Alternative option: if we did not find an explicit alias we
# allow automatic abbreviation of the command. "status" for
# instance will match "st". We only allow that however if
# there is only one command.
matches = [x for x in self.list_commands(ctx)
if x.lower().startswith(cmd_name.lower())]
if not matches:
return None
elif len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
#
# Helper functions
#
_start_time = None
_last_time = None
def reporthook(count, block_size, total_size):
global _start_time, _last_time
cur_time = int(time.time())
if count == 0:
_start_time = cur_time
_last_time = cur_time
return
if cur_time == _last_time:
return
_last_time = cur_time
duration = cur_time - _start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
time_left = (total_size - progress_size) / speed / 1024
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds left... " %
(percent, progress_size / (1024 * 1024), speed, time_left))
sys.stdout.flush()
# TODO: Embed tag name info into docker image meta data at build time,
# and extract tag name from docker image file.
def get_docker_tag_name(image):
# Try to get tag name from label metadata
cmd = "docker inspect --format '{{.ContainerConfig.Labels.Tag}}' " + image
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
(out, _) = proc.communicate()
if proc.returncode != 0:
return "unknown"
tag = out.rstrip()
if tag == "<no value>":
return "unknown"
return tag
def echo_and_log(msg, priority=LOG_NOTICE, fg=None):
if priority >= LOG_ERR:
# Print to stderr if priority is error
click.secho(msg, fg=fg, err=True)
else:
click.secho(msg, fg=fg)
log.log(priority, msg, False)
# Function which validates whether a given URL specifies an existent file
# on a reachable remote machine. Will abort the current operation if not
def validate_url_or_abort(url):
# Attempt to retrieve HTTP response code
try:
urlfile = urlopen(url)
response_code = urlfile.getcode()
urlfile.close()
except IOError:
response_code = None
if not response_code:
echo_and_log("Did not receive a response from remote machine. Aborting...", LOG_ERR)
raise click.Abort()
else:
# Check for a 4xx response code which indicates a nonexistent URL
if response_code / 100 == 4:
echo_and_log("Image file not found on remote machine. Aborting...", LOG_ERR)
raise click.Abort()
# Callback for confirmation prompt. Aborts if user enters "n"
def abort_if_false(ctx, param, value):
if not value:
ctx.abort()
def get_container_image_name(container_name):
# example image: docker-lldp-sv2:latest
cmd = "docker inspect --format '{{.Config.Image}}' " + container_name
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
(out, _) = proc.communicate()
if proc.returncode != 0:
sys.exit(proc.returncode)
image_latest = out.rstrip()
# example image_name: docker-lldp-sv2
cmd = "echo " + image_latest + " | cut -d ':' -f 1"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
image_name = proc.stdout.read().rstrip()
return image_name
def get_container_image_id(image_tag):
    # TODO: extract common docker info fetching functions
# this is image_id for image with tag, like 'docker-teamd:latest'
cmd = "docker images --format '{{.ID}}' " + image_tag
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
image_id = proc.stdout.read().rstrip()
return image_id
def get_container_image_id_all(image_name):
    # All image IDs under the image name, like 'docker-teamd'
cmd = "docker images --format '{{.ID}}' " + image_name
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
image_id_all = proc.stdout.read()
image_id_all = image_id_all.splitlines()
image_id_all = set(image_id_all)
return image_id_all
def hget_warm_restart_table(db_name, table_name, warm_app_name, key):
db = SonicV2Connector()
db.connect(db_name, False)
_hash = table_name + db.get_db_separator(db_name) + warm_app_name
client = db.get_redis_client(db_name)
return client.hget(_hash, key)
def hdel_warm_restart_table(db_name, table_name, warm_app_name, key):
db = SonicV2Connector()
db.connect(db_name, False)
_hash = table_name + db.get_db_separator(db_name) + warm_app_name
client = db.get_redis_client(db_name)
return client.hdel(_hash, key)
def print_deprecation_warning(deprecated_cmd_or_subcmd, new_cmd_or_subcmd):
click.secho("Warning: '{}' {}command is deprecated and will be removed in the future"
.format(deprecated_cmd_or_subcmd, "" if deprecated_cmd_or_subcmd == "sonic_installer" else "sub"),
fg="red", err=True)
click.secho("Please use '{}' instead".format(new_cmd_or_subcmd), fg="red", err=True)
def mount_squash_fs(squashfs_path, mount_point):
run_command_or_raise(["mkdir", "-p", mount_point])
run_command_or_raise(["mount", "-t", "squashfs", squashfs_path, mount_point])
def umount(mount_point, read_only=True, recursive=False, force=True, remove_dir=True, raise_exception=True):
flags = []
if read_only:
flags.append("-r")
if force:
flags.append("-f")
if recursive:
flags.append("-R")
run_command_or_raise(["umount", *flags, mount_point], raise_exception=raise_exception)
if remove_dir:
run_command_or_raise(["rm", "-rf", mount_point], raise_exception=raise_exception)
def mount_overlay_fs(lowerdir, upperdir, workdir, mount_point):
run_command_or_raise(["mkdir", "-p", mount_point])
overlay_options = "rw,relatime,lowerdir={},upperdir={},workdir={}".format(lowerdir, upperdir, workdir)
run_command_or_raise(["mount", "overlay", "-t", "overlay", "-o", overlay_options, mount_point])
def mount_bind(source, mount_point):
run_command_or_raise(["mkdir", "-p", mount_point])
run_command_or_raise(["mount", "--bind", source, mount_point])
def mount_procfs_chroot(root):
run_command_or_raise(["chroot", root, "mount", "proc", "/proc", "-t", "proc"])
def mount_sysfs_chroot(root):
run_command_or_raise(["chroot", root, "mount", "sysfs", "/sys", "-t", "sysfs"])
def update_sonic_environment(bootloader, binary_image_version):
SONIC_ENV_TEMPLATE_FILE = os.path.join("usr", "share", "sonic", "templates", "sonic-environment.j2")
SONIC_VERSION_YML_FILE = os.path.join("etc", "sonic", "sonic_version.yml")
sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version)
new_image_dir = bootloader.get_image_path(binary_image_version)
new_image_mount = os.path.join('/', "tmp", "image-{0}-fs".format(sonic_version))
env_dir = os.path.join(new_image_dir, "sonic-config")
env_file = os.path.join(env_dir, "sonic-environment")
with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path:
try:
mount_squash_fs(new_image_squashfs_path, new_image_mount)
next_sonic_env_template_file = os.path.join(new_image_mount, SONIC_ENV_TEMPLATE_FILE)
next_sonic_version_yml_file = os.path.join(new_image_mount, SONIC_VERSION_YML_FILE)
sonic_env = run_command_or_raise([
"sonic-cfggen",
"-d",
"-y",
next_sonic_version_yml_file,
"-t",
next_sonic_env_template_file,
])
os.mkdir(env_dir, 0o755)
with open(env_file, "w+") as ef:
print(sonic_env, file=ef)
os.chmod(env_file, 0o644)
except SonicRuntimeException as ex:
echo_and_log("Warning: SONiC environment variables are not supported for this image: {0}".format(str(ex)), LOG_ERR, fg="red")
if os.path.exists(env_file):
os.remove(env_file)
os.rmdir(env_dir)
finally:
umount(new_image_mount)
def migrate_sonic_packages(bootloader, binary_image_version):
SONIC_PACKAGE_MANAGER = "sonic-package-manager"
PACKAGE_MANAGER_DIR = "/var/lib/sonic-package-manager/"
DOCKER_CTL_SCRIPT = "/usr/lib/docker/docker.sh"
DOCKERD_SOCK = "docker.sock"
VAR_RUN_PATH = "/var/run/"
tmp_dir = "tmp"
packages_file = "packages.json"
packages_path = os.path.join(PACKAGE_MANAGER_DIR, packages_file)
sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version)
new_image_dir = bootloader.get_image_path(binary_image_version)
new_image_upper_dir = os.path.join(new_image_dir, UPPERDIR_NAME)
new_image_work_dir = os.path.join(new_image_dir, WORKDIR_NAME)
new_image_docker_dir = os.path.join(new_image_dir, DOCKERDIR_NAME)
new_image_mount = os.path.join("/", tmp_dir, "image-{0}-fs".format(sonic_version))
new_image_docker_mount = os.path.join(new_image_mount, "var", "lib", "docker")
if not os.path.isdir(new_image_docker_dir):
# NOTE: This codepath can be reached if the installation process did not
# extract the default dockerfs. This can happen with docker_inram
# though the bootloader class should have disabled the package
# migration which is why this message is a non fatal error message.
echo_and_log("Error: SONiC package migration cannot proceed due to missing docker folder", LOG_ERR, fg="red")
return
docker_started = False
with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path:
try:
mount_squash_fs(new_image_squashfs_path, new_image_mount)
# make sure upper dir and work dir exist
run_command_or_raise(["mkdir", "-p", new_image_upper_dir])
run_command_or_raise(["mkdir", "-p", new_image_work_dir])
mount_overlay_fs(new_image_mount, new_image_upper_dir, new_image_work_dir, new_image_mount)
mount_bind(new_image_docker_dir, new_image_docker_mount)
mount_procfs_chroot(new_image_mount)
mount_sysfs_chroot(new_image_mount)
            # Assume that if the docker.sh script exists we are installing an Application Extension compatible image.
if not os.path.exists(os.path.join(new_image_mount, os.path.relpath(DOCKER_CTL_SCRIPT, os.path.abspath(os.sep)))):
echo_and_log("Warning: SONiC Application Extension is not supported in this image", LOG_WARN, fg="yellow")
return
run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "start"])
docker_started = True
run_command_or_raise(["cp", packages_path, os.path.join(new_image_mount, tmp_dir, packages_file)])
run_command_or_raise(["touch", os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)])
run_command_or_raise(["mount", "--bind",
os.path.join(VAR_RUN_PATH, DOCKERD_SOCK),
os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)])
run_command_or_raise(["chroot", new_image_mount, "sh", "-c", "command -v {}".format(SONIC_PACKAGE_MANAGER)])
run_command_or_raise(["chroot", new_image_mount, SONIC_PACKAGE_MANAGER, "migrate",
os.path.join("/", tmp_dir, packages_file),
"--dockerd-socket", os.path.join("/", tmp_dir, DOCKERD_SOCK),
"-y"])
finally:
if docker_started:
run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "stop"], raise_exception=False)
umount(new_image_mount, recursive=True, read_only=False, remove_dir=False, raise_exception=False)
umount(new_image_mount, raise_exception=False)
class SWAPAllocator(object):
SWAP_MEM_SIZE = 1024
DISK_FREESPACE_THRESHOLD = 4 * 1024
TOTAL_MEM_THRESHOLD = 2048
AVAILABLE_MEM_THRESHOLD = 1200
SWAP_FILE_PATH = '/host/swapfile'
KiB_TO_BYTES_FACTOR = 1024
MiB_TO_BYTES_FACTOR = 1024 * 1024
def __init__(self, allocate, swap_mem_size=None, total_mem_threshold=None, available_mem_threshold=None):
self.allocate = allocate
self.swap_mem_size = SWAPAllocator.SWAP_MEM_SIZE if swap_mem_size is None else swap_mem_size
self.total_mem_threshold = SWAPAllocator.TOTAL_MEM_THRESHOLD if total_mem_threshold is None else total_mem_threshold
self.available_mem_threshold = SWAPAllocator.AVAILABLE_MEM_THRESHOLD if available_mem_threshold is None else available_mem_threshold
self.is_allocated = False
@staticmethod
def get_disk_freespace(path):
fs_stats = os.statvfs(path)
return fs_stats.f_bsize * fs_stats.f_bavail
@staticmethod
def read_from_meminfo():
meminfo = {}
with open("/proc/meminfo") as fd:
for line in fd.readlines():
if line:
fields = line.split()
if len(fields) >= 2 and fields[1].isdigit():
meminfo[fields[0].rstrip(":")] = int(fields[1])
return meminfo
def setup_swapmem(self):
swapfile = SWAPAllocator.SWAP_FILE_PATH
with open(swapfile, 'wb') as fd:
os.posix_fallocate(fd.fileno(), 0, self.swap_mem_size * SWAPAllocator.MiB_TO_BYTES_FACTOR)
os.chmod(swapfile, 0o600)
run_command(f'mkswap {swapfile}; swapon {swapfile}')
def remove_swapmem(self):
swapfile = SWAPAllocator.SWAP_FILE_PATH
run_command_or_raise(['swapoff', swapfile], raise_exception=False)
        try:
            os.unlink(swapfile)
        except OSError:
            # The swap file may already be gone; ignore errors here
            pass
def __enter__(self):
if self.allocate:
if self.get_disk_freespace('/host') < max(SWAPAllocator.DISK_FREESPACE_THRESHOLD, self.swap_mem_size) * SWAPAllocator.MiB_TO_BYTES_FACTOR:
echo_and_log("Failed to setup SWAP memory due to insufficient disk free space...", LOG_ERR)
return
meminfo = self.read_from_meminfo()
mem_total_in_bytes = meminfo["MemTotal"] * SWAPAllocator.KiB_TO_BYTES_FACTOR
mem_avail_in_bytes = meminfo["MemAvailable"] * SWAPAllocator.KiB_TO_BYTES_FACTOR
if (mem_total_in_bytes < self.total_mem_threshold * SWAPAllocator.MiB_TO_BYTES_FACTOR
or mem_avail_in_bytes < self.available_mem_threshold * SWAPAllocator.MiB_TO_BYTES_FACTOR):
echo_and_log("Setup SWAP memory")
swapfile = SWAPAllocator.SWAP_FILE_PATH
if os.path.exists(swapfile):
self.remove_swapmem()
try:
self.setup_swapmem()
except Exception:
self.remove_swapmem()
raise
self.is_allocated = True
def __exit__(self, *exc_info):
if self.is_allocated:
self.remove_swapmem()
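# A minimal usage sketch of SWAPAllocator (values are illustrative): the
# context manager allocates temporary swap only when `allocate` is True and
# memory is below the configured thresholds, and removes it again on exit.
#
#     with SWAPAllocator(allocate=True, swap_mem_size=1024,
#                        total_mem_threshold=2048,
#                        available_mem_threshold=1200):
#         pass  # run the memory-hungry installation step here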
def validate_positive_int(ctx, param, value):
if isinstance(value, int) and value > 0:
return value
raise click.BadParameter("Must be a positive integer")
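# For example, `--swap-mem-size 2048` passes this validation, while
# `--swap-mem-size 0` or a negative value raises click.BadParameter.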
# Main entrypoint
@click.group(cls=AliasedGroup)
def sonic_installer():
if os.geteuid() != 0:
exit("Root privileges required for this operation")
# Warn the user if they are calling the deprecated version of the command (with an underscore instead of a hyphen)
if os.path.basename(sys.argv[0]) == "sonic_installer":
print_deprecation_warning("sonic_installer", "sonic-installer")
# Install image
@sonic_installer.command('install')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='New image will be installed, continue?')
@click.option('-f', '--force', is_flag=True,
help="Force installation of an image of a type which differs from that of the current running image")
@click.option('--skip_migration', is_flag=True,
help="Do not migrate current configuration to the newly installed image")
@click.option('--skip-package-migration', is_flag=True,
help="Do not migrate current packages to the newly installed image")
@click.option('--skip-setup-swap', is_flag=True,
              help='Skip setting up temporary SWAP memory used for installation')
@click.option('--swap-mem-size', default=1024, type=int, show_default='1024 MiB',
help='SWAP memory space size', callback=validate_positive_int,
cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'])
@click.option('--total-mem-threshold', default=2048, type=int, show_default='2048 MiB',
              help='If system total memory is lower than the threshold, set up SWAP memory',
cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'],
callback=validate_positive_int)
@click.option('--available-mem-threshold', default=1200, type=int, show_default='1200 MiB',
              help='If system available memory is lower than the threshold, set up SWAP memory',
cls=clicommon.MutuallyExclusiveOption, mutually_exclusive=['skip_setup_swap'],
callback=validate_positive_int)
@click.argument('url')
def install(url, force, skip_migration=False, skip_package_migration=False,
skip_setup_swap=False, swap_mem_size=None, total_mem_threshold=None, available_mem_threshold=None):
bootloader = get_bootloader()
if url.startswith('http://') or url.startswith('https://'):
echo_and_log('Downloading image...')
validate_url_or_abort(url)
try:
urlretrieve(url, bootloader.DEFAULT_IMAGE_PATH, reporthook)
click.echo('')
except Exception as e:
echo_and_log("Download error", e)
raise click.Abort()
image_path = bootloader.DEFAULT_IMAGE_PATH
else:
image_path = os.path.join("./", url)
binary_image_version = bootloader.get_binary_image_version(image_path)
if not binary_image_version:
echo_and_log("Image file does not exist or is not a valid SONiC image file", LOG_ERR)
raise click.Abort()
# Is this version already installed?
if binary_image_version in bootloader.get_installed_images():
echo_and_log("Image {} is already installed. Setting it as default...".format(binary_image_version))
if not bootloader.set_default_image(binary_image_version):
echo_and_log('Error: Failed to set image as default', LOG_ERR)
raise click.Abort()
else:
# Verify that the binary image is of the same type as the running image
if not bootloader.verify_binary_image(image_path) and not force:
echo_and_log("Image file '{}' is of a different type than running image.\n".format(url) +
"If you are sure you want to install this image, use -f|--force.\n" +
"Aborting...", LOG_ERR)
raise click.Abort()
echo_and_log("Installing image {} and setting it as default...".format(binary_image_version))
with SWAPAllocator(not skip_setup_swap, swap_mem_size, total_mem_threshold, available_mem_threshold):
bootloader.install_image(image_path)
# Take a backup of current configuration
if skip_migration:
echo_and_log("Skipping configuration migration as requested in the command option.")
else:
run_command('config-setup backup')
update_sonic_environment(bootloader, binary_image_version)
if not bootloader.supports_package_migration(binary_image_version) and not skip_package_migration:
echo_and_log("Warning: SONiC package migration is not supported for this bootloader/image", fg="yellow")
skip_package_migration = True
if not skip_package_migration:
migrate_sonic_packages(bootloader, binary_image_version)
# Finally, sync filesystem
run_command("sync;sync;sync")
run_command("sleep 3") # wait 3 seconds after sync
echo_and_log('Done')
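# Example invocations (illustrative; the URL and file name are hypothetical):
#     sonic-installer install https://example.com/sonic-broadcom.bin -y
#     sonic-installer install ./sonic-broadcom.bin --skip-package-migration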
# List installed images
@sonic_installer.command('list')
def list_command():
bootloader = get_bootloader()
images = bootloader.get_installed_images()
curimage = bootloader.get_current_image()
nextimage = bootloader.get_next_image()
click.echo("Current: " + curimage)
click.echo("Next: " + nextimage)
click.echo("Available: ")
for image in images:
click.echo(image)
# Set default image for boot
@sonic_installer.command('set-default')
@click.argument('image')
def set_default(image):
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "set_default" in sys.argv:
print_deprecation_warning("set_default", "set-default")
bootloader = get_bootloader()
if image not in bootloader.get_installed_images():
echo_and_log('Error: Image does not exist', LOG_ERR)
raise click.Abort()
bootloader.set_default_image(image)
# Set image for next boot
@sonic_installer.command('set-next-boot')
@click.argument('image')
def set_next_boot(image):
# Warn the user if they are calling the deprecated version of the subcommand (with underscores instead of hyphens)
if "set_next_boot" in sys.argv:
print_deprecation_warning("set_next_boot", "set-next-boot")
bootloader = get_bootloader()
if image not in bootloader.get_installed_images():
echo_and_log('Error: Image does not exist', LOG_ERR)
sys.exit(1)
bootloader.set_next_image(image)
# Uninstall image
@sonic_installer.command('remove')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='Image will be removed, continue?')
@click.argument('image')
def remove(image):
bootloader = get_bootloader()
images = bootloader.get_installed_images()
current = bootloader.get_current_image()
if image not in images:
echo_and_log('Image does not exist', LOG_ERR)
sys.exit(1)
if image == current:
echo_and_log('Cannot remove current image', LOG_ERR)
sys.exit(1)
# TODO: check if image is next boot or default boot and fix these
bootloader.remove_image(image)
# Retrieve version from binary image file and print to screen
@sonic_installer.command('binary-version')
@click.argument('binary_image_path')
def binary_version(binary_image_path):
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "binary_version" in sys.argv:
print_deprecation_warning("binary_version", "binary-version")
bootloader = get_bootloader()
version = bootloader.get_binary_image_version(binary_image_path)
if not version:
click.echo("Image file does not exist or is not a valid SONiC image file")
sys.exit(1)
else:
click.echo(version)
# Remove installed images which are not current and next
@sonic_installer.command('cleanup')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='Remove images which are not current and next, continue?')
def cleanup():
bootloader = get_bootloader()
images = bootloader.get_installed_images()
curimage = bootloader.get_current_image()
nextimage = bootloader.get_next_image()
image_removed = 0
for image in images:
if image != curimage and image != nextimage:
echo_and_log("Removing image %s" % image)
bootloader.remove_image(image)
image_removed += 1
if image_removed == 0:
echo_and_log("No image(s) to remove")
DOCKER_CONTAINER_LIST = [
"bgp",
"dhcp_relay",
"lldp",
"macsec",
"nat",
"pmon",
"radv",
"restapi",
"sflow",
"snmp",
"swss",
"syncd",
"teamd",
"telemetry"
]
# Upgrade docker image
@sonic_installer.command('upgrade-docker')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='New docker image will be installed, continue?')
@click.option('--cleanup_image', is_flag=True, help="Clean up old docker image")
@click.option('--skip_check', is_flag=True, help="Skip task check for docker upgrade")
@click.option('--tag', type=str, help="Tag for the new docker image")
@click.option('--warm', is_flag=True, help="Perform warm upgrade")
@click.argument('container_name', metavar='<container_name>', required=True,
type=click.Choice(DOCKER_CONTAINER_LIST))
@click.argument('url')
def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm):
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "upgrade_docker" in sys.argv:
print_deprecation_warning("upgrade_docker", "upgrade-docker")
image_name = get_container_image_name(container_name)
image_latest = image_name + ":latest"
image_id_previous = get_container_image_id(image_latest)
DEFAULT_IMAGE_PATH = os.path.join("/tmp/", image_name)
if url.startswith('http://') or url.startswith('https://'):
echo_and_log('Downloading image...')
validate_url_or_abort(url)
try:
urlretrieve(url, DEFAULT_IMAGE_PATH, reporthook)
except Exception as e:
echo_and_log("Download error: {}".format(e), LOG_ERR)
raise click.Abort()
image_path = DEFAULT_IMAGE_PATH
else:
image_path = os.path.join("./", url)
# Verify that the local file exists and is a regular file
# TODO: Verify the file is a *proper Docker image file*
if not os.path.isfile(image_path):
echo_and_log("Image file '{}' does not exist or is not a regular file. Aborting...".format(image_path), LOG_ERR)
raise click.Abort()
warm_configured = False
    # The warm restart enable/disable config lives in STATE_DB; it is not persistent
    # across cold reboots and is not saved to the config_DB.json file.
state_db = SonicV2Connector(host='127.0.0.1')
state_db.connect(state_db.STATE_DB, False)
TABLE_NAME_SEPARATOR = '|'
prefix = 'WARM_RESTART_ENABLE_TABLE' + TABLE_NAME_SEPARATOR
_hash = '{}{}'.format(prefix, container_name)
if state_db.get(state_db.STATE_DB, _hash, "enable") == "true":
warm_configured = True
state_db.close(state_db.STATE_DB)
if container_name == "swss" or container_name == "bgp" or container_name == "teamd":
if warm_configured is False and warm:
run_command("config warm_restart enable %s" % container_name)
# Fetch tag of current running image
tag_previous = get_docker_tag_name(image_latest)
# Load the new image beforehand to shorten disruption time
run_command("docker load < %s" % image_path)
warm_app_names = []
    # Warm-restart-specific processing for the swss, bgp and teamd dockers.
if warm_configured is True or warm:
# make sure orchagent is in clean state if swss is to be upgraded
if container_name == "swss":
skipPendingTaskCheck = ""
if skip_check:
skipPendingTaskCheck = " -s"
cmd = "docker exec -i swss orchagent_restart_check -w 2000 -r 5 " + skipPendingTaskCheck
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True)
(out, err) = proc.communicate()
if proc.returncode != 0:
if not skip_check:
echo_and_log("Orchagent is not in clean state, RESTARTCHECK failed", LOG_ERR)
                    # Restore original config before exit
if warm_configured is False and warm:
run_command("config warm_restart disable %s" % container_name)
# Clean the image loaded earlier
image_id_latest = get_container_image_id(image_latest)
run_command("docker rmi -f %s" % image_id_latest)
# Re-point latest tag to previous tag
run_command("docker tag %s:%s %s" % (image_name, tag_previous, image_latest))
sys.exit(proc.returncode)
else:
echo_and_log("Orchagent is not in clean state, upgrading it anyway")
else:
echo_and_log("Orchagent is in clean state and frozen for warm upgrade")
warm_app_names = ["orchagent", "neighsyncd"]
elif container_name == "bgp":
            # Kill bgpd to trigger the BGP graceful restart procedure
echo_and_log("Stopping bgp ...")
run_command("docker exec -i bgp pkill -9 zebra")
run_command("docker exec -i bgp pkill -9 bgpd")
warm_app_names = ["bgp"]
echo_and_log("Stopped bgp ...")
elif container_name == "teamd":
echo_and_log("Stopping teamd ...")
            # Send SIGUSR1 to all teamd instances to stop them;
            # this prepares teamd for warm reboot
run_command("docker exec -i teamd pkill -USR1 teamd > /dev/null")
warm_app_names = ["teamsyncd"]
echo_and_log("Stopped teamd ...")
    # Clean app reconciliation state from the last warm start, if it exists
for warm_app_name in warm_app_names:
hdel_warm_restart_table("STATE_DB", "WARM_RESTART_TABLE", warm_app_name, "state")
run_command("docker kill %s > /dev/null" % container_name)
run_command("docker rm %s " % container_name)
if tag is None:
# example image: docker-lldp-sv2:latest
tag = get_docker_tag_name(image_latest)
run_command("docker tag %s:latest %s:%s" % (image_name, image_name, tag))
run_command("systemctl restart %s" % container_name)
    # All image IDs under the image name
image_id_all = get_container_image_id_all(image_name)
    # This is the image ID for the image with the "latest" tag
image_id_latest = get_container_image_id(image_latest)
    for image_id in image_id_all:
        if image_id != image_id_latest:
            # Unless requested, the previous docker image will be preserved
            if not cleanup_image and image_id == image_id_previous:
                continue
            run_command("docker rmi -f %s" % image_id)
exp_state = "reconciled"
state = ""
    # Post-warm-restart processing for the swss, bgp and teamd dockers: wait for the reconciled state.
if warm_configured is True or warm:
count = 0
for warm_app_name in warm_app_names:
state = ""
# Wait up to 180 seconds for reconciled state
while state != exp_state and count < 90:
sys.stdout.write("\r {}: ".format(warm_app_name))
sys.stdout.write("[%-s" % ('='*count))
sys.stdout.flush()
count += 1
time.sleep(2)
state = hget_warm_restart_table("STATE_DB", "WARM_RESTART_TABLE", warm_app_name, "state")
log.log_notice("%s reached %s state" % (warm_app_name, state))
sys.stdout.write("]\n\r")
if state != exp_state:
echo_and_log("%s failed to reach %s state" % (warm_app_name, exp_state), LOG_ERR)
else:
exp_state = "" # this is cold upgrade
# Restore to previous cold restart setting
if warm_configured is False and warm:
if container_name == "swss" or container_name == "bgp" or container_name == "teamd":
run_command("config warm_restart disable %s" % container_name)
if state == exp_state:
echo_and_log('Done')
else:
echo_and_log('Failed', LOG_ERR)
sys.exit(1)
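# Example invocation (illustrative; the URL is hypothetical):
#     sonic-installer upgrade-docker snmp https://example.com/docker-snmp.gz --warm -y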
# Rollback docker image
@sonic_installer.command('rollback-docker')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False, prompt='Docker image will be rolled back, continue?')
@click.argument('container_name', metavar='<container_name>', required=True,
type=click.Choice(DOCKER_CONTAINER_LIST))
def rollback_docker(container_name):
# Warn the user if they are calling the deprecated version of the subcommand (with an underscore instead of a hyphen)
if "rollback_docker" in sys.argv:
print_deprecation_warning("rollback_docker", "rollback-docker")
image_name = get_container_image_name(container_name)
    # All image IDs under the image name
image_id_all = get_container_image_id_all(image_name)
if len(image_id_all) != 2:
echo_and_log("Two images required, but there are '{}' images for '{}'. Aborting...".format(len(image_id_all), image_name), LOG_ERR)
raise click.Abort()
image_latest = image_name + ":latest"
image_id_previous = get_container_image_id(image_latest)
version_tag = ""
    for image_id in image_id_all:
        if image_id != image_id_previous:
            version_tag = get_docker_tag_name(image_id)
    # Re-tag the previous image as latest
    run_command("docker tag %s:%s %s:latest" % (image_name, version_tag, image_name))
    if container_name in ("swss", "bgp", "teamd"):
echo_and_log("Cold reboot is required to restore system state after '{}' rollback !!".format(container_name), LOG_ERR)
else:
run_command("systemctl restart %s" % container_name)
echo_and_log('Done')
# Verify the next image
@sonic_installer.command('verify-next-image')
def verify_next_image():
bootloader = get_bootloader()
if not bootloader.verify_next_image():
echo_and_log('Image verification failed', LOG_ERR)
sys.exit(1)
click.echo('Image successfully verified')
if __name__ == '__main__':
sonic_installer()
# ======================================================================
# File: fastapi_users/db/tortoise.py  (Python, 4952 bytes, MIT license)
# Repos: okadath/sawfish_users @ bf9d8c28a4c924656fa3197661603cbdfd15bce1
#        SeaMLessNuke/fastapi-users @ 7186554e30eed2fe3c02b3b097618b200b47081d
# hexsha: 7902242e402c503e54f106a528684f979953f800
# ======================================================================
from typing import Optional, Type
from pydantic import UUID4
from tortoise import fields, models
from tortoise.exceptions import DoesNotExist
from fastapi_users.db.base import BaseUserDatabase
from fastapi_users.models import UD
class TortoiseBaseUserModel(models.Model):
id = fields.UUIDField(pk=True, generated=False)
email = fields.CharField(index=True, unique=True, null=False, max_length=255)
hashed_password = fields.CharField(null=False, max_length=255)
is_active = fields.BooleanField(default=True, null=False)
is_superuser = fields.BooleanField(default=False, null=False)
async def to_dict(self):
d = {}
for field in self._meta.db_fields:
d[field] = getattr(self, field)
for field in self._meta.backward_fk_fields:
d[field] = await getattr(self, field).all().values()
return d
class Meta:
abstract = True
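# A minimal concrete model sketch (hypothetical application code, not part of
# this module): subclassing the abstract base yields a real table.
#
#     class User(TortoiseBaseUserModel):
#         class Meta:
#             table = "user"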
class TortoiseBaseOAuthAccountModel(models.Model):
id = fields.UUIDField(pk=True, generated=False, max_length=255)
oauth_name = fields.CharField(null=False, max_length=255)
access_token = fields.CharField(null=False, max_length=255)
expires_at = fields.IntField(null=False)
refresh_token = fields.CharField(null=True, max_length=255)
account_id = fields.CharField(index=True, null=False, max_length=255)
account_email = fields.CharField(null=False, max_length=255)
class Meta:
abstract = True
class TortoiseUserDatabase(BaseUserDatabase[UD]):
"""
Database adapter for Tortoise ORM.
:param user_db_model: Pydantic model of a DB representation of a user.
:param model: Tortoise ORM model.
    :param oauth_account_model: Optional Tortoise ORM model of an OAuth account.
"""
model: Type[TortoiseBaseUserModel]
oauth_account_model: Optional[Type[TortoiseBaseOAuthAccountModel]]
def __init__(
self,
user_db_model: Type[UD],
model: Type[TortoiseBaseUserModel],
oauth_account_model: Optional[Type[TortoiseBaseOAuthAccountModel]] = None,
):
super().__init__(user_db_model)
self.model = model
self.oauth_account_model = oauth_account_model
async def get(self, id: UUID4) -> Optional[UD]:
try:
query = self.model.get(id=id)
if self.oauth_account_model is not None:
query = query.prefetch_related("oauth_accounts")
user = await query
user_dict = await user.to_dict()
return self.user_db_model(**user_dict)
except DoesNotExist:
return None
async def get_by_email(self, email: str) -> Optional[UD]:
query = self.model.filter(email__iexact=email).first()
if self.oauth_account_model is not None:
query = query.prefetch_related("oauth_accounts")
user = await query
if user is None:
return None
user_dict = await user.to_dict()
return self.user_db_model(**user_dict)
async def get_by_oauth_account(self, oauth: str, account_id: str) -> Optional[UD]:
try:
query = self.model.get(
oauth_accounts__oauth_name=oauth, oauth_accounts__account_id=account_id
).prefetch_related("oauth_accounts")
user = await query
user_dict = await user.to_dict()
return self.user_db_model(**user_dict)
except DoesNotExist:
return None
async def create(self, user: UD) -> UD:
user_dict = user.dict()
oauth_accounts = user_dict.pop("oauth_accounts", None)
model = self.model(**user_dict)
await model.save()
if oauth_accounts and self.oauth_account_model:
oauth_account_objects = []
for oauth_account in oauth_accounts:
oauth_account_objects.append(
self.oauth_account_model(user=model, **oauth_account)
)
await self.oauth_account_model.bulk_create(oauth_account_objects)
return user
async def update(self, user: UD) -> UD:
user_dict = user.dict()
user_dict.pop("id") # Tortoise complains if we pass the PK again
oauth_accounts = user_dict.pop("oauth_accounts", None)
model = await self.model.get(id=user.id)
for field in user_dict:
setattr(model, field, user_dict[field])
await model.save()
if oauth_accounts and self.oauth_account_model:
await model.oauth_accounts.all().delete()
oauth_account_objects = []
for oauth_account in oauth_accounts:
oauth_account_objects.append(
self.oauth_account_model(user=model, **oauth_account)
)
await self.oauth_account_model.bulk_create(oauth_account_objects)
return user
async def delete(self, user: UD) -> None:
await self.model.filter(id=user.id).delete()
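# A minimal wiring sketch (hypothetical application code): `User` is a
# concrete Tortoise model subclassing TortoiseBaseUserModel, and `UserDB` is
# the application's Pydantic user-DB model from fastapi-users.
#
#     user_db = TortoiseUserDatabase(UserDB, User)
#     user = await user_db.get_by_email("alice@example.com")  # inside async code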
# ======================================================================
# File: Python/grafico_3d.py  (Python, 387 bytes, MIT license)
# Repo: filipeaguiarrod/Formacao-Cientista-de-Dados-com-Python-e-R
#       @ c9b72f93b2a6ead49641d765fe2a0f23ffb4b1bf
# hexsha: 79022467c277493c78b0c32ba69ea2b3b273c78b
# ======================================================================
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d  # registers the '3d' projection

base = pd.read_csv('orchard.csv')

figura = plt.figure()
eixo = figura.add_subplot(1, 1, 1, projection='3d')

# Scatter plot of the three variables, one per axis
eixo.scatter(base.decrease, base.rowpos, base.colpos)
eixo.set_xlabel('decrease')
eixo.set_ylabel('rowpos')
eixo.set_zlabel('colpos')
plt.show()  # render the figure when running as a script

# colors reference:
# https://pythonspot.com/3d-scatterplot/
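# To keep a copy of the plot when running headless, one could instead save it
# (illustrative file name):
#     figura.savefig('orchard_3d.png', dpi=150)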
# ======================================================================
# File: pysnmp-with-texts/NOKIA-HWM-MIB.py  (Python, 14112 bytes, Apache-2.0)
# Repo: agustinhenze/mibs.snmplabs.com @ 1fc5c07860542b89212f4c8ab807057d9a9206c7
# hexsha: 7902253815a07aa00bdbf5c9e890e7ad8006f0c7
# ======================================================================
#
# PySNMP MIB module NOKIA-HWM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NOKIA-HWM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:23:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
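# NOTE: `mibBuilder` is injected into this module's namespace by pysnmp's MIB
# loader when the module is imported via loadModules(); the file is not meant
# to be executed standalone.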
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
ntcHWMibs, ntcHWReqs, ntcCommonModules = mibBuilder.importSymbols("NOKIA-COMMON-MIB-OID-REGISTRATION-MIB", "ntcHWMibs", "ntcHWReqs", "ntcCommonModules")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
ObjectIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Integer32, IpAddress, TimeTicks, ModuleIdentity, MibIdentifier, Unsigned32, Counter32, NotificationType, iso, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Integer32", "IpAddress", "TimeTicks", "ModuleIdentity", "MibIdentifier", "Unsigned32", "Counter32", "NotificationType", "iso", "Bits")
AutonomousType, TextualConvention, TimeStamp, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "AutonomousType", "TextualConvention", "TimeStamp", "DisplayString")
ntcHWModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 94, 1, 16, 5, 1))
ntcHWModule.setRevisions(('1998-08-24 00:00', '1998-09-03 00:00', '1998-09-24 00:00', '1998-10-04 00:00', '1999-01-08 00:00', '1999-08-05 00:00', '1999-10-25 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ntcHWModule.setRevisionsDescriptions(('Rev 0.1 August 24, 1998 Initial version - ready for review', 'Rev 0.2 September 3, 1998 Initial review by Tero Soukko whose comments have been incorporated.', 'Rev 0.3 September 24, 1998 ready for initial review.', 'Rev 0.4 Updated anchors to use values registered by Mika Kiikkila.', 'Rev 1.0 Syntax of ntcHWLastChangedTime changed from DateAndTime to TimeStamp. Traps commented out because they are part of Nokia Common Alarm MIB.', 'Rev 1.01 Those IMPORTS which are not used are removed. Groups ntcHWSlots and ntcHWEventGroup which are not defined in this module are removed. The name NokiaHwmSlotEntry is changed to NtcHWSlotEntry on account of convenience. All notification definions before out-commented removed. Some esthetic modifications made.', "Comment 'The NMS is not allowed to set the value of ntcHWAdminstate to missing.' added to the ntcHWAdminstate's description.",))
if mibBuilder.loadTexts: ntcHWModule.setLastUpdated('9901080000Z')
if mibBuilder.loadTexts: ntcHWModule.setOrganization('Nokia')
if mibBuilder.loadTexts: ntcHWModule.setContactInfo('Anna-Kaisa Lindfors Nokia Telecommunications Oy Hiomotie 5, FIN-00380 Helsinki +358-9-51121 anna-kaisa.lindfors@nokia.com')
if mibBuilder.loadTexts: ntcHWModule.setDescription('The MIB module that is used to control the Hardware Management information.')
ntcHWObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1))
ntcHWEvents = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 2, 0))
ntcHWGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 1))
ntcHWCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 2))
ntcHWUnitTable = MibTable((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1), )
if mibBuilder.loadTexts: ntcHWUnitTable.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnitTable.setDescription("A table which contains an entry for each pluggable circuit board (in this MIB a 'unit' is the same as a pluggable circuit board.) Entries of this table are automatically created by the hardware management software.")
ntcHWUnitEntry = MibTableRow((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: ntcHWUnitEntry.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnitEntry.setDescription('A conceptual row in the ntcHWUnitTable. Rows are created automatically by the Hardware Management software.')
ntcHWAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("inService", 1), ("outOfService", 2), ("inTest", 3), ("missing", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWAdminState.setStatus('current')
if mibBuilder.loadTexts: ntcHWAdminState.setDescription('Represents the desired state of the unit. inService indicates that the unit is intended to be operating normally. outOfService indicates that the unit should be taken out of normal operating mode and no data traffic should appear in this unit. inTest indicates that the unit should be placed into a selftest mode. missing indicates that the unit is expected to be present but has been detected as not being physically present. The NMS is not allowed to set the value of ntcHWAdminstate to missing.')
ntcHWOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("down", 1), ("up", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWOperState.setStatus('current')
if mibBuilder.loadTexts: ntcHWOperState.setDescription('Indicates the current state of the unit. down indicates that the unit is in a non-functional state. up indicates that the unit is functioning normally.')
ntcHWAvailabilityStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("inCharge", 1), ("applicationStarting", 2), ("applicationShutdown", 3), ("platformStarting", 4), ("resetting", 5), ("separated", 6), ("unconfigured", 7), ("testing", 8), ("standby", 9), ("dormant", 10), ("unavailable", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWAvailabilityStatus.setStatus('current')
if mibBuilder.loadTexts: ntcHWAvailabilityStatus.setDescription("Provides more specific information on the state of the unit in this conceptual row. The status column has eleven defined values: inCharge = the unit is fully operational and ready to perform its desired tasks; applicationStarting = the application software is starting up; applicationShutdown = the application software is shutting down; platformStarting = Basic platform software is starting up; resetting = the disk files are closed and hardware reset is forced; separated = Only basic OS software is running. The unit can start application software on request; unconfigured = The administrative state of the unit is 'missing', disk files are closed and only basic OS software is running. The unit refuses to start application software; testing = Selftests can be performed, only basic OS are running; standby = The unit is redundant and is fully operational but not in charge of operations. It is ready to move to 'inCharge' state when necessary; dormant = All connections are physically inactive to enable removal of the unit without electric disturbance in the backplane. Only watchdog software is running for a short duration of time; unavailable = The unit is not physically present or cannot be contacted.")
ntcHWRestart = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("reset", 1), ("hotRestart", 2), ("detach", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWRestart.setStatus('current')
if mibBuilder.loadTexts: ntcHWRestart.setDescription('Provides the ability to reset or perform a hot restart the unit represented by this conceptual row. reset = the Unit is shutdown in an orderly manner and restarted again via hardware reset; hotRestart = only the software in a unit is restarted, a hardware reset is not initiated; detach = all electrical connections of the unit are forced to an inactive state to enable removal of the unit without electrical disturbance in the backplane.')
ntcHWLedState = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("red", 1), ("yellow", 2), ("black", 3), ("green", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWLedState.setStatus('current')
if mibBuilder.loadTexts: ntcHWLedState.setDescription('Indicates the current LED color of the unit represented by this conceptual row.')
ntcHWSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWSerialNumber.setStatus('current')
if mibBuilder.loadTexts: ntcHWSerialNumber.setDescription("The unit's serial number in displayable format.")
ntcHWProductionDate = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWProductionDate.setStatus('current')
if mibBuilder.loadTexts: ntcHWProductionDate.setDescription("The unit's production date in displayable format.")
ntcHWUnitEntryChanged = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 8), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWUnitEntryChanged.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnitEntryChanged.setDescription('Represents the value of sysUpTime at the instant that this conceptual row entry has changed.')
ntcHWSlotTable = MibTable((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 2), )
if mibBuilder.loadTexts: ntcHWSlotTable.setStatus('current')
if mibBuilder.loadTexts: ntcHWSlotTable.setDescription('Table whose entries represent the expected circuit board type. The entries are created automatically by the hardware management software.')
ntcHWSlotEntry = MibTableRow((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 2, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: ntcHWSlotEntry.setStatus('current')
if mibBuilder.loadTexts: ntcHWSlotEntry.setDescription('The logical row describing the expected circuit board type of a slot.')
ntcHWDesiredUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 2, 1, 2), AutonomousType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWDesiredUnitType.setStatus('current')
if mibBuilder.loadTexts: ntcHWDesiredUnitType.setDescription("The unit type which is expected to be inserted or present in the current slot. An indication of the vendor-specific hardware type of the HWM entity. Note that this is different from the definition of MIB-II's sysObjectID. An agent should set this object to a enterprise-specific registration identifier value indicating the specific equipment type in detail. If no vendor-specific registration identifier exists for this entity, or the value is unknown by this agent, then the value { 0 0 } is returned.")
ntcHWLastChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWLastChangedTime.setStatus('current')
if mibBuilder.loadTexts: ntcHWLastChangedTime.setDescription('The value of sysUpTime at the time any of these events occur: * any instance in the following object changes value: - hwmUnitEntryChanged This object shall be set to value 0 in startup.')
ntcHWLoadInventoryContainer = MibScalar((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWLoadInventoryContainer.setStatus('current')
if mibBuilder.loadTexts: ntcHWLoadInventoryContainer.setDescription('Writing any value to this object will cause the hardware management software to reread its configuration file from disk.')
ntcHWUnits = ObjectGroup((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 1, 1)).setObjects(("NOKIA-HWM-MIB", "ntcHWAdminState"), ("NOKIA-HWM-MIB", "ntcHWOperState"), ("NOKIA-HWM-MIB", "ntcHWAvailabilityStatus"), ("NOKIA-HWM-MIB", "ntcHWRestart"), ("NOKIA-HWM-MIB", "ntcHWLedState"), ("NOKIA-HWM-MIB", "ntcHWSerialNumber"), ("NOKIA-HWM-MIB", "ntcHWProductionDate"), ("NOKIA-HWM-MIB", "ntcHWUnitEntryChanged"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ntcHWUnits = ntcHWUnits.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnits.setDescription('A collection of objects representing the status of a unit.')
ntcHWCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 2, 1)).setObjects(("ENTITY-MIB", "entityPhysicalGroup"), ("NOKIA-HWM-MIB", "ntcHWUnits"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ntcHWCompliance = ntcHWCompliance.setStatus('current')
if mibBuilder.loadTexts: ntcHWCompliance.setDescription('The compliance statement Hardware Management.')
mibBuilder.exportSymbols("NOKIA-HWM-MIB", ntcHWCompliance=ntcHWCompliance, ntcHWLedState=ntcHWLedState, ntcHWDesiredUnitType=ntcHWDesiredUnitType, ntcHWLastChangedTime=ntcHWLastChangedTime, ntcHWSlotEntry=ntcHWSlotEntry, ntcHWUnits=ntcHWUnits, ntcHWUnitEntry=ntcHWUnitEntry, ntcHWUnitEntryChanged=ntcHWUnitEntryChanged, ntcHWUnitTable=ntcHWUnitTable, ntcHWProductionDate=ntcHWProductionDate, ntcHWLoadInventoryContainer=ntcHWLoadInventoryContainer, ntcHWGroups=ntcHWGroups, ntcHWCompliances=ntcHWCompliances, ntcHWModule=ntcHWModule, ntcHWOperState=ntcHWOperState, ntcHWRestart=ntcHWRestart, ntcHWEvents=ntcHWEvents, ntcHWAvailabilityStatus=ntcHWAvailabilityStatus, ntcHWAdminState=ntcHWAdminState, ntcHWSlotTable=ntcHWSlotTable, ntcHWSerialNumber=ntcHWSerialNumber, ntcHWObjs=ntcHWObjs, PYSNMP_MODULE_ID=ntcHWModule)
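# A minimal loading sketch (assumes pysnmp is installed and this file is on
# the MIB search path):
#
#     from pysnmp.smi import builder
#     mibBuilder = builder.MibBuilder()
#     mibBuilder.loadModules('NOKIA-HWM-MIB')
#     ntcHWAdminState, = mibBuilder.importSymbols('NOKIA-HWM-MIB', 'ntcHWAdminState')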
790225e25d838f46b818488245b28e44b81fde95
| 710
|
py
|
Python
|
warehouse/views.py
|
thiagolcmelo/dynamic
|
9e4e71dd25ce3c778b17b62ef4062273d244a5ac
|
[
"MIT"
] | null | null | null |
warehouse/views.py
|
thiagolcmelo/dynamic
|
9e4e71dd25ce3c778b17b62ef4062273d244a5ac
|
[
"MIT"
] | null | null | null |
warehouse/views.py
|
thiagolcmelo/dynamic
|
9e4e71dd25ce3c778b17b62ef4062273d244a5ac
|
[
"MIT"
] | null | null | null |
# third-party
from flask import render_template, url_for, request, jsonify
# locals
from . import warehouse
@warehouse.route('/element_types', methods=['GET'])
def index():
return render_template("warehouse/element_types.html")
@warehouse.route('/element_type', methods=['POST'])
def create_new_element_type():
print(request.__dict__)
print(request.data)
print(request.get_json())
return jsonify({
"success": True
})
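# Usage sketch (not part of the original module; names are assumed): the POST
# endpoint above can be exercised with Flask's test client, given a factory
# `create_app` that registers this blueprint without a URL prefix.
#
# >>> from app import create_app  # hypothetical app factory
# >>> test_client = create_app().test_client()
# >>> resp = test_client.post("/element_type", json={"name": "pallet"})
# >>> resp.get_json()
# {'success': True}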
# @warehouse.route('/element_type', methods=['GET'])
# @warehouse.route('/element_type/<element_type_id>', methods=['GET'])
# def element_type(element_type_id=None):
# pass
# @warehouse.route('/element_type', methods=['POST'])
# def new_element_type()
| 26.296296
| 70
| 0.707042
|
from flask import render_template, url_for, request, jsonify
from . import warehouse
@warehouse.route('/element_types', methods=['GET'])
def index():
return render_template("warehouse/element_types.html")
@warehouse.route('/element_type', methods=['POST'])
def create_new_element_type():
print(request.__dict__)
print(request.data)
print(request.get_json())
return jsonify({
"success": True
})
| true
| true
|
790227386ce142fb8ecd7023bdcfa79290713f62
| 9,624
|
py
|
Python
|
SimPEG/electromagnetics/natural_source/survey.py
|
xli94/simpeg
|
bf765d73d1c104805e30bcd062426e36a1d48d51
|
[
"MIT"
] | 1
|
2021-12-09T18:33:24.000Z
|
2021-12-09T18:33:24.000Z
|
SimPEG/electromagnetics/natural_source/survey.py
|
albertoakel/teste_mgeo
|
a0078301f7dfd54431d6b51abcf092079ad0e5a3
|
[
"MIT"
] | null | null | null |
SimPEG/electromagnetics/natural_source/survey.py
|
albertoakel/teste_mgeo
|
a0078301f7dfd54431d6b51abcf092079ad0e5a3
|
[
"MIT"
] | 1
|
2021-07-30T19:15:23.000Z
|
2021-07-30T19:15:23.000Z
|
import sys
import numpy as np
from numpy.lib import recfunctions as recFunc
from ..frequency_domain.survey import Survey
from ...data import Data as BaseData
from ...utils import mkvc
from .sources import Planewave_xy_1Dprimary, Planewave_xy_1DhomotD
from .receivers import Point3DImpedance, Point3DTipper
from .utils.plot_utils import DataNSEMPlotMethods
#########
# Survey
#########
# class Survey(BaseSurvey):
# """
# Survey class for NSEM.
# **Requried**
# :param list srcList: List of sources associated with the survey
# **Optional**
# """
# srcPair = BaseNSEMSrc
# def __init__(self, srcList, **kwargs):
# # Sort these by frequency
# self.source_list = srcList
# BaseSurvey.__init__(self, **kwargs)
# _freqDict = {}
# for src in srcList:
# if src.freq not in _freqDict:
# _freqDict[src.freq] = []
# _freqDict[src.freq] += [src]
# self._freqDict = _freqDict
# self._freqs = sorted([f for f in self._freqDict])
# @property
# def freqs(self):
# """Frequencies"""
# return self._freqs
# @property
# def nFreq(self):
# """Number of frequencies"""
# return len(self._freqDict)
# def getSrcByFreq(self, freq):
# """Returns the sources associated with a specific frequency."""
# assert freq in self._freqDict, "The requested frequency is not in this survey."
# return self._freqDict[freq]
# def eval(self, f):
# """
# Evaluate and return Data given calculated fields
# :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM f: A NSEM fields object to evaluate data from
# :retype: SimPEG.EM.NSEM.Data
# :return: NSEM Data object
# """
# data = Data(self)
# for src in self.source_list:
# sys.stdout.flush()
# for rx in src.receiver_list:
# data[src, rx] = rx.eval(src, self.mesh, f)
# return data
# def evalDeriv(self, f):
# raise Exception('Use Sources to project fields deriv.')
#########
# Data
#########
class Data(BaseData, DataNSEMPlotMethods):
"""
Data class for NSEMdata. Stores the data vector indexed by the survey.
"""
def __init__(self, survey, dobs=None, relative_error=None, noise_floor=None):
BaseData.__init__(self, survey, dobs, relative_error, noise_floor)
def toRecArray(self, returnType="RealImag"):
"""
Returns a numpy.recarray for a SimpegNSEM impedance data object.
:param returnType: Switches between returning a rec array where the impedance is split to real and imaginary ('RealImag') or is a complex ('Complex')
:type returnType: str, optional
:rtype: numpy.recarray
:return: Record array with data, with indexed columns
"""
# Define the record fields
dtRI = [
("freq", float),
("x", float),
("y", float),
("z", float),
("zxxr", float),
("zxxi", float),
("zxyr", float),
("zxyi", float),
("zyxr", float),
("zyxi", float),
("zyyr", float),
("zyyi", float),
("tzxr", float),
("tzxi", float),
("tzyr", float),
("tzyi", float),
]
dtCP = [
("freq", float),
("x", float),
("y", float),
("z", float),
("zxx", complex),
("zxy", complex),
("zyx", complex),
("zyy", complex),
("tzx", complex),
("tzy", complex),
]
for src in self.survey.source_list:
# Temp array for all the receivers of the source.
# Note: needs to be written more generally,
# using diffterent rxTypes and not all the data at the locations
# Assume the same locs for all RX
locs = src.receiver_list[0].locations
if locs.shape[1] == 1:
locs = np.hstack((np.array([[0.0, 0.0]]), locs))
elif locs.shape[1] == 2:
locs = np.hstack((np.array([[0.0]]), locs))
tArrRec = np.concatenate(
(
src.freq * np.ones((locs.shape[0], 1)),
locs,
np.nan * np.ones((locs.shape[0], 12)),
),
axis=1,
).view(dtRI)
# Get the type and the value for the DataNSEM object as a list
typeList = [
[rx.orientation, rx.component, self[src, rx]]
for rx in src.receiver_list
]
# Insert the values to the temp array
for nr, (k, c, val) in enumerate(typeList):
zt_type = "t" if "z" in k else "z"
key = zt_type + k + c[0]
tArrRec[key] = mkvc(val, 2)
# Masked array
try:
outTemp = recFunc.stack_arrays((outTemp, tArrRec))
except NameError:
outTemp = tArrRec.copy()
if "RealImag" in returnType:
outArr = outTemp.copy()
elif "Complex" in returnType:
# Add the real and imaginary to a complex number
outArr = np.empty(outTemp.shape, dtype=dtCP)
for comp in ["freq", "x", "y", "z"]:
outArr[comp] = outTemp[comp].copy()
for comp in ["zxx", "zxy", "zyx", "zyy", "tzx", "tzy"]:
outArr[comp] = (
outTemp[comp + "r"].copy() + 1j * outTemp[comp + "i"].copy()
)
else:
            raise NotImplementedError(
                "{:s} is not implemented, has to be RealImag or Complex.".format(returnType)
            )
# Return
return outArr
@classmethod
def fromRecArray(cls, recArray, srcType="primary"):
"""
Class method that reads in a numpy record array to NSEMdata object.
:param recArray: Record array with the data. Has to have ('freq','x','y','z') columns and some ('zxx','zxy','zyx','zyy','tzx','tzy')
:type recArray: numpy.recarray
:param srcType: The type of SimPEG.EM.NSEM.SrcNSEM to be used
:type srcType: str, optional
"""
if srcType == "primary":
src = Planewave_xy_1Dprimary
elif srcType == "total":
src = Planewave_xy_1DhomotD
else:
raise NotImplementedError("{:s} is not a valid source type for NSEMdata")
# Find all the frequencies in recArray
uniFreq = np.unique(recArray["freq"].copy())
srcList = []
dataList = []
for freq in uniFreq:
# Initiate rxList
rxList = []
# Find that data for freq
dFreq = recArray[recArray["freq"] == freq].copy()
# Find the impedance rxTypes in the recArray.
rxTypes = [
comp
for comp in recArray.dtype.names
if (len(comp) == 4 or len(comp) == 3) and "z" in comp
]
for rxType in rxTypes:
# Find index of not nan values in rxType
notNaNind = ~np.isnan(dFreq[rxType].copy())
if np.any(notNaNind): # Make sure that there is any data to add.
locs = _rec_to_ndarr(dFreq[["x", "y", "z"]][notNaNind].copy())
if dFreq[rxType].dtype.name in "complex128":
if "t" in rxType:
rxList.append(Point3DTipper(locs, rxType[1:3], "real"))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(Point3DTipper(locs, rxType[1:3], "imag"))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
elif "z" in rxType:
rxList.append(Point3DImpedance(locs, rxType[1:3], "real"))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(Point3DImpedance(locs, rxType[1:3], "imag"))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
else:
component = "real" if "r" in rxType else "imag"
if "z" in rxType:
rxList.append(
Point3DImpedance(locs, rxType[1:3], component)
)
dataList.append(dFreq[rxType][notNaNind].copy())
if "t" in rxType:
rxList.append(Point3DTipper(locs, rxType[1:3], component))
dataList.append(dFreq[rxType][notNaNind].copy())
srcList.append(src(rxList, freq))
# Make a survey
survey = Survey(srcList)
dataVec = np.hstack(dataList)
return cls(survey, dataVec)
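    # Usage sketch (not part of the original class; `data` is an assumed
    # Data instance): round-trip the survey data through a record array.
    #
    # >>> rec = data.toRecArray("Complex")    # Data -> numpy.recarray
    # >>> data2 = Data.fromRecArray(rec)      # numpy.recarray -> Data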
def _rec_to_ndarr(rec_arr, data_type=float):
"""
Function to transform a numpy record array to a nd array.
dupe of SimPEG.electromagnetics.natural_source.utils.rec_to_ndarr to avoid circular import
"""
# fix for numpy >= 1.16.0
# https://numpy.org/devdocs/release/1.16.0-notes.html#multi-field-views-return-a-view-instead-of-a-copy
return np.array(recFunc.structured_to_unstructured(recFunc.repack_fields(rec_arr[list(rec_arr.dtype.names)])),
dtype=data_type)
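# Example sketch (illustrative values, not from the original file): flattening
# a structured array of station locations into a plain (n, 3) float ndarray.
#
# >>> arr = np.array([(0.0, 1.0, 2.0)], dtype=[("x", float), ("y", float), ("z", float)])
# >>> _rec_to_ndarr(arr).shape
# (1, 3)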
| 36.732824
| 157
| 0.520989
|
import sys
import numpy as np
from numpy.lib import recfunctions as recFunc
from ..frequency_domain.survey import Survey
from ...data import Data as BaseData
from ...utils import mkvc
from .sources import Planewave_xy_1Dprimary, Planewave_xy_1DhomotD
from .receivers import Point3DImpedance, Point3DTipper
from .utils.plot_utils import DataNSEMPlotMethods
class Data(BaseData, DataNSEMPlotMethods):
    def __init__(self, survey, dobs=None, relative_error=None, noise_floor=None):
BaseData.__init__(self, survey, dobs, relative_error, noise_floor)
def toRecArray(self, returnType="RealImag"):
dtRI = [
("freq", float),
("x", float),
("y", float),
("z", float),
("zxxr", float),
("zxxi", float),
("zxyr", float),
("zxyi", float),
("zyxr", float),
("zyxi", float),
("zyyr", float),
("zyyi", float),
("tzxr", float),
("tzxi", float),
("tzyr", float),
("tzyi", float),
]
dtCP = [
("freq", float),
("x", float),
("y", float),
("z", float),
("zxx", complex),
("zxy", complex),
("zyx", complex),
("zyy", complex),
("tzx", complex),
("tzy", complex),
]
for src in self.survey.source_list:
locs = src.receiver_list[0].locations
if locs.shape[1] == 1:
locs = np.hstack((np.array([[0.0, 0.0]]), locs))
elif locs.shape[1] == 2:
locs = np.hstack((np.array([[0.0]]), locs))
tArrRec = np.concatenate(
(
src.freq * np.ones((locs.shape[0], 1)),
locs,
np.nan * np.ones((locs.shape[0], 12)),
),
axis=1,
).view(dtRI)
typeList = [
[rx.orientation, rx.component, self[src, rx]]
for rx in src.receiver_list
]
for nr, (k, c, val) in enumerate(typeList):
zt_type = "t" if "z" in k else "z"
key = zt_type + k + c[0]
tArrRec[key] = mkvc(val, 2)
try:
outTemp = recFunc.stack_arrays((outTemp, tArrRec))
except NameError:
outTemp = tArrRec.copy()
if "RealImag" in returnType:
outArr = outTemp.copy()
elif "Complex" in returnType:
outArr = np.empty(outTemp.shape, dtype=dtCP)
for comp in ["freq", "x", "y", "z"]:
outArr[comp] = outTemp[comp].copy()
for comp in ["zxx", "zxy", "zyx", "zyy", "tzx", "tzy"]:
outArr[comp] = (
outTemp[comp + "r"].copy() + 1j * outTemp[comp + "i"].copy()
)
else:
            raise NotImplementedError(
                "{:s} is not implemented, has to be RealImag or Complex.".format(returnType)
            )
return outArr
@classmethod
def fromRecArray(cls, recArray, srcType="primary"):
if srcType == "primary":
src = Planewave_xy_1Dprimary
elif srcType == "total":
src = Planewave_xy_1DhomotD
else:
raise NotImplementedError("{:s} is not a valid source type for NSEMdata")
uniFreq = np.unique(recArray["freq"].copy())
srcList = []
dataList = []
for freq in uniFreq:
rxList = []
dFreq = recArray[recArray["freq"] == freq].copy()
rxTypes = [
comp
for comp in recArray.dtype.names
if (len(comp) == 4 or len(comp) == 3) and "z" in comp
]
for rxType in rxTypes:
notNaNind = ~np.isnan(dFreq[rxType].copy())
if np.any(notNaNind):
locs = _rec_to_ndarr(dFreq[["x", "y", "z"]][notNaNind].copy())
if dFreq[rxType].dtype.name in "complex128":
if "t" in rxType:
rxList.append(Point3DTipper(locs, rxType[1:3], "real"))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(Point3DTipper(locs, rxType[1:3], "imag"))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
elif "z" in rxType:
rxList.append(Point3DImpedance(locs, rxType[1:3], "real"))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(Point3DImpedance(locs, rxType[1:3], "imag"))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
else:
component = "real" if "r" in rxType else "imag"
if "z" in rxType:
rxList.append(
Point3DImpedance(locs, rxType[1:3], component)
)
dataList.append(dFreq[rxType][notNaNind].copy())
if "t" in rxType:
rxList.append(Point3DTipper(locs, rxType[1:3], component))
dataList.append(dFreq[rxType][notNaNind].copy())
srcList.append(src(rxList, freq))
survey = Survey(srcList)
dataVec = np.hstack(dataList)
return cls(survey, dataVec)
def _rec_to_ndarr(rec_arr, data_type=float):
    return np.array(recFunc.structured_to_unstructured(recFunc.repack_fields(rec_arr[list(rec_arr.dtype.names)])),
                    dtype=data_type)
| true
| true
|
7902275a5ba87c6c4f294a59d97f714f0d537b1a
| 77,984
|
py
|
Python
|
terminusdb_client/woqlclient/woqlClient.py
|
terminusdb/woql-client-p
|
69f824159ee3c4977a9813f81cf3cb00c6efce01
|
[
"Apache-2.0"
] | null | null | null |
terminusdb_client/woqlclient/woqlClient.py
|
terminusdb/woql-client-p
|
69f824159ee3c4977a9813f81cf3cb00c6efce01
|
[
"Apache-2.0"
] | null | null | null |
terminusdb_client/woqlclient/woqlClient.py
|
terminusdb/woql-client-p
|
69f824159ee3c4977a9813f81cf3cb00c6efce01
|
[
"Apache-2.0"
] | null | null | null |
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
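# Usage sketch (hypothetical URL and token values): both helpers plug directly
# into the `auth=` argument of requests, which is how the client uses them below.
#
# >>> requests.get("https://cloud.example.com/api/info", auth=JWTAuth("<jwt-token>"))
# >>> requests.get("https://cloud.example.com/api/info", auth=APITokenAuth("<api-token>"))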
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
    def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
    def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
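# Usage sketch (illustrative diff, not from the original file): a SwapValue
# patch exposes its old and new states through `before` and `update`.
#
# >>> p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
# >>> p.before
# {'name': 'Jane'}
# >>> p.update
# {'name': 'Janine'}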
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
        URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
        TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
            Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token to connect
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
        api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
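    # Usage sketch (hypothetical team/db names): token-based connection, with
    # the API token read from the TERMINUSDB_ACCESS_TOKEN environment variable.
    #
    # >>> client = WOQLClient("https://cloud.terminusdb.com/my_team/")
    # >>> client.connect(team="my_team", db="my_db", use_token=True)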
def close(self) -> None:
"""Undo connect and close the connection.
        The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
        Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
            maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.da
tetime(2020, 9, 3, 15, 29, 34)}, {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'm
essage': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'rciy1rfu5foj67ch00ow6f6n
njjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}, {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', '
timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gav
in@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)
}, {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account
object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '9egc4h0m36l5rbq1alr1fki6jbfu
        kuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
            Since TerminusDB currently does not support open transactions, this method is not applicable to its usage. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
            Database identifier to set in the config.
        team : str
            Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
        ### TODO: make triples work again
        raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "update_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "insert_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
            Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
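    # Usage sketch (assumed schema with a Person class): stream every instance
    # document matching a partial template.
    #
    # >>> template = {"@type": "Person", "age": 42}
    # >>> for doc in client.query_document(template, count=10):
    # ...     print(doc["@id"])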
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
        iri_id : str
            IRI id of the document to retrieve
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
            Specific type of the documents to retrieve
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, default to be 0
        count: int or None
            The maximum number of returned results; if None (default) it will return all of the available results.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, default to be 0
        count: int or None
            The maximum number of returned results; if None (default) it will return all of the available results.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
        full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never` it will never compress the data.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
            list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
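    # Usage sketch (assumed document shape): bodies larger than the `compress`
    # threshold (1024 bytes by default) are gzipped transparently before POSTing.
    #
    # >>> ids = client.insert_document(
    # ...     [{"@type": "Person", "name": "Jane"}],
    # ...     commit_msg="add Jane",
    # ... )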
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never` it will never compress the data.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never` it will never compress the data.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
            Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
returns
-------
Bool
            if the document exists
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
returns
-------
dict
Dictionary containing information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
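    # Usage sketch (assumed class name): inspect the declared properties of a
    # schema class.
    #
    # >>> frame = client.get_class_frame("Person")
    # >>> sorted(frame.keys())  # e.g. ['@type', 'age', 'name'] (illustrative)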
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
        commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
            If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
            remote branch to pull from, default to be your current branch
        message: str, optional
            optional commit message
        author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
            author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
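    # Editor's sketch: fetch updates the local mirror of the remote's commit
    # graph; "origin" is the conventional remote id assumed here.
    #
    # >>> client.fetch("origin")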
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
            remote branch to push to; defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
        >>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
        branch : str, optional
            the branch for the rebase
        commit : str, optional
            the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
            Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
            Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
            Whether the given commit is an id or a path. Defaults to treating it as an id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
            id of the newly created squash commit (pass it to reset to move the branch head)
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
        if document_id is not None:
            if "before_data_version" in request_dict:
                if document_id.startswith("terminusdb:///data"):
                    request_dict["document_id"] = document_id
                else:
                    raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
            else:
                raise ValueError("`document_id` can only be used in conjunction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
        There is no need to connect first when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
        >>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
        >>> print(result)
        '{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
        description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
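    # Editor's note: the resolution order above is basic auth (user/key),
    # then JWT token, then explicit API token, and finally the
    # TERMINUSDB_ACCESS_TOKEN environment variable; a KeyError surfaces if
    # that variable is unset, and RuntimeError if the client never connected.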
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
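    # Editor's sketch (db name "some_db" is assumed):
    #
    # >>> client.get_database("some_db")   # metadata dict, or None if absent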
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
    def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
        list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
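    # Editor's note (illustrative, values assumed): with team "admin",
    # db "mydb", repo "local" and branch "main", _branch_base("woql")
    # resolves to "<server>/api/woql/admin/mydb/local/branch/main".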
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
| 33.241262
| 313
| 0.552408
|
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
class JWTAuth(requests.auth.AuthBase):
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
    def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
    def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
def __init__(self, server_url: str, **kwargs) -> None:
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
self._connected = False
def _check_connection(self, check_db=True) -> None:
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
    def get_triples(self, graph_type: str) -> str:
        raise InterfaceError(
            "get_triples is temporarily not available in this version")
        self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
    def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
        raise InterfaceError(
            "update_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
    def insert_triples(
        self, graph_type: str, turtle, commit_msg: Optional[str] = None
    ) -> None:
        raise InterfaceError(
            "insert_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema",
"DocumentTemplate",
List["DocumentTemplate"],
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema",
"DocumentTemplate",
List["DocumentTemplate"],
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema",
"DocumentTemplate",
List["DocumentTemplate"],
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
    def commit(self):
        pass
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
) -> Union[dict, str]:
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
            author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema",
"DocumentTemplate",
List["DocumentTemplate"],
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema",
"DocumentTemplate",
List["DocumentTemplate"],
],
document_id: Union[str, None] = None
):
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
        if document_id is not None:
            if "before_data_version" in request_dict:
                if document_id.startswith("terminusdb:///data"):
                    request_dict["document_id"] = document_id
                else:
                    raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
            else:
                raise ValueError("`document_id` can only be used in conjunction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema",
"DocumentTemplate",
List["DocumentTemplate"],
],
patch: Patch,
):
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
def get_database(self, dbid: str) -> Optional[dict]:
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
    def list_databases(self) -> List[str]:
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
| true
| true
|
7902286a22c1d2ca76b1aa31973b48dc206917ce
| 4,725
|
py
|
Python
|
tests/test_examples.py
|
MF-Zerai/HiSim
|
7497e6791dc937ee6e26ceafbf1bc2ae2449f123
|
[
"MIT"
] | null | null | null |
tests/test_examples.py
|
MF-Zerai/HiSim
|
7497e6791dc937ee6e26ceafbf1bc2ae2449f123
|
[
"MIT"
] | null | null | null |
tests/test_examples.py
|
MF-Zerai/HiSim
|
7497e6791dc937ee6e26ceafbf1bc2ae2449f123
|
[
"MIT"
] | 1
|
2022-03-13T16:15:36.000Z
|
2022-03-13T16:15:36.000Z
|
import os
from hisim import hisim_main
from hisim.simulationparameters import SimulationParameters
import shutil
import random
from hisim import log
from hisim.utils import PostProcessingOptions
import matplotlib.pyplot as plt
from hisim import utils
@utils.measure_execution_time
def test_basic_household():
# if os.path.isdir("../hisim/inputs/cache"):
# shutil.rmtree("../hisim/inputs/cache")
path = "../examples/basic_household.py"
func = "basic_household_explicit"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    hisim_main.main(path, func, mysimpar)
log.information(os.getcwd())
@utils.measure_execution_time
def test_basic_household_with_default_connections():
# if os.path.isdir("../hisim/inputs/cache"):
# shutil.rmtree("../hisim/inputs/cache")
path = "../examples/basic_household.py"
func = "basic_household_with_default_connections"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    hisim_main.main(path, func, mysimpar)
log.information(os.getcwd())
@utils.measure_execution_time
def test_basic_household_with_all_resultfiles():
# if os.path.isdir("../hisim/inputs/cache"):
# shutil.rmtree("../hisim/inputs/cache")
path = "../examples/basic_household.py"
func = "basic_household_explicit"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
for option in PostProcessingOptions:
mysimpar.post_processing_options.append(option)
    hisim_main.main(path, func, mysimpar)
log.information(os.getcwd())
#
# def test_basic_household_with_all_resultfiles_full_year():
# if os.path.isdir("../hisim/inputs/cache"):
# shutil.rmtree("../hisim/inputs/cache")
# path = "../examples/basic_household.py"
# func = "basic_household_explicit"
# mysimpar = SimulationParameters.full_year(year=2019, seconds_per_timestep=60)
# for option in PostProcessingOptions:
# mysimpar.post_processing_options.append(option)
# log.information(option)
# hisim_main.main(path, func,mysimpar)
# log.information(os.getcwd())
# def test_basic_household_boiler():
# path = "../examples/basic_household_boiler.py"
# func = "basic_household_boiler_explicit"
# mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
# hisim_main.main(path, func, mysimpar)
# def test_basic_household_districtheating():
# path = "../examples/basic_household_Districtheating.py"
# func = "basic_household_Districtheating_explicit"
# mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
# hisim_main.main(path, func, mysimpar)
# def test_basic_household_oilheater():
# path = "../examples/basic_household_Oilheater.py"
# func = "basic_household_Oilheater_explicit"
# mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
# hisim_main.main(path, func, mysimpar)
@utils.measure_execution_time
def test_modular_household_configurations( ):
path = "../examples/modular_household.py"
func = "modular_household_explicit"
    mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
# for pv_included in [ True, False ]:
# for smart_devices_included in [ True, False ]:
# for boiler_included in [ 'electricity', 'hydrogen', None ]:
# for heating_device_included in [ 'heat_pump', 'oil_heater', 'district_heating' ]:
    predictive = True
    pv_included = random.choice([True, False])
    smart_devices_included = random.choice([True, False])
    boiler_included = random.choice(['electricity', 'hydrogen', None])
    heating_device_included = random.choice(['heat_pump', 'oil_heater', 'district_heating'])
    mysimpar.reset_system_config(predictive=predictive,
                                 pv_included=pv_included,
                                 smart_devices_included=smart_devices_included,
                                 boiler_included=boiler_included,
                                 heating_device_included=heating_device_included)
    hisim_main.main(path, func, mysimpar)
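# Editor's sketch: to sweep the whole configuration space instead of sampling,
# the commented-out loops above could be replaced by itertools.product, e.g.:
#
# from itertools import product
# for pv, smart, boiler, heater in product(
#         [True, False], [True, False],
#         ['electricity', 'hydrogen', None],
#         ['heat_pump', 'oil_heater', 'district_heating']):
#     mysimpar.reset_system_config(predictive=True, pv_included=pv,
#                                  smart_devices_included=smart,
#                                  boiler_included=boiler,
#                                  heating_device_included=heater)
#     hisim_main.main(path, func, mysimpar)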
@utils.measure_execution_time
def test_first_example():
path = "../examples/examples.py"
func = "first_example"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
hisim_main.main(path, func, mysimpar)
@utils.measure_execution_time
def test_second_example():
path = "../examples/examples.py"
func = "second_example"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
hisim_main.main(path, func, mysimpar)
| 44.575472
| 99
| 0.720212
|
import os
from hisim import hisim_main
from hisim.simulationparameters import SimulationParameters
import shutil
import random
from hisim import log
from hisim.utils import PostProcessingOptions
import matplotlib.pyplot as plt
from hisim import utils
@utils.measure_execution_time
def test_basic_household():
path = "../examples/basic_household.py"
func = "basic_household_explicit"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    hisim_main.main(path, func, mysimpar)
log.information(os.getcwd())
@utils.measure_execution_time
def test_basic_household_with_default_connections():
path = "../examples/basic_household.py"
func = "basic_household_with_default_connections"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    hisim_main.main(path, func, mysimpar)
log.information(os.getcwd())
@utils.measure_execution_time
def test_basic_household_with_all_resultfiles():
path = "../examples/basic_household.py"
func = "basic_household_explicit"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
for option in PostProcessingOptions:
mysimpar.post_processing_options.append(option)
    hisim_main.main(path, func, mysimpar)
log.information(os.getcwd())
@utils.measure_execution_time
def test_modular_household_configurations():
    path = "../examples/modular_household.py"
    func = "modular_household_explicit"
    mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    predictive = True
    pv_included = random.choice([True, False])
    smart_devices_included = random.choice([True, False])
    boiler_included = random.choice(['electricity', 'hydrogen', None])
    heating_device_included = random.choice(['heat_pump', 'oil_heater', 'district_heating'])
    mysimpar.reset_system_config(predictive=predictive,
                                 pv_included=pv_included,
                                 smart_devices_included=smart_devices_included,
                                 boiler_included=boiler_included,
                                 heating_device_included=heating_device_included)
    hisim_main.main(path, func, mysimpar)
@utils.measure_execution_time
def test_first_example():
path = "../examples/examples.py"
func = "first_example"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
hisim_main.main(path, func, mysimpar)
@utils.measure_execution_time
def test_second_example():
path = "../examples/examples.py"
func = "second_example"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
hisim_main.main(path, func, mysimpar)
| true
| true
|
790228f0d7b0b7c0c0bf97f9f62fc9c2186c692e
| 404
|
py
|
Python
|
python_part/trees.py
|
daniel98789/bullet3
|
b57aa900293e21f7808ea2697a5b64b494867492
|
[
"Zlib"
] | null | null | null |
python_part/trees.py
|
daniel98789/bullet3
|
b57aa900293e21f7808ea2697a5b64b494867492
|
[
"Zlib"
] | null | null | null |
python_part/trees.py
|
daniel98789/bullet3
|
b57aa900293e21f7808ea2697a5b64b494867492
|
[
"Zlib"
] | null | null | null |
import PyBulletEnv
import Obj
from numpy import random
if __name__ == "__main__":
env = PyBulletEnv.PyBulletEnv()
env.setup()
tree = Obj.Obj("data/tree/Tree.obj")
forest = []
for _ in range(2):
x = random.uniform(0, 20)
y = random.uniform(0, 20)
        forest.append(tree.createObjectObj([x, y, 3.7], 1.0))
env.analyze(tree, forest)
env.run()
| 18.363636
| 62
| 0.589109
|
import PyBulletEnv
import Obj
from numpy import random
if __name__ == "__main__":
env = PyBulletEnv.PyBulletEnv()
env.setup()
tree = Obj.Obj("data/tree/Tree.obj")
forest = []
for _ in range(2):
x = random.uniform(0, 20)
y = random.uniform(0, 20)
        forest.append(tree.createObjectObj([x, y, 3.7], 1.0))
env.analyze(tree, forest)
env.run()
| true
| true
|
790229210c0ff020be13cad60a8632a62d337f0e
| 2,584
|
py
|
Python
|
test/test_v3_client.py
|
mukasaj/Ex-assemblyline-client
|
f3605157d0d5c8ecc852d8cdf5ef9ae2f15a42a3
|
[
"MIT"
] | null | null | null |
test/test_v3_client.py
|
mukasaj/Ex-assemblyline-client
|
f3605157d0d5c8ecc852d8cdf5ef9ae2f15a42a3
|
[
"MIT"
] | null | null | null |
test/test_v3_client.py
|
mukasaj/Ex-assemblyline-client
|
f3605157d0d5c8ecc852d8cdf5ef9ae2f15a42a3
|
[
"MIT"
] | null | null | null |
import assemblyline_client
import mocks
import mock
from base64 import b64decode
def test_bad_cert():
"""Make sure that the client detects that the test cert is self signed."""
with mocks.Server() as server:
try:
assemblyline_client.get_client(server.address)
assert False
except assemblyline_client.ClientError as ce:
assert 'CERTIFICATE_VERIFY_FAILED' in str(ce) or 'certificate verify failed' in str(ce)
def test_noauth():
"""The test server should let us login with no authentication."""
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False)
assert len(server.logins) == 1
def test_noauth_submit(mocker):
"""Submit a file and ensure that the same file is unpacked."""
with mocks.Server() as server:
client = assemblyline_client.get_client(server.address, verify=False)
submits = server.submits
# Submit a file with contents
client.submit(path='readme.txt', contents=b'abc123')
assert len(submits) == 1
assert b64decode(submits[0]['binary']) == b'abc123'
assert submits[0]['name'] == 'readme.txt'
submits.pop()
# Submit a file from a file
mocker.patch('os.path.exists', return_value=True)
mocker.patch('assemblyline_client.v3_client.open', mock.mock_open(read_data=b'abc123'), create=True)
client.submit(path='readme.txt')
assert len(submits) == 1
assert b64decode(submits[0]['binary']) == b'abc123'
assert submits[0]['name'] == 'readme.txt'
submits.pop()
def test_encrypt_password_auth():
"""Send an encryped password and decrypt it."""
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False, auth=('username', 'password'))
assert len(server.logins) == 1
assert server.logins[0]['user'] == 'username'
assert server.logins[0]['password'] != 'password'
assert server.private_key.decrypt(b64decode(server.logins[0]['password']), 'ERROR') == b'password'
def test_encrypt_apikey_auth():
"""Send an encryped apikey and decrypt it."""
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False, apikey=('username', 'ANAPIKEY'))
assert len(server.logins) == 1
assert server.logins[0]['user'] == 'username'
assert server.logins[0]['apikey'] != 'ANAPIKEY'
assert server.private_key.decrypt(b64decode(server.logins[0]['apikey']), 'ERROR') == b'ANAPIKEY'
| 38
| 108
| 0.662152
|
import assemblyline_client
import mocks
import mock
from base64 import b64decode
def test_bad_cert():
with mocks.Server() as server:
try:
assemblyline_client.get_client(server.address)
assert False
except assemblyline_client.ClientError as ce:
assert 'CERTIFICATE_VERIFY_FAILED' in str(ce) or 'certificate verify failed' in str(ce)
def test_noauth():
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False)
assert len(server.logins) == 1
def test_noauth_submit(mocker):
with mocks.Server() as server:
client = assemblyline_client.get_client(server.address, verify=False)
submits = server.submits
client.submit(path='readme.txt', contents=b'abc123')
assert len(submits) == 1
assert b64decode(submits[0]['binary']) == b'abc123'
assert submits[0]['name'] == 'readme.txt'
submits.pop()
mocker.patch('os.path.exists', return_value=True)
mocker.patch('assemblyline_client.v3_client.open', mock.mock_open(read_data=b'abc123'), create=True)
client.submit(path='readme.txt')
assert len(submits) == 1
assert b64decode(submits[0]['binary']) == b'abc123'
assert submits[0]['name'] == 'readme.txt'
submits.pop()
def test_encrypt_password_auth():
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False, auth=('username', 'password'))
assert len(server.logins) == 1
assert server.logins[0]['user'] == 'username'
assert server.logins[0]['password'] != 'password'
assert server.private_key.decrypt(b64decode(server.logins[0]['password']), 'ERROR') == b'password'
def test_encrypt_apikey_auth():
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False, apikey=('username', 'ANAPIKEY'))
assert len(server.logins) == 1
assert server.logins[0]['user'] == 'username'
assert server.logins[0]['apikey'] != 'ANAPIKEY'
assert server.private_key.decrypt(b64decode(server.logins[0]['apikey']), 'ERROR') == b'ANAPIKEY'
| true
| true
|
7902293dc0914a97b8baa901246888adae996d22
| 11,381
|
py
|
Python
|
utils/training_loop.py
|
houcharlie/federated
|
b8b12f2f424f4c637be1e1fe8482ecc94ee3765a
|
[
"Apache-2.0"
] | null | null | null |
utils/training_loop.py
|
houcharlie/federated
|
b8b12f2f424f4c637be1e1fe8482ecc94ee3765a
|
[
"Apache-2.0"
] | null | null | null |
utils/training_loop.py
|
houcharlie/federated
|
b8b12f2f424f4c637be1e1fe8482ecc94ee3765a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal dispatcher for training loops."""
import contextlib
import os.path
import pprint
import time
from typing import Any, Callable, Dict, List, Optional
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
class IterativeProcessCompatibilityError(TypeError):
pass
def create_if_not_exists(path):
try:
tf.io.gfile.makedirs(path)
except tf.errors.OpError:
logging.info('Skipping creation of directory [%s], already exists', path)
def _setup_outputs(root_output_dir,
experiment_name,
rounds_per_profile=0):
"""Set up directories for experiment loops, write hyperparameters to disk."""
if not experiment_name:
raise ValueError('experiment_name must be specified.')
create_if_not_exists(root_output_dir)
checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)
create_if_not_exists(checkpoint_dir)
checkpoint_mngr = tff.simulation.FileCheckpointManager(checkpoint_dir)
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
create_if_not_exists(results_dir)
csv_file = os.path.join(results_dir, 'experiment.metrics.csv')
metrics_mngr = tff.simulation.CSVMetricsManager(csv_file)
summary_logdir = os.path.join(root_output_dir, 'logdir', experiment_name)
tb_mngr = tff.simulation.TensorBoardManager(summary_dir=summary_logdir)
logging.info('Writing...')
logging.info(' checkpoints to: %s', checkpoint_dir)
logging.info(' metrics csv to: %s', metrics_mngr.metrics_filename)
logging.info(' summaries to: %s', summary_logdir)
@contextlib.contextmanager
def profiler(round_num):
if (rounds_per_profile > 0 and round_num % rounds_per_profile == 0):
with tf.profiler.experimental.Profile(summary_logdir):
yield
else:
yield
return checkpoint_mngr, metrics_mngr, tb_mngr, profiler
def _write_metrics(metrics_mngr, tb_mngr, metrics, round_num):
"""Atomic metrics writer which inlines logic from MetricsHook class."""
if not isinstance(metrics, dict):
raise TypeError('metrics should be type `dict`.')
if not isinstance(round_num, int):
raise TypeError('round_num should be type `int`.')
logging.info('Metrics at round {:d}:\n{!s}'.format(round_num,
pprint.pformat(metrics)))
metrics_mngr.save_metrics(metrics, round_num)
tb_mngr.save_metrics(metrics, round_num)
def _compute_numpy_l2_difference(model, previous_model):
squared_norms = tf.nest.map_structure(lambda x, y: tf.linalg.norm(x - y)**2,
model, previous_model)
l2_total_tensor = tf.reduce_sum(tf.nest.flatten(squared_norms))**0.5
return l2_total_tensor.numpy()
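# Worked example (illustrative only, not part of the original module): for the
# nested structures a = [tf.constant([3., 0.]), tf.constant([0.])] and
# b = [tf.constant([0., 0.]), tf.constant([4.])], the per-leaf squared norms
# are 9 and 16, so _compute_numpy_l2_difference(a, b) returns sqrt(25) = 5.0.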
def _check_iterative_process_compatibility(iterative_process):
"""Checks the compatibility of an iterative process with the training loop."""
error_message = (
'The iterative_process argument must be of '
      'type `tff.templates.IterativeProcess`, and must have an '
'attribute `get_model_weights`, which must be a `tff.Computation`. This '
'computation must accept as input the state of `iterative_process`, and '
'its output must be a nested structure of tensors matching the input '
'shape of `validation_fn`.')
compatibility_error = IterativeProcessCompatibilityError(error_message)
if not isinstance(iterative_process, tff.templates.IterativeProcess):
raise compatibility_error
if not hasattr(iterative_process, 'get_model_weights'):
raise compatibility_error
elif not callable(iterative_process.get_model_weights):
raise compatibility_error
get_model_weights_fn = iterative_process.get_model_weights
if not isinstance(get_model_weights_fn, tff.Computation):
raise compatibility_error
input_type = get_model_weights_fn.type_signature.parameter
server_state_type = iterative_process.state_type.member
  if not server_state_type.is_assignable_from(input_type):
    raise compatibility_error
# TODO(b/174268978): Once we enforce federated evaluations, we can check
# compatibility with `validation_fn` without actually running the function.
def run(iterative_process: tff.templates.IterativeProcess,
client_datasets_fn: Callable[[int], List[tf.data.Dataset]],
validation_fn: Callable[[Any, int], Dict[str, float]],
total_rounds: int,
experiment_name: str,
test_fn: Optional[Callable[[Any], Dict[str, float]]] = None,
root_output_dir: Optional[str] = '/tmp/fed_opt',
rounds_per_eval: Optional[int] = 1,
rounds_per_checkpoint: Optional[int] = 50,
rounds_per_profile: Optional[int] = 0):
"""Runs federated training for a given `tff.templates.IterativeProcess`.
We assume that the iterative process has the following functional type
signatures:
* `initialize`: `( -> S@SERVER)` where `S` represents the server state.
* `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
represents the server state, `{B*}` represents the client datasets,
and `T` represents a python `Mapping` object.
The iterative process must also have a callable attribute `get_model_weights`
that takes as input the state of the iterative process, and returns a
`tff.learning.ModelWeights` object.
Args:
iterative_process: A `tff.templates.IterativeProcess` instance to run.
client_datasets_fn: Function accepting an integer argument (the round
number) and returning a list of client datasets to use as federated data
for that round.
validation_fn: A callable accepting a `tff.learning.ModelWeights` and the
current round number, and returning a dict of evaluation metrics. Used to
compute validation metrics throughout the training process.
total_rounds: The number of federated training rounds to perform.
experiment_name: The name of the experiment being run. This will be appended
to the `root_output_dir` for purposes of writing outputs.
test_fn: An optional callable accepting a `tff.learning.ModelWeights` and
returning a dict of test set metrics. Used to compute test metrics at the
end of the training process.
root_output_dir: The name of the root output directory for writing
experiment outputs.
rounds_per_eval: How often to compute validation metrics.
rounds_per_checkpoint: How often to checkpoint the iterative process state.
If you expect the job to restart frequently, this should be small. If no
interruptions are expected, this can be made larger.
rounds_per_profile: Experimental setting. If set to a value greater than 0,
this dictates how often a TensorFlow profiler is run.
Returns:
The final `state` of the iterative process after training.
"""
_check_iterative_process_compatibility(iterative_process)
if not callable(client_datasets_fn):
raise TypeError('client_datasets_fn should be callable.')
if not callable(validation_fn):
raise TypeError('validation_fn should be callable.')
if test_fn is not None and not callable(test_fn):
raise TypeError('test_fn should be callable.')
logging.info('Starting iterative_process training loop...')
initial_state = iterative_process.initialize()
checkpoint_mngr, metrics_mngr, tb_mngr, profiler = _setup_outputs(
root_output_dir, experiment_name, rounds_per_profile)
logging.info('Asking checkpoint manager to load checkpoint.')
state, round_num = checkpoint_mngr.load_latest_checkpoint(initial_state)
if state is None:
logging.info('Initializing experiment from scratch.')
state = initial_state
round_num = 0
else:
logging.info('Restarted from checkpoint round %d', round_num)
round_num += 1 # Increment to avoid overwriting current checkpoint
metrics_mngr.clear_metrics(round_num)
current_model = iterative_process.get_model_weights(state)
loop_start_time = time.time()
loop_start_round = round_num
while round_num < total_rounds:
data_prep_start_time = time.time()
federated_train_data = client_datasets_fn(round_num)
train_metrics = {
'prepare_datasets_secs': time.time() - data_prep_start_time
}
training_start_time = time.time()
prev_model = current_model
# TODO(b/145604851): This try/except is used to circumvent ambiguous TF
# errors during training, and should be removed once the root cause is
# determined (and possibly fixed).
try:
with profiler(round_num):
state, round_metrics = iterative_process.next(state,
federated_train_data)
except (tf.errors.FailedPreconditionError, tf.errors.NotFoundError,
tf.errors.InternalError) as e:
logging.warning('Caught %s exception while running round %d:\n\t%s',
type(e), round_num, e)
continue # restart the loop without incrementing the round number
current_model = iterative_process.get_model_weights(state)
train_metrics['training_secs'] = time.time() - training_start_time
train_metrics['model_delta_l2_norm'] = _compute_numpy_l2_difference(
current_model, prev_model)
train_metrics['client_drift'] = state.client_drift
train_metrics.update(round_metrics)
loop_time = time.time() - loop_start_time
loop_rounds = (round_num - loop_start_round + 1)
    logging.info('Round {:2d}, {:.2f}s per round on average.'.format(
round_num, loop_time / loop_rounds))
if (round_num % rounds_per_checkpoint == 0 or
round_num == total_rounds - 1):
save_checkpoint_start_time = time.time()
checkpoint_mngr.save_checkpoint(state, round_num)
train_metrics['save_checkpoint_secs'] = (
time.time() - save_checkpoint_start_time)
metrics = {'train': train_metrics}
if round_num % rounds_per_eval == 0:
# Compute validation metrics
evaluate_start_time = time.time()
validation_metrics = validation_fn(current_model, round_num)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
_write_metrics(metrics_mngr, tb_mngr, metrics, round_num)
round_num += 1
# Final metrics evaluation once the training has completed
metrics = {}
# Validation metrics
evaluate_start_time = time.time()
validation_metrics = validation_fn(current_model, round_num)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
# Test set metrics
if test_fn:
test_start_time = time.time()
test_metrics = test_fn(current_model)
test_metrics['evaluate_secs'] = time.time() - test_start_time
metrics['test'] = test_metrics
_write_metrics(metrics_mngr, tb_mngr, metrics, total_rounds)
return state
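# Illustrative call pattern (hypothetical names, not part of the original
# module). `run` only needs an iterative process exposing `get_model_weights`
# plus the callables validated above:
#
#   final_state = run(
#       iterative_process=my_process,       # tff.templates.IterativeProcess
#       client_datasets_fn=sample_clients,  # round_num -> [tf.data.Dataset]
#       validation_fn=evaluate_model,       # (weights, round_num) -> {str: float}
#       total_rounds=100,
#       experiment_name='my_experiment')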
| 41.086643
| 80
| 0.734645
|
import contextlib
import os.path
import pprint
import time
from typing import Any, Callable, Dict, List, Optional
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
class IterativeProcessCompatibilityError(TypeError):
pass
def create_if_not_exists(path):
try:
tf.io.gfile.makedirs(path)
except tf.errors.OpError:
logging.info('Skipping creation of directory [%s], already exists', path)
def _setup_outputs(root_output_dir,
experiment_name,
rounds_per_profile=0):
if not experiment_name:
raise ValueError('experiment_name must be specified.')
create_if_not_exists(root_output_dir)
checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)
create_if_not_exists(checkpoint_dir)
checkpoint_mngr = tff.simulation.FileCheckpointManager(checkpoint_dir)
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
create_if_not_exists(results_dir)
csv_file = os.path.join(results_dir, 'experiment.metrics.csv')
metrics_mngr = tff.simulation.CSVMetricsManager(csv_file)
summary_logdir = os.path.join(root_output_dir, 'logdir', experiment_name)
tb_mngr = tff.simulation.TensorBoardManager(summary_dir=summary_logdir)
logging.info('Writing...')
logging.info(' checkpoints to: %s', checkpoint_dir)
logging.info(' metrics csv to: %s', metrics_mngr.metrics_filename)
logging.info(' summaries to: %s', summary_logdir)
@contextlib.contextmanager
def profiler(round_num):
if (rounds_per_profile > 0 and round_num % rounds_per_profile == 0):
with tf.profiler.experimental.Profile(summary_logdir):
yield
else:
yield
return checkpoint_mngr, metrics_mngr, tb_mngr, profiler
def _write_metrics(metrics_mngr, tb_mngr, metrics, round_num):
if not isinstance(metrics, dict):
raise TypeError('metrics should be type `dict`.')
if not isinstance(round_num, int):
raise TypeError('round_num should be type `int`.')
logging.info('Metrics at round {:d}:\n{!s}'.format(round_num,
pprint.pformat(metrics)))
metrics_mngr.save_metrics(metrics, round_num)
tb_mngr.save_metrics(metrics, round_num)
def _compute_numpy_l2_difference(model, previous_model):
squared_norms = tf.nest.map_structure(lambda x, y: tf.linalg.norm(x - y)**2,
model, previous_model)
l2_total_tensor = tf.reduce_sum(tf.nest.flatten(squared_norms))**0.5
return l2_total_tensor.numpy()
def _check_iterative_process_compatibility(iterative_process):
error_message = (
'The iterative_process argument must be of '
      'type `tff.templates.IterativeProcess`, and must have an '
'attribute `get_model_weights`, which must be a `tff.Computation`. This '
'computation must accept as input the state of `iterative_process`, and '
'its output must be a nested structure of tensors matching the input '
'shape of `validation_fn`.')
compatibility_error = IterativeProcessCompatibilityError(error_message)
if not isinstance(iterative_process, tff.templates.IterativeProcess):
raise compatibility_error
if not hasattr(iterative_process, 'get_model_weights'):
raise compatibility_error
elif not callable(iterative_process.get_model_weights):
raise compatibility_error
get_model_weights_fn = iterative_process.get_model_weights
if not isinstance(get_model_weights_fn, tff.Computation):
raise compatibility_error
input_type = get_model_weights_fn.type_signature.parameter
server_state_type = iterative_process.state_type.member
  if not server_state_type.is_assignable_from(input_type):
    raise compatibility_error
def run(iterative_process: tff.templates.IterativeProcess,
client_datasets_fn: Callable[[int], List[tf.data.Dataset]],
validation_fn: Callable[[Any, int], Dict[str, float]],
total_rounds: int,
experiment_name: str,
test_fn: Optional[Callable[[Any], Dict[str, float]]] = None,
root_output_dir: Optional[str] = '/tmp/fed_opt',
rounds_per_eval: Optional[int] = 1,
rounds_per_checkpoint: Optional[int] = 50,
rounds_per_profile: Optional[int] = 0):
_check_iterative_process_compatibility(iterative_process)
if not callable(client_datasets_fn):
raise TypeError('client_datasets_fn should be callable.')
if not callable(validation_fn):
raise TypeError('validation_fn should be callable.')
if test_fn is not None and not callable(test_fn):
raise TypeError('test_fn should be callable.')
logging.info('Starting iterative_process training loop...')
initial_state = iterative_process.initialize()
checkpoint_mngr, metrics_mngr, tb_mngr, profiler = _setup_outputs(
root_output_dir, experiment_name, rounds_per_profile)
logging.info('Asking checkpoint manager to load checkpoint.')
state, round_num = checkpoint_mngr.load_latest_checkpoint(initial_state)
if state is None:
logging.info('Initializing experiment from scratch.')
state = initial_state
round_num = 0
else:
logging.info('Restarted from checkpoint round %d', round_num)
round_num += 1
metrics_mngr.clear_metrics(round_num)
current_model = iterative_process.get_model_weights(state)
loop_start_time = time.time()
loop_start_round = round_num
while round_num < total_rounds:
data_prep_start_time = time.time()
federated_train_data = client_datasets_fn(round_num)
train_metrics = {
'prepare_datasets_secs': time.time() - data_prep_start_time
}
training_start_time = time.time()
prev_model = current_model
try:
with profiler(round_num):
state, round_metrics = iterative_process.next(state,
federated_train_data)
except (tf.errors.FailedPreconditionError, tf.errors.NotFoundError,
tf.errors.InternalError) as e:
logging.warning('Caught %s exception while running round %d:\n\t%s',
type(e), round_num, e)
continue
current_model = iterative_process.get_model_weights(state)
train_metrics['training_secs'] = time.time() - training_start_time
train_metrics['model_delta_l2_norm'] = _compute_numpy_l2_difference(
current_model, prev_model)
train_metrics['client_drift'] = state.client_drift
train_metrics.update(round_metrics)
loop_time = time.time() - loop_start_time
loop_rounds = (round_num - loop_start_round + 1)
    logging.info('Round {:2d}, {:.2f}s per round on average.'.format(
round_num, loop_time / loop_rounds))
if (round_num % rounds_per_checkpoint == 0 or
round_num == total_rounds - 1):
save_checkpoint_start_time = time.time()
checkpoint_mngr.save_checkpoint(state, round_num)
train_metrics['save_checkpoint_secs'] = (
time.time() - save_checkpoint_start_time)
metrics = {'train': train_metrics}
if round_num % rounds_per_eval == 0:
evaluate_start_time = time.time()
validation_metrics = validation_fn(current_model, round_num)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
_write_metrics(metrics_mngr, tb_mngr, metrics, round_num)
round_num += 1
metrics = {}
evaluate_start_time = time.time()
validation_metrics = validation_fn(current_model, round_num)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
if test_fn:
test_start_time = time.time()
test_metrics = test_fn(current_model)
test_metrics['evaluate_secs'] = time.time() - test_start_time
metrics['test'] = test_metrics
_write_metrics(metrics_mngr, tb_mngr, metrics, total_rounds)
return state
| true
| true
|
790229404fe0d9f9f004577ef9d50df5c9a93fca
| 526
|
py
|
Python
|
full-problems/knapsackWithDuplicates.py
|
vikas-t/DS-Algo
|
ea654d1cad5374c824c52da9d3815a9546eb43fa
|
[
"Apache-2.0"
] | null | null | null |
full-problems/knapsackWithDuplicates.py
|
vikas-t/DS-Algo
|
ea654d1cad5374c824c52da9d3815a9546eb43fa
|
[
"Apache-2.0"
] | null | null | null |
full-problems/knapsackWithDuplicates.py
|
vikas-t/DS-Algo
|
ea654d1cad5374c824c52da9d3815a9546eb43fa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# https://practice.geeksforgeeks.org/problems/knapsack-with-duplicate-items/0
def sol(n, w, wt, v):
"""
We do not need to create a 2d array here because all numbers are available
always
Try all items for weight ranging from 1 to w and check if weight
can be picked. Take the max of the result
"""
dp = [0 for i in range(w+1)]
for i in range(n):
for j in range(w+1):
if wt[i] <= j:
dp[j] = max(dp[j], v[i]+dp[j-wt[i]])
return dp[w]
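# Illustrative usage (assumed example, not part of the original file): with two
# item types of weights [2, 3], values [3, 4] and capacity 7, each item may be
# reused, so the optimum is two weight-2 items plus one weight-3 item, value 10.
if __name__ == "__main__":
    assert sol(2, 7, [2, 3], [3, 4]) == 10
    print(sol(2, 7, [2, 3], [3, 4]))  # -> 10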
| 30.941176
| 78
| 0.587452
|
def sol(n, w, wt, v):
dp = [0 for i in range(w+1)]
for i in range(n):
for j in range(w+1):
if wt[i] <= j:
dp[j] = max(dp[j], v[i]+dp[j-wt[i]])
return dp[w]
| true
| true
|
790229476a7b983d651371dbb6bc37cebd97f560
| 21,127
|
py
|
Python
|
qsdsan/sanunits/_suspended_growth_bioreactor.py
|
QSD-for-WaSH/sanitation
|
cbcbdd7ead382a6e66b51b5193852494ab3f081b
|
[
"Unlicense"
] | 2
|
2020-11-16T17:27:47.000Z
|
2020-11-19T16:10:45.000Z
|
qsdsan/sanunits/_suspended_growth_bioreactor.py
|
QSD-for-WaSH/sanitation
|
cbcbdd7ead382a6e66b51b5193852494ab3f081b
|
[
"Unlicense"
] | null | null | null |
qsdsan/sanunits/_suspended_growth_bioreactor.py
|
QSD-for-WaSH/sanitation
|
cbcbdd7ead382a6e66b51b5193852494ab3f081b
|
[
"Unlicense"
] | 3
|
2020-10-29T16:31:39.000Z
|
2020-11-05T17:09:06.000Z
|
# -*- coding: utf-8 -*-
'''
QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems
This module is developed by:
Joy Zhang <joycheung1994@gmail.com>
This module is under the University of Illinois/NCSA Open Source License.
Please refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt
for license details.
'''
from .. import SanUnit, WasteStream, Process, Processes, CompiledProcesses
from ._clarifier import _settling_flux
from sympy import symbols, lambdify, Matrix
from scipy.integrate import solve_ivp
from warnings import warn
from math import floor, ceil
import numpy as np
import pandas as pd
from numba import njit
__all__ = ('CSTR',
'SBR',
# 'PFR',
)
def _add_aeration_to_growth_model(aer, model):
if isinstance(aer, Process):
processes = Processes(model.tuple)
processes.append(aer)
processes.compile()
else:
processes = model
processes.compile()
return processes
# %%
@njit(cache=True)
def dydt_cstr_no_rxn_fixed_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs):
Q_ins = QC_ins[:, -1]
C_ins = QC_ins[:, :-1]
flow_in = Q_ins @ C_ins / V_arr
Q_e_arr[:] = Q_ins.sum(axis=0)
_dstate[-1] = dQC_ins[:, -1].sum(axis=0)
flow_out = Q_e_arr * Cs / V_arr
_dstate[:-1] = flow_in - flow_out
@njit(cache=True)
def dydt_cstr_no_rxn_controlled_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs):
Q_ins = QC_ins[:, -1]
C_ins = QC_ins[:, :-1]
flow_in = Q_ins @ C_ins / V_arr
Q_e_arr[:] = Q_ins.sum(axis=0)
_dstate[-1] = dQC_ins[:, -1].sum(axis=0)
flow_out = Q_e_arr * Cs / V_arr
_dstate[:-1] = flow_in - flow_out
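# Note: the two @njit helpers above are currently identical; both compute the
# reaction-free mass balance dC/dt = (Q_in @ C_in - Q_e * C) / V. The fixed- vs
# DO-controlled aeration distinction is handled by the caller
# (CSTR._compile_ODE), which pins the DO concentration and zeroes its
# derivative around the call.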
#%%
class CSTR(SanUnit):
'''
A single continuous stirred tank reactor.
Parameters
----------
ID : str
ID for the reactor.
ins : :class:`WasteStream`
Influents to the reactor. Can be an array of up to 3 WasteStream objects by
default, typically wastewater to be treated, recycled effluent, recycled
activated sludge.
outs : :class:`WasteStream`
Treated effluent.
split : iterable of float
Volumetric splits of effluent flows if there are more than one effluent.
The default is None.
V_max : float
Designed volume, in [m^3]. The default is 1000.
aeration : float or :class:`Process`, optional
Aeration setting. Either specify a targeted dissolved oxygen concentration
in [mg O2/L] or provide a :class:`Process` object to represent aeration,
or None for no aeration. The default is 2.0.
DO_ID : str, optional
The :class:`Component` ID for dissolved oxygen, only relevant when the
reactor is aerated. The default is 'S_O2'.
suspended_growth_model : :class:`Processes`, optional
The suspended growth biokinetic model. The default is None.
'''
_N_ins = 3
_N_outs = 1
_ins_size_is_fixed = False
_outs_size_is_fixed = False
def __init__(self, ID='', ins=None, outs=(), split=None, thermo=None,
init_with='WasteStream', V_max=1000, aeration=2.0,
DO_ID='S_O2', suspended_growth_model=None,
isdynamic=True, **kwargs):
SanUnit.__init__(self, ID, ins, outs, thermo, init_with, isdynamic=isdynamic)
self._V_max = V_max
self._aeration = aeration
self._DO_ID = DO_ID
self._model = suspended_growth_model
self._concs = None
self._mixed = WasteStream()
self.split = split
for attr, value in kwargs.items():
setattr(self, attr, value)
@property
def V_max(self):
'''[float] The designed maximum liquid volume, not accounting for increased volume due to aeration, in m^3.'''
return self._V_max
@V_max.setter
def V_max(self, Vm):
self._V_max = Vm
@property
def aeration(self):
'''[:class:`Process` or float or NoneType] Aeration model.'''
return self._aeration
@aeration.setter
def aeration(self, ae):
if ae is None or isinstance(ae, Process): self._aeration = ae
elif isinstance(ae, (float, int)):
if ae < 0:
raise ValueError('targeted dissolved oxygen concentration for aeration must be non-negative.')
else:
if ae > 14:
warn(f'targeted dissolved oxygen concentration for {self.ID} might exceed the saturated level.')
self._aeration = ae
else:
raise TypeError(f'aeration must be one of the following types: float, '
f'int, Process, NoneType. Not {type(ae)}')
@property
def suspended_growth_model(self):
'''[:class:`CompiledProcesses` or NoneType] Suspended growth model.'''
return self._model
@suspended_growth_model.setter
def suspended_growth_model(self, model):
if isinstance(model, CompiledProcesses) or model is None: self._model = model
else: raise TypeError(f'suspended_growth_model must be one of the following '
                              f'types: CompiledProcesses, NoneType. Not {type(model)}')
@property
def DO_ID(self):
'''[str] The `Component` ID for dissolved oxygen used in the suspended growth model and the aeration model.'''
return self._DO_ID
@DO_ID.setter
def DO_ID(self, doid):
if doid not in self.components.IDs:
raise ValueError(f'DO_ID must be in the set of `CompiledComponents` used to set thermo, '
f'i.e., one of {self.components.IDs}.')
self._DO_ID = doid
@property
def split(self):
'''[numpy.1darray or NoneType] The volumetric split of outs.'''
return self._split
@split.setter
def split(self, split):
if split is None: self._split = split
else:
if len(split) != len(self._outs):
raise ValueError('split and outs must have the same size')
self._split = np.array(split)/sum(split)
@property
def state(self):
'''The state of the CSTR, including component concentrations [mg/L] and flow rate [m^3/d].'''
if self._state is None: return None
else:
return dict(zip(list(self.components.IDs) + ['Q'], self._state))
@state.setter
def state(self, QCs):
QCs = np.asarray(QCs)
if QCs.shape != (len(self.components)+1, ):
raise ValueError(f'state must be a 1D array of length {len(self.components) + 1},'
'indicating component concentrations [mg/L] and total flow rate [m^3/d]')
self._state = QCs
def set_init_conc(self, **kwargs):
'''set the initial concentrations [mg/L] of the CSTR.'''
Cs = np.zeros(len(self.components))
cmpx = self.components.index
for k, v in kwargs.items(): Cs[cmpx(k)] = v
self._concs = Cs
def _init_state(self):
mixed = self._mixed
Q = mixed.get_total_flow('m3/d')
if self._concs is not None: Cs = self._concs
else: Cs = mixed.conc
self._state = np.append(Cs, Q).astype('float64')
self._dstate = self._state * 0.
def _update_state(self):
arr = self._state
if self.split is None: self._outs[0].state = arr
else:
for ws, spl in zip(self._outs, self.split):
y = arr.copy()
y[-1] *= spl
ws.state = y
def _update_dstate(self):
arr = self._dstate
if self.split is None: self._outs[0].dstate = arr
else:
for ws, spl in zip(self._outs, self.split):
y = arr.copy()
y[-1] *= spl
ws.dstate = y
def _run(self):
'''Only to converge volumetric flows.'''
mixed = self._mixed # avoid creating multiple new streams
mixed.mix_from(self.ins)
Q = mixed.F_vol # m3/hr
if self.split is None: self.outs[0].copy_like(mixed)
else:
for ws, spl in zip(self._outs, self.split):
ws.copy_like(mixed)
ws.set_total_flow(Q*spl, 'm3/hr')
def get_retained_mass(self, biomass_IDs):
cmps = self.components
mass = cmps.i_mass * self._state[:-1]
return self._V_max * mass[cmps.indices(biomass_IDs)].sum()
@property
def ODE(self):
if self._ODE is None:
self._compile_ODE()
return self._ODE
def _compile_ODE(self):
isa = isinstance
C = list(symbols(self.components.IDs))
m = len(C)
if self._model is None:
warn(f'{self.ID} was initialized without a suspended growth model, '
f'and thus run as a non-reactive unit')
r = lambda *args: np.zeros(m)
else:
processes = _add_aeration_to_growth_model(self._aeration, self._model)
r_eqs = list(processes.production_rates.rate_of_production)
r = lambdify(C, r_eqs, 'numpy')
_dstate = self._dstate
_update_dstate = self._update_dstate
V_arr = np.full(m, self._V_max)
Q_e_arr = np.zeros(m)
if isa(self._aeration, (float, int)):
i = self.components.index(self._DO_ID)
fixed_DO = self._aeration
def dy_dt(t, QC_ins, QC, dQC_ins):
Cs = QC[:-1]
Cs[i] = fixed_DO
dydt_cstr_no_rxn_controlled_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs)
_dstate[:-1] += r(*Cs)
_dstate[i] = 0
_update_dstate()
else:
def dy_dt(t, QC_ins, QC, dQC_ins):
Cs = QC[:-1]
dydt_cstr_no_rxn_fixed_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs)
_dstate[:-1] += r(*Cs)
_update_dstate()
self._ODE = dy_dt
def _design(self):
pass
class SBR(SanUnit):
'''
Sequential batch reactors operated in parallel. The number of reactors is
determined by operation cycle and influent flowrate. [1]_
Parameters
----------
ID : str
ID for the reactors. The default is ''.
ins : :class:`WasteStream`
        Influent to the reactor. The expected number of influents is 1.
outs : :class:`WasteStream`
Treated effluent and wasted sludge.
surface_area : float, optional
Surface area of the reactor bottom, in [m^2]. The reactor is assumed
        to be a cylinder. The default is 1500.
height : float, optional
Height of the reactor, in [m]. The default is 4.
operation_cycle : iterable of float, optional
Operation cycle of the SBR, time for each stage specified in [h]. There
are 7 stages: 1 - fill, 2 - fill, 3 - mix, 4 - mix, 5 - settle, 6 - decant,
7 - desludge. The first 4 stages are modeled as a biological reactor.
The 5th stage is modeled as a 1D N-layer settler. The last 2 stages are
assumed inactive. The default is (0.5, 1.5, 2.0, 0, 1.0, 0.5, 0.1).
aeration : iterable of float and/or :class:`Process`, optional
Aeration settings for the first 4 stages of the operation cycle. Either
specify a targeted dissolved oxygen concentration in [mg O2/L] or provide
a :class:`Process` object to represent aeration, or None for no aeration.
The default is (None, None, None, 2.0).
DO_ID : str, optional
The :class:`Component` ID for dissolved oxygen, only relevant when the
reactor is aerated. The default is 'S_O2'.
suspended_growth_model : :class:`Processes`, optional
The suspended growth biokinetic model. The default is None.
N_layer : int, optional
The number of layers to model settling. The default is 10.
pumped_flow : float, optional
Designed effluent flowrate, in [m^3/d]. The default is None.
underflow : float, optional
Designed wasted activated sludge flowrate, in [m^3/d]. The default is None.
X_threshold : float, optional
Threshold suspended solid concentration, in [g/m^3]. The default is 3000.
v_max : float, optional
Maximum theoretical (i.e. Vesilind) settling velocity, in [m/d]. The
default is 474.
v_max_practical : float, optional
Maximum practical settling velocity, in [m/d]. The default is 250.
rh : float, optional
Hindered zone settling parameter in the double-exponential settling velocity
function, in [m^3/g]. The default is 5.76e-4.
rp : float, optional
Flocculant zone settling parameter in the double-exponential settling velocity
function, in [m^3/g]. The default is 2.86e-3.
fns : float, optional
Non-settleable fraction of the suspended solids, dimensionless. Must be within
[0, 1]. The default is 2.28e-3.
cache_state : bool, optional
Whether to store volume and composition of retained sludge in the tank from
most recent run. The default is True.
References
----------
    .. [1] Takács, I.; Patry, G. G.; Nolasco, D. A Dynamic Model of the
        Clarification-Thickening Process. Water Res. 1991, 25 (10), 1263–1271.
        https://doi.org/10.1016/0043-1354(91)90066-Y.
'''
_N_ins = 1
_N_outs = 2
def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream',
surface_area=1500, height=4,
operation_cycle=(0.5, 1.5, 2.0, 0, 1.0, 0.5, 0.1),
aeration=(None, None, None, 2.0), DO_ID='S_O2',
suspended_growth_model=None, N_layer=10,
pumped_flow=None, underflow=None,
X_threshold=3000, v_max=474, v_max_practical=250,
rh=5.76e-4, rp=2.86e-3, fns=2.28e-3,
cache_state=True, **kwargs):
SanUnit.__init__(self, ID, ins, outs, thermo, init_with)
self._V = surface_area * height
self._A = surface_area
self._h = height
self._operation_cycle = operation_cycle
self._aeration = aeration
self._DO_ID = DO_ID
self._model = suspended_growth_model
self._N_layer = N_layer
self._Q_e = pumped_flow
self._Q_WAS = underflow
self._X_t = X_threshold
self._v_max = v_max
self._v_max_p = v_max_practical
self._rh = rh
self._rp = rp
self._fns = fns
self._cache_state = cache_state
for attr, value in kwargs.items():
setattr(self, attr, value)
self._init_Vas = None
self._init_Cas = None
self._dynamic_composition = None
@property
def operation_cycle(self):
return dict(zip(('fill_1', 'fill_2', 'mix_1', 'mix_2', 'settle', 'decant', 'desludge'),
self._operation_cycle))
@property
def total_cycle_time(self):
return sum(self._operation_cycle)
@property
def aeration(self):
return dict(zip(('fill_1', 'fill_2', 'mix_1', 'mix_2'),
self._aeration[:4]))
@property
def C_t(self):
if self._dynamic_composition:
return pd.DataFrame(self._dynamic_composition,
columns = ['Time[d]'] + list(self.components.IDs))
else: return None
def _run(self, cache_state=True):
if self._model is None:
raise RuntimeError(f'{self.ID} was initialized without a suspended growth model.')
else:
isa = isinstance
inf = self.ins[0]
Q_in = inf.get_total_flow('m3/d')
eff, sludge = self.outs
eff.copy_like(inf)
sludge.copy_like(inf)
C_in = inf.mass / inf.F_vol * 1e3 # concentrations in g/m3
cmps = self.components
C = list(symbols(cmps.IDs))
if self._init_Vas is not None:
V_0 = self._init_Vas
C_0 = self._init_Cas
else:
V_0 = 0
C_0 = C_in
n = self._N_layer
if self._aeration.count(None) == len(self._aeration):
Vmax = self._V
hj = self._h/n
else:
Vmax = self._V*0.75
hj = self._h*0.75/n
# ********fill and mix/aerate stages***********
T_fill = (Vmax - V_0)/Q_in # maximum total fill time in day
T = [t/24 for t in self._operation_cycle] # operation cycle in day
if T_fill <= T[0]:
schedule = [T_fill, T[0]-T_fill] + T[1:4]
aer = [self._aeration[0], self._aeration[0]] + list(self._aeration[1:4])
fill = [True] + [False]*4
V_total = Vmax
elif T_fill <= T[0]+T[1]:
schedule = [T[0], T_fill-T[0], T[0]+T[1]-T_fill] + T[2:4]
aer = list(self._aeration[:2]) + [self._aeration[1]] + list(self._aeration[2:4])
fill = [True]*2 + [False]*3
V_total = Vmax
else:
schedule = T[:4]
aer = list(self._aeration[:4])
fill = [True]*2 + [False]*2
V_total = Q_in*(T[0]+T[1])+V_0
hj = V_total/self._V*self._h/n
for i in range(1, len(schedule)):
if fill[-i] == fill[-i-1] and aer[-i] == aer[-i-1]:
schedule[-i-1] += schedule[-i]
schedule[-i] = 0
            # seed with t = 0 and the initial composition so the in-loop
            # concatenation always has a previous time point to offset from
            t_arr = np.zeros(1)
            y_mat = np.asarray(C_0).reshape(-1, 1)
for i in range(len(schedule)):
if schedule[i] > 0:
dC_dt, J_func = self._compile_dC_dt(V_0, Q_in, C_in, C, fill[i], aer[i])
if isa(aer[i], (float, int)): C_0[cmps.index(self._DO_ID)] = aer[i]
sol = solve_ivp(dC_dt, (0, schedule[i]), C_0, method='BDF', jac=J_func)
C_0 = sol.y.transpose()[-1]
V_0 += Q_in * schedule[i] * fill[i]
t_arr = np.concatenate((t_arr, sol.t + t_arr[-1]))
y_mat = np.hstack((y_mat, sol.y))
self._dynamic_composition = np.vstack((t_arr, y_mat)).transpose()
# *********settle, decant, desludge**********
eff.set_flow(C_0*eff.F_vol, 'g/hr', self.components.IDs)
X_0 = eff.get_TSS()
X_min = X_0 * self._fns
T_settle = T[4]
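            # dX_dt below is a 1D N-layer settler mass balance: J[j] is the
            # gravity flux from layer j into layer j+1, limited by the layer
            # below once its concentration exceeds the threshold X_t, using
            # the double-exponential settling velocity of Takács et al. [1].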
def dX_dt(t, X):
VX = [_settling_flux(x, self._v_max, self._v_max_p, X_min, self._rh, self._rp) for x in X]
J = [VX[j] if X[j+1] <= self._X_t else min(VX[j], VX[j+1]) for j in range(n-1)]
settle_out = np.array(J + [0])
settle_in = np.array([0] + J)
dXdt = (settle_in - settle_out)/hj
return dXdt
sol = solve_ivp(dX_dt, (0, T_settle), np.ones(n)*X_0)
X = sol.y.transpose()[-1]
V_eff = min(T[5]*self._Q_e, V_total*(n-1)/n)
n_eff = V_eff/V_total
w_eff = np.array([1]*floor(n_eff)+[n_eff-floor(n_eff)])
X_eff = np.average(X[:ceil(n_eff)], weights=w_eff)
eff_mass_flow = (X_eff/X_0*cmps.x + (1-cmps.x))*C_0*V_eff/T[5]
eff.set_flow(eff_mass_flow, 'g/d', cmps.IDs)
V_was = min(T[6]*self._Q_WAS, V_total-V_eff)
X_as = (V_total*X_0 - V_eff*X_eff) / (V_total-V_eff)
C_as = (X_as/X_0*cmps.x + (1-cmps.x))*C_0
was_mass_flow = C_as*V_was/T[6]
sludge.set_flow(was_mass_flow, 'g/d', cmps.IDs)
if self._cache_state:
self._init_Vas = V_total - V_eff - V_was
self._init_Cas = C_as
def _design(self):
pass
def _compile_dC_dt(self, V0, Qin, Cin, C, fill, aer):
isa = isinstance
processes = _add_aeration_to_growth_model(aer, self._model)
if fill:
t = symbols('t')
mass_balance_terms = list(zip(Cin, C, processes.production_rates.rate_of_production))
C_dot_eqs = [(cin-c)/(t+V0/Qin) + r for cin, c, r in mass_balance_terms]
if isa(aer, (float, int)): C_dot_eqs[self.components.index(self._DO_ID)] = 0
def dC_dt(t, y):
C_dot = lambdify([t]+C, C_dot_eqs)
return C_dot(t, *y)
J = Matrix(dC_dt(t, C)).jacobian(C)
else:
C_dot_eqs = processes.production_rates.rate_of_production
if isa(aer, (float, int)): C_dot_eqs[self.components.index(self._DO_ID)] = 0
def dC_dt(t, y):
C_dot = lambdify(C, C_dot_eqs)
return C_dot(*y)
J = Matrix(dC_dt(None, C)).jacobian(C)
def J_func(t, y):
J_func = lambdify(C, J)
return J_func(*y)
return (dC_dt, J_func)
# class PFR(SanUnit):
# _N_ins = 1
# _N_outs = 2
# def __init__(self, ID='', ins=None, outs=(), **kwargs):
# SanUnit.__init__(self, ID, ins, outs)
# def _run(self, steady_state=True):
# pass
# def _design(self):
# pass
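# Illustrative construction (hypothetical stream and model names, not part of
# the original module): an aerated CSTR holding 2.0 mg O2/L with an ASM-type
# suspended growth model, initialized via set_init_conc:
#
#   reactor = CSTR('R1', ins=ww, V_max=1000, aeration=2.0,
#                  DO_ID='S_O2', suspended_growth_model=asm1)
#   reactor.set_init_conc(S_S=5.0, X_BH=2500.0)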
| 38.412727
| 118
| 0.576229
|
from .. import SanUnit, WasteStream, Process, Processes, CompiledProcesses
from ._clarifier import _settling_flux
from sympy import symbols, lambdify, Matrix
from scipy.integrate import solve_ivp
from warnings import warn
from math import floor, ceil
import numpy as np
import pandas as pd
from numba import njit
__all__ = ('CSTR',
'SBR',
)
def _add_aeration_to_growth_model(aer, model):
if isinstance(aer, Process):
processes = Processes(model.tuple)
processes.append(aer)
processes.compile()
else:
processes = model
processes.compile()
return processes
@njit(cache=True)
def dydt_cstr_no_rxn_fixed_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs):
Q_ins = QC_ins[:, -1]
C_ins = QC_ins[:, :-1]
flow_in = Q_ins @ C_ins / V_arr
Q_e_arr[:] = Q_ins.sum(axis=0)
_dstate[-1] = dQC_ins[:, -1].sum(axis=0)
flow_out = Q_e_arr * Cs / V_arr
_dstate[:-1] = flow_in - flow_out
@njit(cache=True)
def dydt_cstr_no_rxn_controlled_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs):
Q_ins = QC_ins[:, -1]
C_ins = QC_ins[:, :-1]
flow_in = Q_ins @ C_ins / V_arr
Q_e_arr[:] = Q_ins.sum(axis=0)
_dstate[-1] = dQC_ins[:, -1].sum(axis=0)
flow_out = Q_e_arr * Cs / V_arr
_dstate[:-1] = flow_in - flow_out
class CSTR(SanUnit):
_N_ins = 3
_N_outs = 1
_ins_size_is_fixed = False
_outs_size_is_fixed = False
def __init__(self, ID='', ins=None, outs=(), split=None, thermo=None,
init_with='WasteStream', V_max=1000, aeration=2.0,
DO_ID='S_O2', suspended_growth_model=None,
isdynamic=True, **kwargs):
SanUnit.__init__(self, ID, ins, outs, thermo, init_with, isdynamic=isdynamic)
self._V_max = V_max
self._aeration = aeration
self._DO_ID = DO_ID
self._model = suspended_growth_model
self._concs = None
self._mixed = WasteStream()
self.split = split
for attr, value in kwargs.items():
setattr(self, attr, value)
@property
def V_max(self):
return self._V_max
@V_max.setter
def V_max(self, Vm):
self._V_max = Vm
@property
def aeration(self):
return self._aeration
@aeration.setter
def aeration(self, ae):
if ae is None or isinstance(ae, Process): self._aeration = ae
elif isinstance(ae, (float, int)):
if ae < 0:
raise ValueError('targeted dissolved oxygen concentration for aeration must be non-negative.')
else:
if ae > 14:
warn(f'targeted dissolved oxygen concentration for {self.ID} might exceed the saturated level.')
self._aeration = ae
else:
raise TypeError(f'aeration must be one of the following types: float, '
f'int, Process, NoneType. Not {type(ae)}')
@property
def suspended_growth_model(self):
return self._model
@suspended_growth_model.setter
def suspended_growth_model(self, model):
if isinstance(model, CompiledProcesses) or model is None: self._model = model
else: raise TypeError(f'suspended_growth_model must be one of the following '
                              f'types: CompiledProcesses, NoneType. Not {type(model)}')
@property
def DO_ID(self):
return self._DO_ID
@DO_ID.setter
def DO_ID(self, doid):
if doid not in self.components.IDs:
raise ValueError(f'DO_ID must be in the set of `CompiledComponents` used to set thermo, '
f'i.e., one of {self.components.IDs}.')
self._DO_ID = doid
@property
def split(self):
return self._split
@split.setter
def split(self, split):
if split is None: self._split = split
else:
if len(split) != len(self._outs):
raise ValueError('split and outs must have the same size')
self._split = np.array(split)/sum(split)
@property
def state(self):
if self._state is None: return None
else:
return dict(zip(list(self.components.IDs) + ['Q'], self._state))
@state.setter
def state(self, QCs):
QCs = np.asarray(QCs)
if QCs.shape != (len(self.components)+1, ):
raise ValueError(f'state must be a 1D array of length {len(self.components) + 1},'
'indicating component concentrations [mg/L] and total flow rate [m^3/d]')
self._state = QCs
def set_init_conc(self, **kwargs):
Cs = np.zeros(len(self.components))
cmpx = self.components.index
for k, v in kwargs.items(): Cs[cmpx(k)] = v
self._concs = Cs
def _init_state(self):
mixed = self._mixed
Q = mixed.get_total_flow('m3/d')
if self._concs is not None: Cs = self._concs
else: Cs = mixed.conc
self._state = np.append(Cs, Q).astype('float64')
self._dstate = self._state * 0.
def _update_state(self):
arr = self._state
if self.split is None: self._outs[0].state = arr
else:
for ws, spl in zip(self._outs, self.split):
y = arr.copy()
y[-1] *= spl
ws.state = y
def _update_dstate(self):
arr = self._dstate
if self.split is None: self._outs[0].dstate = arr
else:
for ws, spl in zip(self._outs, self.split):
y = arr.copy()
y[-1] *= spl
ws.dstate = y
def _run(self):
mixed = self._mixed
mixed.mix_from(self.ins)
Q = mixed.F_vol
if self.split is None: self.outs[0].copy_like(mixed)
else:
for ws, spl in zip(self._outs, self.split):
ws.copy_like(mixed)
ws.set_total_flow(Q*spl, 'm3/hr')
def get_retained_mass(self, biomass_IDs):
cmps = self.components
mass = cmps.i_mass * self._state[:-1]
return self._V_max * mass[cmps.indices(biomass_IDs)].sum()
@property
def ODE(self):
if self._ODE is None:
self._compile_ODE()
return self._ODE
def _compile_ODE(self):
isa = isinstance
C = list(symbols(self.components.IDs))
m = len(C)
if self._model is None:
warn(f'{self.ID} was initialized without a suspended growth model, '
f'and thus run as a non-reactive unit')
r = lambda *args: np.zeros(m)
else:
processes = _add_aeration_to_growth_model(self._aeration, self._model)
r_eqs = list(processes.production_rates.rate_of_production)
r = lambdify(C, r_eqs, 'numpy')
_dstate = self._dstate
_update_dstate = self._update_dstate
V_arr = np.full(m, self._V_max)
Q_e_arr = np.zeros(m)
if isa(self._aeration, (float, int)):
i = self.components.index(self._DO_ID)
fixed_DO = self._aeration
def dy_dt(t, QC_ins, QC, dQC_ins):
Cs = QC[:-1]
Cs[i] = fixed_DO
dydt_cstr_no_rxn_controlled_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs)
_dstate[:-1] += r(*Cs)
_dstate[i] = 0
_update_dstate()
else:
def dy_dt(t, QC_ins, QC, dQC_ins):
Cs = QC[:-1]
dydt_cstr_no_rxn_fixed_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs)
_dstate[:-1] += r(*Cs)
_update_dstate()
self._ODE = dy_dt
def _design(self):
pass
class SBR(SanUnit):
_N_ins = 1
_N_outs = 2
def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream',
surface_area=1500, height=4,
operation_cycle=(0.5, 1.5, 2.0, 0, 1.0, 0.5, 0.1),
aeration=(None, None, None, 2.0), DO_ID='S_O2',
suspended_growth_model=None, N_layer=10,
pumped_flow=None, underflow=None,
X_threshold=3000, v_max=474, v_max_practical=250,
rh=5.76e-4, rp=2.86e-3, fns=2.28e-3,
cache_state=True, **kwargs):
SanUnit.__init__(self, ID, ins, outs, thermo, init_with)
self._V = surface_area * height
self._A = surface_area
self._h = height
self._operation_cycle = operation_cycle
self._aeration = aeration
self._DO_ID = DO_ID
self._model = suspended_growth_model
self._N_layer = N_layer
self._Q_e = pumped_flow
self._Q_WAS = underflow
self._X_t = X_threshold
self._v_max = v_max
self._v_max_p = v_max_practical
self._rh = rh
self._rp = rp
self._fns = fns
self._cache_state = cache_state
for attr, value in kwargs.items():
setattr(self, attr, value)
self._init_Vas = None
self._init_Cas = None
self._dynamic_composition = None
@property
def operation_cycle(self):
return dict(zip(('fill_1', 'fill_2', 'mix_1', 'mix_2', 'settle', 'decant', 'desludge'),
self._operation_cycle))
@property
def total_cycle_time(self):
return sum(self._operation_cycle)
@property
def aeration(self):
return dict(zip(('fill_1', 'fill_2', 'mix_1', 'mix_2'),
self._aeration[:4]))
@property
def C_t(self):
if self._dynamic_composition:
return pd.DataFrame(self._dynamic_composition,
columns = ['Time[d]'] + list(self.components.IDs))
else: return None
def _run(self, cache_state=True):
if self._model is None:
raise RuntimeError(f'{self.ID} was initialized without a suspended growth model.')
else:
isa = isinstance
inf = self.ins[0]
Q_in = inf.get_total_flow('m3/d')
eff, sludge = self.outs
eff.copy_like(inf)
sludge.copy_like(inf)
C_in = inf.mass / inf.F_vol * 1e3
cmps = self.components
C = list(symbols(cmps.IDs))
if self._init_Vas is not None:
V_0 = self._init_Vas
C_0 = self._init_Cas
else:
V_0 = 0
C_0 = C_in
n = self._N_layer
if self._aeration.count(None) == len(self._aeration):
Vmax = self._V
hj = self._h/n
else:
Vmax = self._V*0.75
hj = self._h*0.75/n
T_fill = (Vmax - V_0)/Q_in
T = [t/24 for t in self._operation_cycle]
if T_fill <= T[0]:
schedule = [T_fill, T[0]-T_fill] + T[1:4]
aer = [self._aeration[0], self._aeration[0]] + list(self._aeration[1:4])
fill = [True] + [False]*4
V_total = Vmax
elif T_fill <= T[0]+T[1]:
schedule = [T[0], T_fill-T[0], T[0]+T[1]-T_fill] + T[2:4]
aer = list(self._aeration[:2]) + [self._aeration[1]] + list(self._aeration[2:4])
fill = [True]*2 + [False]*3
V_total = Vmax
else:
schedule = T[:4]
aer = list(self._aeration[:4])
fill = [True]*2 + [False]*2
V_total = Q_in*(T[0]+T[1])+V_0
hj = V_total/self._V*self._h/n
for i in range(1, len(schedule)):
if fill[-i] == fill[-i-1] and aer[-i] == aer[-i-1]:
schedule[-i-1] += schedule[-i]
schedule[-i] = 0
            t_arr = np.zeros(1)
            y_mat = np.asarray(C_0).reshape(-1, 1)
for i in range(len(schedule)):
if schedule[i] > 0:
dC_dt, J_func = self._compile_dC_dt(V_0, Q_in, C_in, C, fill[i], aer[i])
if isa(aer[i], (float, int)): C_0[cmps.index(self._DO_ID)] = aer[i]
sol = solve_ivp(dC_dt, (0, schedule[i]), C_0, method='BDF', jac=J_func)
C_0 = sol.y.transpose()[-1]
V_0 += Q_in * schedule[i] * fill[i]
t_arr = np.concatenate((t_arr, sol.t + t_arr[-1]))
y_mat = np.hstack((y_mat, sol.y))
self._dynamic_composition = np.vstack((t_arr, y_mat)).transpose()
eff.set_flow(C_0*eff.F_vol, 'g/hr', self.components.IDs)
X_0 = eff.get_TSS()
X_min = X_0 * self._fns
T_settle = T[4]
def dX_dt(t, X):
VX = [_settling_flux(x, self._v_max, self._v_max_p, X_min, self._rh, self._rp) for x in X]
J = [VX[j] if X[j+1] <= self._X_t else min(VX[j], VX[j+1]) for j in range(n-1)]
settle_out = np.array(J + [0])
settle_in = np.array([0] + J)
dXdt = (settle_in - settle_out)/hj
return dXdt
sol = solve_ivp(dX_dt, (0, T_settle), np.ones(n)*X_0)
X = sol.y.transpose()[-1]
V_eff = min(T[5]*self._Q_e, V_total*(n-1)/n)
n_eff = V_eff/V_total
w_eff = np.array([1]*floor(n_eff)+[n_eff-floor(n_eff)])
X_eff = np.average(X[:ceil(n_eff)], weights=w_eff)
eff_mass_flow = (X_eff/X_0*cmps.x + (1-cmps.x))*C_0*V_eff/T[5]
eff.set_flow(eff_mass_flow, 'g/d', cmps.IDs)
V_was = min(T[6]*self._Q_WAS, V_total-V_eff)
X_as = (V_total*X_0 - V_eff*X_eff) / (V_total-V_eff)
C_as = (X_as/X_0*cmps.x + (1-cmps.x))*C_0
was_mass_flow = C_as*V_was/T[6]
sludge.set_flow(was_mass_flow, 'g/d', cmps.IDs)
if self._cache_state:
self._init_Vas = V_total - V_eff - V_was
self._init_Cas = C_as
def _design(self):
pass
def _compile_dC_dt(self, V0, Qin, Cin, C, fill, aer):
isa = isinstance
processes = _add_aeration_to_growth_model(aer, self._model)
if fill:
t = symbols('t')
mass_balance_terms = list(zip(Cin, C, processes.production_rates.rate_of_production))
C_dot_eqs = [(cin-c)/(t+V0/Qin) + r for cin, c, r in mass_balance_terms]
if isa(aer, (float, int)): C_dot_eqs[self.components.index(self._DO_ID)] = 0
def dC_dt(t, y):
C_dot = lambdify([t]+C, C_dot_eqs)
return C_dot(t, *y)
J = Matrix(dC_dt(t, C)).jacobian(C)
else:
C_dot_eqs = processes.production_rates.rate_of_production
if isa(aer, (float, int)): C_dot_eqs[self.components.index(self._DO_ID)] = 0
def dC_dt(t, y):
C_dot = lambdify(C, C_dot_eqs)
return C_dot(*y)
J = Matrix(dC_dt(None, C)).jacobian(C)
def J_func(t, y):
J_func = lambdify(C, J)
return J_func(*y)
return (dC_dt, J_func)
| true
| true
|
79022949262001f47530fe0b92a54b64b5b28cf9
| 12,246
|
py
|
Python
|
tests/test_waterheatermixed.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 19
|
2015-12-08T23:33:51.000Z
|
2022-01-31T04:41:10.000Z
|
tests/test_waterheatermixed.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 2
|
2019-10-04T10:57:00.000Z
|
2021-10-01T06:46:17.000Z
|
tests/test_waterheatermixed.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 7
|
2015-11-04T02:25:01.000Z
|
2021-12-08T03:14:28.000Z
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.water_heaters_and_thermal_storage import WaterHeaterMixed
log = logging.getLogger(__name__)
class TestWaterHeaterMixed(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_waterheatermixed(self):
pyidf.validation_level = ValidationLevel.error
obj = WaterHeaterMixed()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_tank_volume = 0.0
obj.tank_volume = var_tank_volume
# object-list
var_setpoint_temperature_schedule_name = "object-list|Setpoint Temperature Schedule Name"
obj.setpoint_temperature_schedule_name = var_setpoint_temperature_schedule_name
# real
var_deadband_temperature_difference = 0.0
obj.deadband_temperature_difference = var_deadband_temperature_difference
# real
var_maximum_temperature_limit = 5.5
obj.maximum_temperature_limit = var_maximum_temperature_limit
# alpha
var_heater_control_type = "Cycle"
obj.heater_control_type = var_heater_control_type
# real
var_heater_maximum_capacity = 0.0
obj.heater_maximum_capacity = var_heater_maximum_capacity
# real
var_heater_minimum_capacity = 0.0
obj.heater_minimum_capacity = var_heater_minimum_capacity
# real
var_heater_ignition_minimum_flow_rate = 0.0
obj.heater_ignition_minimum_flow_rate = var_heater_ignition_minimum_flow_rate
# real
var_heater_ignition_delay = 0.0
obj.heater_ignition_delay = var_heater_ignition_delay
# alpha
var_heater_fuel_type = "Electricity"
obj.heater_fuel_type = var_heater_fuel_type
# real
var_heater_thermal_efficiency = 0.50005
obj.heater_thermal_efficiency = var_heater_thermal_efficiency
# object-list
var_part_load_factor_curve_name = "object-list|Part Load Factor Curve Name"
obj.part_load_factor_curve_name = var_part_load_factor_curve_name
# real
var_off_cycle_parasitic_fuel_consumption_rate = 0.0
obj.off_cycle_parasitic_fuel_consumption_rate = var_off_cycle_parasitic_fuel_consumption_rate
# alpha
var_off_cycle_parasitic_fuel_type = "Electricity"
obj.off_cycle_parasitic_fuel_type = var_off_cycle_parasitic_fuel_type
# real
var_off_cycle_parasitic_heat_fraction_to_tank = 0.5
obj.off_cycle_parasitic_heat_fraction_to_tank = var_off_cycle_parasitic_heat_fraction_to_tank
# real
var_on_cycle_parasitic_fuel_consumption_rate = 0.0
obj.on_cycle_parasitic_fuel_consumption_rate = var_on_cycle_parasitic_fuel_consumption_rate
# alpha
var_on_cycle_parasitic_fuel_type = "Electricity"
obj.on_cycle_parasitic_fuel_type = var_on_cycle_parasitic_fuel_type
# real
var_on_cycle_parasitic_heat_fraction_to_tank = 0.5
obj.on_cycle_parasitic_heat_fraction_to_tank = var_on_cycle_parasitic_heat_fraction_to_tank
# alpha
var_ambient_temperature_indicator = "Schedule"
obj.ambient_temperature_indicator = var_ambient_temperature_indicator
# object-list
var_ambient_temperature_schedule_name = "object-list|Ambient Temperature Schedule Name"
obj.ambient_temperature_schedule_name = var_ambient_temperature_schedule_name
# object-list
var_ambient_temperature_zone_name = "object-list|Ambient Temperature Zone Name"
obj.ambient_temperature_zone_name = var_ambient_temperature_zone_name
# node
var_ambient_temperature_outdoor_air_node_name = "node|Ambient Temperature Outdoor Air Node Name"
obj.ambient_temperature_outdoor_air_node_name = var_ambient_temperature_outdoor_air_node_name
# real
var_off_cycle_loss_coefficient_to_ambient_temperature = 0.0
obj.off_cycle_loss_coefficient_to_ambient_temperature = var_off_cycle_loss_coefficient_to_ambient_temperature
# real
var_off_cycle_loss_fraction_to_zone = 0.5
obj.off_cycle_loss_fraction_to_zone = var_off_cycle_loss_fraction_to_zone
# real
var_on_cycle_loss_coefficient_to_ambient_temperature = 0.0
obj.on_cycle_loss_coefficient_to_ambient_temperature = var_on_cycle_loss_coefficient_to_ambient_temperature
# real
var_on_cycle_loss_fraction_to_zone = 0.5
obj.on_cycle_loss_fraction_to_zone = var_on_cycle_loss_fraction_to_zone
# real
var_peak_use_flow_rate = 0.0
obj.peak_use_flow_rate = var_peak_use_flow_rate
# object-list
var_use_flow_rate_fraction_schedule_name = "object-list|Use Flow Rate Fraction Schedule Name"
obj.use_flow_rate_fraction_schedule_name = var_use_flow_rate_fraction_schedule_name
# object-list
var_cold_water_supply_temperature_schedule_name = "object-list|Cold Water Supply Temperature Schedule Name"
obj.cold_water_supply_temperature_schedule_name = var_cold_water_supply_temperature_schedule_name
# node
var_use_side_inlet_node_name = "node|Use Side Inlet Node Name"
obj.use_side_inlet_node_name = var_use_side_inlet_node_name
# node
var_use_side_outlet_node_name = "node|Use Side Outlet Node Name"
obj.use_side_outlet_node_name = var_use_side_outlet_node_name
# real
var_use_side_effectiveness = 0.5
obj.use_side_effectiveness = var_use_side_effectiveness
# node
var_source_side_inlet_node_name = "node|Source Side Inlet Node Name"
obj.source_side_inlet_node_name = var_source_side_inlet_node_name
# node
var_source_side_outlet_node_name = "node|Source Side Outlet Node Name"
obj.source_side_outlet_node_name = var_source_side_outlet_node_name
# real
var_source_side_effectiveness = 0.5
obj.source_side_effectiveness = var_source_side_effectiveness
# real
var_use_side_design_flow_rate = 0.0
obj.use_side_design_flow_rate = var_use_side_design_flow_rate
# real
var_source_side_design_flow_rate = 0.0
obj.source_side_design_flow_rate = var_source_side_design_flow_rate
# real
var_indirect_water_heating_recovery_time = 0.0001
obj.indirect_water_heating_recovery_time = var_indirect_water_heating_recovery_time
# alpha
var_source_side_flow_control_mode = "StorageTank"
obj.source_side_flow_control_mode = var_source_side_flow_control_mode
# object-list
var_indirect_alternate_setpoint_temperature_schedule_name = "object-list|Indirect Alternate Setpoint Temperature Schedule Name"
obj.indirect_alternate_setpoint_temperature_schedule_name = var_indirect_alternate_setpoint_temperature_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.waterheatermixeds[0].name, var_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].tank_volume, var_tank_volume)
self.assertEqual(idf2.waterheatermixeds[0].setpoint_temperature_schedule_name, var_setpoint_temperature_schedule_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].deadband_temperature_difference, var_deadband_temperature_difference)
self.assertAlmostEqual(idf2.waterheatermixeds[0].maximum_temperature_limit, var_maximum_temperature_limit)
self.assertEqual(idf2.waterheatermixeds[0].heater_control_type, var_heater_control_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_maximum_capacity, var_heater_maximum_capacity)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_minimum_capacity, var_heater_minimum_capacity)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_ignition_minimum_flow_rate, var_heater_ignition_minimum_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_ignition_delay, var_heater_ignition_delay)
self.assertEqual(idf2.waterheatermixeds[0].heater_fuel_type, var_heater_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_thermal_efficiency, var_heater_thermal_efficiency)
self.assertEqual(idf2.waterheatermixeds[0].part_load_factor_curve_name, var_part_load_factor_curve_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_fuel_consumption_rate, var_off_cycle_parasitic_fuel_consumption_rate)
self.assertEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_fuel_type, var_off_cycle_parasitic_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_heat_fraction_to_tank, var_off_cycle_parasitic_heat_fraction_to_tank)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_fuel_consumption_rate, var_on_cycle_parasitic_fuel_consumption_rate)
self.assertEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_fuel_type, var_on_cycle_parasitic_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_heat_fraction_to_tank, var_on_cycle_parasitic_heat_fraction_to_tank)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_indicator, var_ambient_temperature_indicator)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_schedule_name, var_ambient_temperature_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_zone_name, var_ambient_temperature_zone_name)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_outdoor_air_node_name, var_ambient_temperature_outdoor_air_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_loss_coefficient_to_ambient_temperature, var_off_cycle_loss_coefficient_to_ambient_temperature)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_loss_fraction_to_zone, var_off_cycle_loss_fraction_to_zone)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_loss_coefficient_to_ambient_temperature, var_on_cycle_loss_coefficient_to_ambient_temperature)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_loss_fraction_to_zone, var_on_cycle_loss_fraction_to_zone)
self.assertAlmostEqual(idf2.waterheatermixeds[0].peak_use_flow_rate, var_peak_use_flow_rate)
self.assertEqual(idf2.waterheatermixeds[0].use_flow_rate_fraction_schedule_name, var_use_flow_rate_fraction_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].cold_water_supply_temperature_schedule_name, var_cold_water_supply_temperature_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].use_side_inlet_node_name, var_use_side_inlet_node_name)
self.assertEqual(idf2.waterheatermixeds[0].use_side_outlet_node_name, var_use_side_outlet_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].use_side_effectiveness, var_use_side_effectiveness)
self.assertEqual(idf2.waterheatermixeds[0].source_side_inlet_node_name, var_source_side_inlet_node_name)
self.assertEqual(idf2.waterheatermixeds[0].source_side_outlet_node_name, var_source_side_outlet_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].source_side_effectiveness, var_source_side_effectiveness)
self.assertAlmostEqual(idf2.waterheatermixeds[0].use_side_design_flow_rate, var_use_side_design_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].source_side_design_flow_rate, var_source_side_design_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].indirect_water_heating_recovery_time, var_indirect_water_heating_recovery_time)
self.assertEqual(idf2.waterheatermixeds[0].source_side_flow_control_mode, var_source_side_flow_control_mode)
self.assertEqual(idf2.waterheatermixeds[0].indirect_alternate_setpoint_temperature_schedule_name, var_indirect_alternate_setpoint_temperature_schedule_name)
| 61.848485
| 164
| 0.787686
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.water_heaters_and_thermal_storage import WaterHeaterMixed
log = logging.getLogger(__name__)
class TestWaterHeaterMixed(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_waterheatermixed(self):
pyidf.validation_level = ValidationLevel.error
obj = WaterHeaterMixed()
var_name = "Name"
obj.name = var_name
var_tank_volume = 0.0
obj.tank_volume = var_tank_volume
var_setpoint_temperature_schedule_name = "object-list|Setpoint Temperature Schedule Name"
obj.setpoint_temperature_schedule_name = var_setpoint_temperature_schedule_name
var_deadband_temperature_difference = 0.0
obj.deadband_temperature_difference = var_deadband_temperature_difference
var_maximum_temperature_limit = 5.5
obj.maximum_temperature_limit = var_maximum_temperature_limit
var_heater_control_type = "Cycle"
obj.heater_control_type = var_heater_control_type
var_heater_maximum_capacity = 0.0
obj.heater_maximum_capacity = var_heater_maximum_capacity
var_heater_minimum_capacity = 0.0
obj.heater_minimum_capacity = var_heater_minimum_capacity
var_heater_ignition_minimum_flow_rate = 0.0
obj.heater_ignition_minimum_flow_rate = var_heater_ignition_minimum_flow_rate
var_heater_ignition_delay = 0.0
obj.heater_ignition_delay = var_heater_ignition_delay
var_heater_fuel_type = "Electricity"
obj.heater_fuel_type = var_heater_fuel_type
var_heater_thermal_efficiency = 0.50005
obj.heater_thermal_efficiency = var_heater_thermal_efficiency
var_part_load_factor_curve_name = "object-list|Part Load Factor Curve Name"
obj.part_load_factor_curve_name = var_part_load_factor_curve_name
var_off_cycle_parasitic_fuel_consumption_rate = 0.0
obj.off_cycle_parasitic_fuel_consumption_rate = var_off_cycle_parasitic_fuel_consumption_rate
var_off_cycle_parasitic_fuel_type = "Electricity"
obj.off_cycle_parasitic_fuel_type = var_off_cycle_parasitic_fuel_type
var_off_cycle_parasitic_heat_fraction_to_tank = 0.5
obj.off_cycle_parasitic_heat_fraction_to_tank = var_off_cycle_parasitic_heat_fraction_to_tank
var_on_cycle_parasitic_fuel_consumption_rate = 0.0
obj.on_cycle_parasitic_fuel_consumption_rate = var_on_cycle_parasitic_fuel_consumption_rate
var_on_cycle_parasitic_fuel_type = "Electricity"
obj.on_cycle_parasitic_fuel_type = var_on_cycle_parasitic_fuel_type
var_on_cycle_parasitic_heat_fraction_to_tank = 0.5
obj.on_cycle_parasitic_heat_fraction_to_tank = var_on_cycle_parasitic_heat_fraction_to_tank
var_ambient_temperature_indicator = "Schedule"
obj.ambient_temperature_indicator = var_ambient_temperature_indicator
var_ambient_temperature_schedule_name = "object-list|Ambient Temperature Schedule Name"
obj.ambient_temperature_schedule_name = var_ambient_temperature_schedule_name
var_ambient_temperature_zone_name = "object-list|Ambient Temperature Zone Name"
obj.ambient_temperature_zone_name = var_ambient_temperature_zone_name
var_ambient_temperature_outdoor_air_node_name = "node|Ambient Temperature Outdoor Air Node Name"
obj.ambient_temperature_outdoor_air_node_name = var_ambient_temperature_outdoor_air_node_name
var_off_cycle_loss_coefficient_to_ambient_temperature = 0.0
obj.off_cycle_loss_coefficient_to_ambient_temperature = var_off_cycle_loss_coefficient_to_ambient_temperature
var_off_cycle_loss_fraction_to_zone = 0.5
obj.off_cycle_loss_fraction_to_zone = var_off_cycle_loss_fraction_to_zone
var_on_cycle_loss_coefficient_to_ambient_temperature = 0.0
obj.on_cycle_loss_coefficient_to_ambient_temperature = var_on_cycle_loss_coefficient_to_ambient_temperature
var_on_cycle_loss_fraction_to_zone = 0.5
obj.on_cycle_loss_fraction_to_zone = var_on_cycle_loss_fraction_to_zone
var_peak_use_flow_rate = 0.0
obj.peak_use_flow_rate = var_peak_use_flow_rate
var_use_flow_rate_fraction_schedule_name = "object-list|Use Flow Rate Fraction Schedule Name"
obj.use_flow_rate_fraction_schedule_name = var_use_flow_rate_fraction_schedule_name
var_cold_water_supply_temperature_schedule_name = "object-list|Cold Water Supply Temperature Schedule Name"
obj.cold_water_supply_temperature_schedule_name = var_cold_water_supply_temperature_schedule_name
var_use_side_inlet_node_name = "node|Use Side Inlet Node Name"
obj.use_side_inlet_node_name = var_use_side_inlet_node_name
var_use_side_outlet_node_name = "node|Use Side Outlet Node Name"
obj.use_side_outlet_node_name = var_use_side_outlet_node_name
var_use_side_effectiveness = 0.5
obj.use_side_effectiveness = var_use_side_effectiveness
var_source_side_inlet_node_name = "node|Source Side Inlet Node Name"
obj.source_side_inlet_node_name = var_source_side_inlet_node_name
var_source_side_outlet_node_name = "node|Source Side Outlet Node Name"
obj.source_side_outlet_node_name = var_source_side_outlet_node_name
var_source_side_effectiveness = 0.5
obj.source_side_effectiveness = var_source_side_effectiveness
var_use_side_design_flow_rate = 0.0
obj.use_side_design_flow_rate = var_use_side_design_flow_rate
var_source_side_design_flow_rate = 0.0
obj.source_side_design_flow_rate = var_source_side_design_flow_rate
var_indirect_water_heating_recovery_time = 0.0001
obj.indirect_water_heating_recovery_time = var_indirect_water_heating_recovery_time
var_source_side_flow_control_mode = "StorageTank"
obj.source_side_flow_control_mode = var_source_side_flow_control_mode
var_indirect_alternate_setpoint_temperature_schedule_name = "object-list|Indirect Alternate Setpoint Temperature Schedule Name"
obj.indirect_alternate_setpoint_temperature_schedule_name = var_indirect_alternate_setpoint_temperature_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.waterheatermixeds[0].name, var_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].tank_volume, var_tank_volume)
self.assertEqual(idf2.waterheatermixeds[0].setpoint_temperature_schedule_name, var_setpoint_temperature_schedule_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].deadband_temperature_difference, var_deadband_temperature_difference)
self.assertAlmostEqual(idf2.waterheatermixeds[0].maximum_temperature_limit, var_maximum_temperature_limit)
self.assertEqual(idf2.waterheatermixeds[0].heater_control_type, var_heater_control_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_maximum_capacity, var_heater_maximum_capacity)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_minimum_capacity, var_heater_minimum_capacity)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_ignition_minimum_flow_rate, var_heater_ignition_minimum_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_ignition_delay, var_heater_ignition_delay)
self.assertEqual(idf2.waterheatermixeds[0].heater_fuel_type, var_heater_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_thermal_efficiency, var_heater_thermal_efficiency)
self.assertEqual(idf2.waterheatermixeds[0].part_load_factor_curve_name, var_part_load_factor_curve_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_fuel_consumption_rate, var_off_cycle_parasitic_fuel_consumption_rate)
self.assertEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_fuel_type, var_off_cycle_parasitic_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_heat_fraction_to_tank, var_off_cycle_parasitic_heat_fraction_to_tank)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_fuel_consumption_rate, var_on_cycle_parasitic_fuel_consumption_rate)
self.assertEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_fuel_type, var_on_cycle_parasitic_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_heat_fraction_to_tank, var_on_cycle_parasitic_heat_fraction_to_tank)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_indicator, var_ambient_temperature_indicator)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_schedule_name, var_ambient_temperature_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_zone_name, var_ambient_temperature_zone_name)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_outdoor_air_node_name, var_ambient_temperature_outdoor_air_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_loss_coefficient_to_ambient_temperature, var_off_cycle_loss_coefficient_to_ambient_temperature)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_loss_fraction_to_zone, var_off_cycle_loss_fraction_to_zone)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_loss_coefficient_to_ambient_temperature, var_on_cycle_loss_coefficient_to_ambient_temperature)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_loss_fraction_to_zone, var_on_cycle_loss_fraction_to_zone)
self.assertAlmostEqual(idf2.waterheatermixeds[0].peak_use_flow_rate, var_peak_use_flow_rate)
self.assertEqual(idf2.waterheatermixeds[0].use_flow_rate_fraction_schedule_name, var_use_flow_rate_fraction_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].cold_water_supply_temperature_schedule_name, var_cold_water_supply_temperature_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].use_side_inlet_node_name, var_use_side_inlet_node_name)
self.assertEqual(idf2.waterheatermixeds[0].use_side_outlet_node_name, var_use_side_outlet_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].use_side_effectiveness, var_use_side_effectiveness)
self.assertEqual(idf2.waterheatermixeds[0].source_side_inlet_node_name, var_source_side_inlet_node_name)
self.assertEqual(idf2.waterheatermixeds[0].source_side_outlet_node_name, var_source_side_outlet_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].source_side_effectiveness, var_source_side_effectiveness)
self.assertAlmostEqual(idf2.waterheatermixeds[0].use_side_design_flow_rate, var_use_side_design_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].source_side_design_flow_rate, var_source_side_design_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].indirect_water_heating_recovery_time, var_indirect_water_heating_recovery_time)
self.assertEqual(idf2.waterheatermixeds[0].source_side_flow_control_mode, var_source_side_flow_control_mode)
self.assertEqual(idf2.waterheatermixeds[0].indirect_alternate_setpoint_temperature_schedule_name, var_indirect_alternate_setpoint_temperature_schedule_name)
| true
| true
|
79022ae3850974f846ceba8fa65b58398682e79d
| 3,184
|
py
|
Python
|
tests/utils/wsgi/tests.py
|
ascan-io/raven-python
|
5b3f48c66269993a0202cfc988750e5fe66e0c00
|
[
"BSD-3-Clause"
] | 1,108
|
2015-01-02T01:20:00.000Z
|
2022-03-09T02:22:40.000Z
|
tests/utils/wsgi/tests.py
|
nvllsvm/raven-python
|
c4403f21973138cd20cf9c005da4fb934836d76e
|
[
"BSD-3-Clause"
] | 698
|
2015-01-04T11:12:57.000Z
|
2022-01-22T08:07:51.000Z
|
tests/utils/wsgi/tests.py
|
nvllsvm/raven-python
|
c4403f21973138cd20cf9c005da4fb934836d76e
|
[
"BSD-3-Clause"
] | 486
|
2015-01-04T09:00:33.000Z
|
2022-03-09T02:37:18.000Z
|
from raven.utils.testutils import TestCase
from raven.utils.wsgi import get_headers, get_host, get_environ, get_client_ip
class GetHeadersTest(TestCase):
def test_tuple_as_key(self):
result = dict(get_headers({
('a', 'tuple'): 'foo',
}))
self.assertEquals(result, {})
def test_coerces_http_name(self):
result = dict(get_headers({
'HTTP_ACCEPT': 'text/plain',
}))
self.assertIn('Accept', result)
self.assertEquals(result['Accept'], 'text/plain')
def test_coerces_content_type(self):
result = dict(get_headers({
'CONTENT_TYPE': 'text/plain',
}))
self.assertIn('Content-Type', result)
self.assertEquals(result['Content-Type'], 'text/plain')
def test_coerces_content_length(self):
result = dict(get_headers({
'CONTENT_LENGTH': '134',
}))
self.assertIn('Content-Length', result)
self.assertEquals(result['Content-Length'], '134')
class GetEnvironTest(TestCase):
def test_has_remote_addr(self):
result = dict(get_environ({'REMOTE_ADDR': '127.0.0.1'}))
self.assertIn('REMOTE_ADDR', result)
self.assertEquals(result['REMOTE_ADDR'], '127.0.0.1')
def test_has_server_name(self):
result = dict(get_environ({'SERVER_NAME': '127.0.0.1'}))
self.assertIn('SERVER_NAME', result)
self.assertEquals(result['SERVER_NAME'], '127.0.0.1')
def test_has_server_port(self):
result = dict(get_environ({'SERVER_PORT': 80}))
self.assertIn('SERVER_PORT', result)
self.assertEquals(result['SERVER_PORT'], 80)
def test_hides_wsgi_input(self):
result = list(get_environ({'wsgi.input': 'foo'}))
self.assertNotIn('wsgi.input', result)
class GetHostTest(TestCase):
def test_http_x_forwarded_host(self):
result = get_host({'HTTP_X_FORWARDED_HOST': 'example.com'})
self.assertEquals(result, 'example.com')
def test_http_host(self):
result = get_host({'HTTP_HOST': 'example.com'})
self.assertEquals(result, 'example.com')
def test_http_strips_port(self):
result = get_host({
'wsgi.url_scheme': 'http',
'SERVER_NAME': 'example.com',
'SERVER_PORT': '80',
})
self.assertEquals(result, 'example.com')
def test_https_strips_port(self):
result = get_host({
'wsgi.url_scheme': 'https',
'SERVER_NAME': 'example.com',
'SERVER_PORT': '443',
})
self.assertEquals(result, 'example.com')
def test_http_nonstandard_port(self):
result = get_host({
'wsgi.url_scheme': 'http',
'SERVER_NAME': 'example.com',
'SERVER_PORT': '81',
})
self.assertEquals(result, 'example.com:81')
class GetClientIpTest(TestCase):
def test_has_remote_addr(self):
result = get_client_ip({'REMOTE_ADDR': '127.0.0.1'})
self.assertEquals(result, '127.0.0.1')
def test_xff(self):
result = get_client_ip({'HTTP_X_FORWARDED_FOR': '1.1.1.1, 127.0.0.1'})
self.assertEquals(result, '1.1.1.1')
| 32.824742
| 78
| 0.612437
|
from raven.utils.testutils import TestCase
from raven.utils.wsgi import get_headers, get_host, get_environ, get_client_ip
class GetHeadersTest(TestCase):
def test_tuple_as_key(self):
result = dict(get_headers({
('a', 'tuple'): 'foo',
}))
self.assertEquals(result, {})
def test_coerces_http_name(self):
result = dict(get_headers({
'HTTP_ACCEPT': 'text/plain',
}))
self.assertIn('Accept', result)
self.assertEquals(result['Accept'], 'text/plain')
def test_coerces_content_type(self):
result = dict(get_headers({
'CONTENT_TYPE': 'text/plain',
}))
self.assertIn('Content-Type', result)
self.assertEquals(result['Content-Type'], 'text/plain')
def test_coerces_content_length(self):
result = dict(get_headers({
'CONTENT_LENGTH': '134',
}))
self.assertIn('Content-Length', result)
self.assertEquals(result['Content-Length'], '134')
class GetEnvironTest(TestCase):
def test_has_remote_addr(self):
result = dict(get_environ({'REMOTE_ADDR': '127.0.0.1'}))
self.assertIn('REMOTE_ADDR', result)
self.assertEquals(result['REMOTE_ADDR'], '127.0.0.1')
def test_has_server_name(self):
result = dict(get_environ({'SERVER_NAME': '127.0.0.1'}))
self.assertIn('SERVER_NAME', result)
self.assertEquals(result['SERVER_NAME'], '127.0.0.1')
def test_has_server_port(self):
result = dict(get_environ({'SERVER_PORT': 80}))
self.assertIn('SERVER_PORT', result)
self.assertEquals(result['SERVER_PORT'], 80)
def test_hides_wsgi_input(self):
result = list(get_environ({'wsgi.input': 'foo'}))
self.assertNotIn('wsgi.input', result)
class GetHostTest(TestCase):
def test_http_x_forwarded_host(self):
result = get_host({'HTTP_X_FORWARDED_HOST': 'example.com'})
self.assertEquals(result, 'example.com')
def test_http_host(self):
result = get_host({'HTTP_HOST': 'example.com'})
self.assertEquals(result, 'example.com')
def test_http_strips_port(self):
result = get_host({
'wsgi.url_scheme': 'http',
'SERVER_NAME': 'example.com',
'SERVER_PORT': '80',
})
self.assertEquals(result, 'example.com')
def test_https_strips_port(self):
result = get_host({
'wsgi.url_scheme': 'https',
'SERVER_NAME': 'example.com',
'SERVER_PORT': '443',
})
self.assertEquals(result, 'example.com')
def test_http_nonstandard_port(self):
result = get_host({
'wsgi.url_scheme': 'http',
'SERVER_NAME': 'example.com',
'SERVER_PORT': '81',
})
self.assertEquals(result, 'example.com:81')
class GetClientIpTest(TestCase):
def test_has_remote_addr(self):
result = get_client_ip({'REMOTE_ADDR': '127.0.0.1'})
self.assertEquals(result, '127.0.0.1')
def test_xff(self):
result = get_client_ip({'HTTP_X_FORWARDED_FOR': '1.1.1.1, 127.0.0.1'})
self.assertEquals(result, '1.1.1.1')
| true
| true
|
79022b02587d8187215cec19bd318291e2285fc1
| 1,650
|
py
|
Python
|
Assignment/Environmental_Project/part_A.py
|
Maruja/Maruja-ILAS-Python
|
af304bfa2767fb30982e88d4b2138113237ba99d
|
[
"MIT"
] | null | null | null |
Assignment/Environmental_Project/part_A.py
|
Maruja/Maruja-ILAS-Python
|
af304bfa2767fb30982e88d4b2138113237ba99d
|
[
"MIT"
] | null | null | null |
Assignment/Environmental_Project/part_A.py
|
Maruja/Maruja-ILAS-Python
|
af304bfa2767fb30982e88d4b2138113237ba99d
|
[
"MIT"
] | null | null | null |
from pandas import read_csv
from IPython.display import display
import numpy as np
import sys
import math
###############################
####Maria Eugenia Lopez #####
###############################
def fully_grown_depuration(number_to_remove=0.50):
return plants.loc[plants.height_m > number_to_remove]
def convert_GPS_lat_long(df):
for index, row in df.iterrows():
lat_viejo = row["GPS_lat"]
latVal = (40008000*row["GPS_lat"])/360
# res = div*0.001  # to convert to km
df.loc[index,"GPS_lat"] = latVal
lat_radians = math.radians(lat_viejo)
lonVal = (40075160*row["GPS_lon"])/360
lonVal = lonVal*math.cos(lat_radians)
#res = res*0.001
df.loc[index,"GPS_lon"] = lonVal
##----------------------------------------
##Part A Assembling a Data Set
##----------------------------------------
##----------------------------------------
##Input and Output: Data Frames
plants = read_csv('environmental_survey/plants2017.csv',
index_col=0)
plants.reset_index(level=0,inplace=True)
plants.drop(plants.index[plants.Plant == 'tree'], inplace=True)
#display(plants.head(n=50))
plants.reset_index(drop=True,inplace=True)
##----------------------------------------
##Functions
convert_GPS_lat_long( plants)
plants.rename(columns={'GPS_lon':'Meters_lon',
'GPS_lat':'Meters_lat'}, inplace=True)
##----------------------------------------
##Functions and Data Structures: Boolean Indexing
height_set_by_user = float(input("Set the height that you want: ") or "0.5")
plants = fully_grown_depuration(height_set_by_user)
# resetting the index after the depuration
plants.reset_index(drop=True,inplace=True)
display(plants)
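For reference, convert_GPS_lat_long above maps degrees to metres using the Earth's meridional circumference (about 40,008 km) for latitude and the equatorial circumference (about 40,075 km) scaled by cos(latitude) for longitude. A self-contained sketch of the same conversion; the sample coordinate is purely illustrative and not taken from the survey data:

import math

def degrees_to_metres(lat_deg, lon_deg):
    # one degree of latitude is roughly 40,008,000 m / 360
    lat_m = 40008000 * lat_deg / 360
    # one degree of longitude shrinks with cos(latitude); about 40,075,160 m at the equator
    lon_m = 40075160 * lon_deg / 360 * math.cos(math.radians(lat_deg))
    return lat_m, lon_m

print(degrees_to_metres(35.03, 135.78))  # illustrative coordinate only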
| 27.04918
| 75
| 0.621212
|
from pandas import read_csv
from IPython.display import display
import numpy as np
import sys
import math
| true
| true
|
79022c0a04e64d398c42c8992ac71551c676d280
| 248
|
py
|
Python
|
encuestaapp/encuestaapp/doctype/encuesta/encuesta.py
|
ErickLopez76/encuestaapp
|
f700ce3bf1b1d5decaf511876a705e1bb2894168
|
[
"MIT"
] | null | null | null |
encuestaapp/encuestaapp/doctype/encuesta/encuesta.py
|
ErickLopez76/encuestaapp
|
f700ce3bf1b1d5decaf511876a705e1bb2894168
|
[
"MIT"
] | null | null | null |
encuestaapp/encuestaapp/doctype/encuesta/encuesta.py
|
ErickLopez76/encuestaapp
|
f700ce3bf1b1d5decaf511876a705e1bb2894168
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, SIS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Encuesta(Document):
pass
| 22.545455
| 49
| 0.774194
|
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Encuesta(Document):
pass
| true
| true
|
79022cd7e04ec932f9a0cabe49501c8ca21093a9
| 8,057
|
py
|
Python
|
cryptoapis/model/address_tokens_transaction_unconfirmed_omnilayertoken.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
cryptoapis/model/address_tokens_transaction_unconfirmed_omnilayertoken.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
cryptoapis/model/address_tokens_transaction_unconfirmed_omnilayertoken.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | 1
|
2021-07-21T03:35:18.000Z
|
2021-07-21T03:35:18.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AddressTokensTransactionUnconfirmedOmnilayertoken(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
'property_id': (str,), # noqa: E501
'transaction_type': (str,), # noqa: E501
'created_by_transaction_id': (str,), # noqa: E501
'amount': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'property_id': 'propertyId', # noqa: E501
'transaction_type': 'transactionType', # noqa: E501
'created_by_transaction_id': 'createdByTransactionId', # noqa: E501
'amount': 'amount', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, property_id, transaction_type, created_by_transaction_id, amount, *args, **kwargs): # noqa: E501
"""AddressTokensTransactionUnconfirmedOmnilayertoken - a model defined in OpenAPI
Args:
name (str): Specifies the name of the token.
property_id (str): Defines the ID of the property for Omni Layer.
transaction_type (str): Defines the type of the transaction made.
created_by_transaction_id (str): The transaction ID used to create the token.
amount (str): Defines the amount of tokens sent with the transaction that is pending confirmation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.property_id = property_id
self.transaction_type = transaction_type
self.created_by_transaction_id = created_by_transaction_id
self.amount = amount
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
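A minimal instantiation sketch for the generated model above, passing only the required positional arguments declared in __init__; every field value is an illustrative placeholder, not real blockchain data:

from cryptoapis.model.address_tokens_transaction_unconfirmed_omnilayertoken import (
    AddressTokensTransactionUnconfirmedOmnilayertoken,
)

# Placeholder values throughout; openapi_types above declares every field as str.
token = AddressTokensTransactionUnconfirmedOmnilayertoken(
    name="Example Omni token",
    property_id="31",
    transaction_type="simple send",
    created_by_transaction_id="0" * 64,
    amount="100.0",
)
print(token.amount)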
| 43.085561
| 484
| 0.60767
|
import re
import sys
from cryptoapis.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AddressTokensTransactionUnconfirmedOmnilayertoken(ModelNormal):
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
return {
'name': (str,),
'property_id': (str,),
'transaction_type': (str,),
'created_by_transaction_id': (str,),
'amount': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name',
'property_id': 'propertyId',
'transaction_type': 'transactionType',
'created_by_transaction_id': 'createdByTransactionId',
'amount': 'amount',
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, property_id, transaction_type, created_by_transaction_id, amount, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.property_id = property_id
self.transaction_type = transaction_type
self.created_by_transaction_id = created_by_transaction_id
self.amount = amount
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
| true
| true
|
79022d91cb29da2457287e5fc0b7d5dcf474992c
| 785
|
py
|
Python
|
galaxy_api/api/v3/serializers/namespace.py
|
newswangerd/galaxy-api
|
af38b8f8931d3e0e6d43c0ab3e752305a9e59241
|
[
"Apache-2.0"
] | null | null | null |
galaxy_api/api/v3/serializers/namespace.py
|
newswangerd/galaxy-api
|
af38b8f8931d3e0e6d43c0ab3e752305a9e59241
|
[
"Apache-2.0"
] | null | null | null |
galaxy_api/api/v3/serializers/namespace.py
|
newswangerd/galaxy-api
|
af38b8f8931d3e0e6d43c0ab3e752305a9e59241
|
[
"Apache-2.0"
] | null | null | null |
from django.db import transaction
from rest_framework.serializers import ModelSerializer
from galaxy_api.api import models
class NamespaceLinkSerializer(ModelSerializer):
class Meta:
model = models.NamespaceLink
fields = ('name', 'url')
class NamespaceSerializer(ModelSerializer):
links = NamespaceLinkSerializer(many=True)
class Meta:
model = models.Namespace
fields = ('name', 'company', 'email', 'avatar_url', 'description', 'links')
read_only_fields = ('name', )
def update(self, instance, validated_data):
links = validated_data.pop('links')
with transaction.atomic():
instance = super().update(instance, validated_data)
instance.update_links(links)
return instance
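An illustrative payload shape for NamespaceSerializer above: update() pops the nested links list and applies it through instance.update_links() inside a single transaction. Field values and object names here are placeholders, not data from any real namespace:

# Hypothetical payload; only the shape matters (nested "links" alongside scalar fields).
example_payload = {
    "company": "Example Inc.",
    "email": "owner@example.com",
    "avatar_url": "https://example.com/avatar.png",
    "description": "Demo namespace",
    "links": [
        {"name": "homepage", "url": "https://example.com"},
    ],
}
# A view would typically run:
#   serializer = NamespaceSerializer(instance=namespace, data=example_payload)
#   serializer.is_valid(raise_exception=True); serializer.save()
# which routes through the update() override shown above.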
| 26.166667
| 83
| 0.677707
|
from django.db import transaction
from rest_framework.serializers import ModelSerializer
from galaxy_api.api import models
class NamespaceLinkSerializer(ModelSerializer):
class Meta:
model = models.NamespaceLink
fields = ('name', 'url')
class NamespaceSerializer(ModelSerializer):
links = NamespaceLinkSerializer(many=True)
class Meta:
model = models.Namespace
fields = ('name', 'company', 'email', 'avatar_url', 'description', 'links')
read_only_fields = ('name', )
def update(self, instance, validated_data):
links = validated_data.pop('links')
with transaction.atomic():
instance = super().update(instance, validated_data)
instance.update_links(links)
return instance
| true
| true
|
79022dba482c5c0b18be51a3860de0819c255794
| 2,432
|
py
|
Python
|
app.py
|
RodolfoFerro/iris-api
|
3034a1629d28feb215be2fdbf24edbd1176ff0d6
|
[
"MIT"
] | null | null | null |
app.py
|
RodolfoFerro/iris-api
|
3034a1629d28feb215be2fdbf24edbd1176ff0d6
|
[
"MIT"
] | null | null | null |
app.py
|
RodolfoFerro/iris-api
|
3034a1629d28feb215be2fdbf24edbd1176ff0d6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# ===============================================================
# Author: Rodolfo Ferro
# Email: ferro@cimat.mx
# Twitter: @FerroRodolfo
#
# ABOUT COPYING OR USING PARTIAL INFORMATION:
# This script was originally created by Rodolfo Ferro, for
# his workshop in HackSureste 2019 at Universidad Modelo
# in Mérida. Any explicit usage of this script or its
# contents is granted according to the license provided and
# its conditions.
# ===============================================================
from flask import Flask, jsonify, request, render_template
from iris import iris_classifier
from pprint import pprint
import numpy as np
import requests
import json
# Main app:
app = Flask(__name__)
# Global:
version = 'v0.0'
classifier = iris_classifier()
species = {
'0': 'I. setosa',
'1': 'I. versicolor',
'2': 'I. virginica'
}
# Static website:
@app.route('/')
def index():
return render_template("index.html")
# API MAIN STRUCTURE:
@app.route('/api/' + version, methods=['GET'])
def test():
"""
GET method to test the API.
"""
# Output message:
message = {
"response": [
{
"text": "Hello world!"
}
]
}
return jsonify(message)
@app.route('/api/' + version + '/predict', methods=['POST'])
def predict():
"""
POST method to predict with our classification model.
"""
# Get data from JSON object in POST method:
req_data = request.get_json()
# Parse data from JSON:
sl = req_data['sepal_length']
sw = req_data['sepal_width']
pl = req_data['petal_length']
pw = req_data['petal_width']
# Predict with model:
input_data = np.array([[sl, sw, pl, pw]])
prediction = classifier.predict(input_data)
print(prediction)
# Output message:
message = {"response": [
{"input": {
'sepal_length': sl,
'sepal_width': sw,
'petal_length': pl,
'petal_width': pw
}},
{"prediction": int(prediction[0])},
{"species": species[str(prediction[0])]}]}
return jsonify(message)
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found: ' + request.url,
}
response = jsonify(message)
response.status_code = 404
return response
if __name__ == '__main__':
app.run(debug=True, port=5000)
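A client-side sketch of calling the /predict route defined above, assuming the app is running locally on the default port from app.run; the measurements are illustrative sample values:

import requests

payload = {
    "sepal_length": 5.1,
    "sepal_width": 3.5,
    "petal_length": 1.4,
    "petal_width": 0.2,
}
resp = requests.post("http://127.0.0.1:5000/api/v0.0/predict", json=payload)
# Expected shape: {"response": [{"input": {...}}, {"prediction": ...}, {"species": ...}]}
print(resp.json())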
| 22.518519
| 65
| 0.581003
|
from flask import Flask, jsonify, request, render_template
from iris import iris_classifier
from pprint import pprint
import numpy as np
import requests
import json
app = Flask(__name__)
version = 'v0.0'
classifier = iris_classifier()
species = {
'0': 'I. setosa',
'1': 'I. versicolor',
'2': 'I. virginica'
}
@app.route('/')
def index():
return render_template("index.html")
@app.route('/api/' + version, methods=['GET'])
def test():
message = {
"response": [
{
"text": "Hello world!"
}
]
}
return jsonify(message)
@app.route('/api/' + version + '/predict', methods=['POST'])
def predict():
req_data = request.get_json()
sl = req_data['sepal_length']
sw = req_data['sepal_width']
pl = req_data['petal_length']
pw = req_data['petal_width']
input_data = np.array([[sl, sw, pl, pw]])
prediction = classifier.predict(input_data)
print(prediction)
message = {"response": [
{"input": {
'sepal_length': sl,
'sepal_width': sw,
'petal_length': pl,
'petal_width': pw
}},
{"prediction": int(prediction[0])},
{"species": species[str(prediction[0])]}]}
return jsonify(message)
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found: ' + request.url,
}
response = jsonify(message)
response.status_code = 404
return response
if __name__ == '__main__':
app.run(debug=True, port=5000)
| true
| true
|
79022e001b0a8fc66527c0a45e9e3e15b278f859
| 764
|
py
|
Python
|
tests/test_modules/test_builtin/test_stringmeta.py
|
MattTaylorDLS/pymalcolm
|
995a8e4729bd745f8f617969111cc5a34ce1ac14
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modules/test_builtin/test_stringmeta.py
|
MattTaylorDLS/pymalcolm
|
995a8e4729bd745f8f617969111cc5a34ce1ac14
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modules/test_builtin/test_stringmeta.py
|
MattTaylorDLS/pymalcolm
|
995a8e4729bd745f8f617969111cc5a34ce1ac14
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from malcolm.modules.builtin.vmetas import StringMeta
class TestValidate(unittest.TestCase):
def setUp(self):
self.string_meta = StringMeta("test string description")
def test_given_value_str_then_return(self):
response = self.string_meta.validate("TestValue")
assert "TestValue" == response
def test_given_value_int_then_cast_and_return(self):
response = self.string_meta.validate(15)
assert "15" == response
def test_given_value_float_then_cast_and_return(self):
response = self.string_meta.validate(12.8)
assert "12.8" == response
def test_given_value_None_then_return(self):
response = self.string_meta.validate(None)
assert "" == response
| 25.466667
| 64
| 0.708115
|
import unittest
from malcolm.modules.builtin.vmetas import StringMeta
class TestValidate(unittest.TestCase):
def setUp(self):
self.string_meta = StringMeta("test string description")
def test_given_value_str_then_return(self):
response = self.string_meta.validate("TestValue")
assert "TestValue" == response
def test_given_value_int_then_cast_and_return(self):
response = self.string_meta.validate(15)
assert "15" == response
def test_given_value_float_then_cast_and_return(self):
response = self.string_meta.validate(12.8)
assert "12.8" == response
def test_given_value_None_then_return(self):
response = self.string_meta.validate(None)
assert "" == response
| true
| true
|
79022edc1efa0813d11211efa020586eec17e0f1
| 1,163
|
py
|
Python
|
test/integration/daos/test_player_dao.py
|
jrj92280/python-eve-backend
|
c0566cdef5e5c75e2b75e59bde804e0d4ce407e3
|
[
"MIT"
] | null | null | null |
test/integration/daos/test_player_dao.py
|
jrj92280/python-eve-backend
|
c0566cdef5e5c75e2b75e59bde804e0d4ce407e3
|
[
"MIT"
] | null | null | null |
test/integration/daos/test_player_dao.py
|
jrj92280/python-eve-backend
|
c0566cdef5e5c75e2b75e59bde804e0d4ce407e3
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from chess_game.daos.player_dao import PlayerDao
from chess_game.models.player import Player
def test_player_dao_init(mongo_database):
player_dao = PlayerDao(mongo_database)
assert mongo_database == player_dao._mongo_database
def test_dao_create_and_find_player(mongo_database):
start_date = datetime.now()
player = Player(name="_Obi", stats={}, games=[], start_date=start_date)
player_dao = PlayerDao(mongo_database)
player_id = player_dao.create(player)
loaded_player = player_dao.find_by_id(player_id)
assert loaded_player['_id']
assert "_Obi" == loaded_player['name']
assert {} == loaded_player['stats']
assert [] == loaded_player['games']
assert f'{start_date:%Y-%m-%d %H:%M:%S}' == loaded_player['start_date']
def test_dao_create_and_find_players(mongo_database):
player = Player()
player_dao = PlayerDao(mongo_database)
player_dao.create(player)
player_id = player_dao.create(player)
loaded_players = player_dao.find_all()
assert len(loaded_players) > 1
assert len([player for player in loaded_players if player_id == str(player['_id'])])
| 31.432432
| 88
| 0.736028
|
from datetime import datetime
from chess_game.daos.player_dao import PlayerDao
from chess_game.models.player import Player
def test_player_dao_init(mongo_database):
player_dao = PlayerDao(mongo_database)
assert mongo_database == player_dao._mongo_database
def test_dao_create_and_find_player(mongo_database):
start_date = datetime.now()
player = Player(name="_Obi", stats={}, games=[], start_date=start_date)
player_dao = PlayerDao(mongo_database)
player_id = player_dao.create(player)
loaded_player = player_dao.find_by_id(player_id)
assert loaded_player['_id']
assert "_Obi" == loaded_player['name']
assert {} == loaded_player['stats']
assert [] == loaded_player['games']
assert f'{start_date:%Y-%m-%d %H:%M:%S}' == loaded_player['start_date']
def test_dao_create_and_find_players(mongo_database):
player = Player()
player_dao = PlayerDao(mongo_database)
player_dao.create(player)
player_id = player_dao.create(player)
loaded_players = player_dao.find_all()
assert len(loaded_players) > 1
assert len([player for player in loaded_players if player_id == str(player['_id'])])
| true
| true
|
79022eea5f202befb3d865da12ea9717575bfe7c
| 1,231
|
py
|
Python
|
lcb_version.py
|
griels/couchbase-python-client-ng
|
bcda55109f82e41041cf727d604bb335546f64e4
|
[
"Apache-2.0"
] | 1
|
2019-10-01T19:06:29.000Z
|
2019-10-01T19:06:29.000Z
|
lcb_version.py
|
pauldx/couchbase-python-client
|
98bdd44604675f7ad844b39f72e754dec6445cbb
|
[
"Apache-2.0"
] | null | null | null |
lcb_version.py
|
pauldx/couchbase-python-client
|
98bdd44604675f7ad844b39f72e754dec6445cbb
|
[
"Apache-2.0"
] | null | null | null |
import logging
import warnings
lcb_min_version_baseline = (2, 9, 0)
def get_lcb_min_version():
result = lcb_min_version_baseline
try:
# check the version listed in README.rst isn't greater than lcb_min_version
# bump it up to the specified version if it is
import docutils.parsers.rst
import docutils.utils
import docutils.frontend
parser = docutils.parsers.rst.Parser()
with open("README.rst") as README:
settings = docutils.frontend.OptionParser().get_default_values()
settings.update(
dict(tab_width=4, report_level=1, pep_references=False, rfc_references=False, syntax_highlight=False),
docutils.frontend.OptionParser())
document = docutils.utils.new_document(README.name, settings=settings)
parser.parse(README.read(), document)
readme_min_version = tuple(
map(int, document.substitution_defs.get("libcouchbase_version").astext().split('.')))
result = max(result, readme_min_version)
logging.info("min version is {}".format(result))
except Exception as e:
warnings.warn("problem: {}".format(e))
return result
| 38.46875
| 118
| 0.656377
|
import logging
import warnings
lcb_min_version_baseline = (2, 9, 0)
def get_lcb_min_version():
result = lcb_min_version_baseline
try:
# bump it up to the specified version if it is
import docutils.parsers.rst
import docutils.utils
import docutils.frontend
parser = docutils.parsers.rst.Parser()
with open("README.rst") as README:
settings = docutils.frontend.OptionParser().get_default_values()
settings.update(
dict(tab_width=4, report_level=1, pep_references=False, rfc_references=False, syntax_highlight=False),
docutils.frontend.OptionParser())
document = docutils.utils.new_document(README.name, settings=settings)
parser.parse(README.read(), document)
readme_min_version = tuple(
map(int, document.substitution_defs.get("libcouchbase_version").astext().split('.')))
result = max(result, readme_min_version)
logging.info("min version is {}".format(result))
except Exception as e:
warnings.warn("problem: {}".format(e))
return result
| true
| true
|
7902302e000b2b50d5fa2ccfac4751c48f081444
| 4,412
|
py
|
Python
|
setup.py
|
zalando/github-maintainer-cli
|
786610ab63e3d9e4c94edd0f013f04b006e9624f
|
[
"Apache-2.0"
] | 10
|
2016-06-07T06:00:27.000Z
|
2016-11-26T18:35:13.000Z
|
setup.py
|
hjacobs/github-maintainer-cli
|
786610ab63e3d9e4c94edd0f013f04b006e9624f
|
[
"Apache-2.0"
] | 4
|
2015-11-26T17:56:33.000Z
|
2016-05-14T09:27:42.000Z
|
setup.py
|
zalando-stups/github-maintainer-cli
|
786610ab63e3d9e4c94edd0f013f04b006e9624f
|
[
"Apache-2.0"
] | 2
|
2018-11-17T16:58:39.000Z
|
2021-07-09T23:46:22.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import inspect
import setuptools
from setuptools.command.test import test as TestCommand
from setuptools import setup
if sys.version_info < (3, 4, 0):
sys.stderr.write('FATAL: This script needs to be run with Python 3.4+\n')
sys.exit(1)
__location__ = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe())))
def read_version(package):
data = {}
with open(os.path.join(package, '__init__.py'), 'r') as fd:
exec(fd.read(), data)
return data['__version__']
NAME = 'github-maintainer'
MAIN_PACKAGE = 'github_maintainer'
VERSION = read_version(MAIN_PACKAGE)
DESCRIPTION = 'CLI support tool for GitHub repo maintainers'
LICENSE = 'Apache License 2.0'
URL = 'https://github.com/zalando-stups/github-maintainer-cli'
AUTHOR = 'Henning Jacobs'
EMAIL = 'henning.jacobs@zalando.de'
COVERAGE_XML = True
COVERAGE_HTML = False
JUNIT_XML = True
# Add here all kinds of additional classifiers as defined under
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
]
CONSOLE_SCRIPTS = ['github-maintainer = github_maintainer.cli:main']
class PyTest(TestCommand):
user_options = [('cov=', None, 'Run coverage'), ('cov-xml=', None, 'Generate junit xml report'), ('cov-html=',
None, 'Generate junit html report'), ('junitxml=', None, 'Generate xml of test results')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.cov = None
self.cov_xml = False
self.cov_html = False
self.junitxml = None
def finalize_options(self):
TestCommand.finalize_options(self)
if self.cov is not None:
self.cov = ['--cov', self.cov, '--cov-report', 'term-missing']
if self.cov_xml:
self.cov.extend(['--cov-report', 'xml'])
if self.cov_html:
self.cov.extend(['--cov-report', 'html'])
if self.junitxml is not None:
self.junitxml = ['--junitxml', self.junitxml]
def run_tests(self):
try:
import pytest
except:
raise RuntimeError('py.test is not installed, run: pip install pytest')
params = {'args': self.test_args}
if self.cov:
params['args'] += self.cov
if self.junitxml:
params['args'] += self.junitxml
params['args'] += ['--doctest-modules', MAIN_PACKAGE, '-s']
errno = pytest.main(**params)
sys.exit(errno)
def get_install_requirements(path):
content = open(os.path.join(__location__, path)).read()
return [req for req in content.split('\\n') if req != '']
def read(fname):
return open(os.path.join(__location__, fname), encoding='utf-8').read()
def setup_package():
# Assemble additional setup commands
cmdclass = {}
cmdclass['test'] = PyTest
install_reqs = get_install_requirements('requirements.txt')
command_options = {'test': {'test_suite': ('setup.py', 'tests'), 'cov': ('setup.py', MAIN_PACKAGE)}}
if JUNIT_XML:
command_options['test']['junitxml'] = 'setup.py', 'junit.xml'
if COVERAGE_XML:
command_options['test']['cov_xml'] = 'setup.py', True
if COVERAGE_HTML:
command_options['test']['cov_html'] = 'setup.py', True
setup(
name=NAME,
version=VERSION,
url=URL,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
keywords='github git project maintainer',
long_description=read('README.rst'),
classifiers=CLASSIFIERS,
test_suite='tests',
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
install_requires=install_reqs,
setup_requires=['six', 'flake8'],
cmdclass=cmdclass,
tests_require=['pytest-cov', 'pytest'],
command_options=command_options,
entry_points={'console_scripts': CONSOLE_SCRIPTS},
)
if __name__ == '__main__':
setup_package()
| 31.514286
| 114
| 0.636673
|
import sys
import os
import inspect
import setuptools
from setuptools.command.test import test as TestCommand
from setuptools import setup
if sys.version_info < (3, 4, 0):
sys.stderr.write('FATAL: This script needs to be run with Python 3.4+\n')
sys.exit(1)
__location__ = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe())))
def read_version(package):
data = {}
with open(os.path.join(package, '__init__.py'), 'r') as fd:
exec(fd.read(), data)
return data['__version__']
NAME = 'github-maintainer'
MAIN_PACKAGE = 'github_maintainer'
VERSION = read_version(MAIN_PACKAGE)
DESCRIPTION = 'CLI support tool for GitHub repo maintainers'
LICENSE = 'Apache License 2.0'
URL = 'https://github.com/zalando-stups/github-maintainer-cli'
AUTHOR = 'Henning Jacobs'
EMAIL = 'henning.jacobs@zalando.de'
COVERAGE_XML = True
COVERAGE_HTML = False
JUNIT_XML = True
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
]
CONSOLE_SCRIPTS = ['github-maintainer = github_maintainer.cli:main']
class PyTest(TestCommand):
user_options = [('cov=', None, 'Run coverage'), ('cov-xml=', None, 'Generate junit xml report'), ('cov-html=',
None, 'Generate junit html report'), ('junitxml=', None, 'Generate xml of test results')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.cov = None
self.cov_xml = False
self.cov_html = False
self.junitxml = None
def finalize_options(self):
TestCommand.finalize_options(self)
if self.cov is not None:
self.cov = ['--cov', self.cov, '--cov-report', 'term-missing']
if self.cov_xml:
self.cov.extend(['--cov-report', 'xml'])
if self.cov_html:
self.cov.extend(['--cov-report', 'html'])
if self.junitxml is not None:
self.junitxml = ['--junitxml', self.junitxml]
def run_tests(self):
try:
import pytest
except ImportError:
raise RuntimeError('py.test is not installed, run: pip install pytest')
params = {'args': self.test_args}
if self.cov:
params['args'] += self.cov
if self.junitxml:
params['args'] += self.junitxml
params['args'] += ['--doctest-modules', MAIN_PACKAGE, '-s']
errno = pytest.main(**params)
sys.exit(errno)
def get_install_requirements(path):
content = open(os.path.join(__location__, path)).read()
return [req for req in content.split('\n') if req != '']
def read(fname):
return open(os.path.join(__location__, fname), encoding='utf-8').read()
def setup_package():
cmdclass = {}
cmdclass['test'] = PyTest
install_reqs = get_install_requirements('requirements.txt')
command_options = {'test': {'test_suite': ('setup.py', 'tests'), 'cov': ('setup.py', MAIN_PACKAGE)}}
if JUNIT_XML:
command_options['test']['junitxml'] = 'setup.py', 'junit.xml'
if COVERAGE_XML:
command_options['test']['cov_xml'] = 'setup.py', True
if COVERAGE_HTML:
command_options['test']['cov_html'] = 'setup.py', True
setup(
name=NAME,
version=VERSION,
url=URL,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
keywords='github git project maintainer',
long_description=read('README.rst'),
classifiers=CLASSIFIERS,
test_suite='tests',
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
install_requires=install_reqs,
setup_requires=['six', 'flake8'],
cmdclass=cmdclass,
tests_require=['pytest-cov', 'pytest'],
command_options=command_options,
entry_points={'console_scripts': CONSOLE_SCRIPTS},
)
if __name__ == '__main__':
setup_package()
| true
| true
|
790230325a2c117d009ad0980a677f43d07b8482
| 3,862
|
py
|
Python
|
tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.parallel_interleave()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import sleep
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
def _make_fake_dataset_fn():
"""Returns a dataset that emulates a remote storage data source.
Returns a dataset factory which creates a dataset with 100 elements that
emulates the performance characteristics of a file-based dataset stored in
remote storage. In particular, the first element takes several orders of
magnitude longer to produce than the remaining elements (1 s vs. 1 ms).
"""
def fake_dataset_fn(unused):
del unused
def make_dataset(time_us, num_elements):
return dataset_ops.Dataset.range(num_elements).apply(sleep.sleep(time_us))
return make_dataset(1000 * 1000, 0).concatenate(make_dataset(1000,
100)).take(100)
return fake_dataset_fn
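# For context: with cycle_length=10, a parallel interleave can overlap the
# ~1 s first-element delays of the ten emulated "files", whereas a sequential
# interleave pays them one after another; the benchmarks below are intended
# to expose exactly this difference.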
class ParallelInterleaveBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.parallel_interleave()`."""
def _benchmark(self, dataset_fn, iters, num_elements):
with ops.Graph().as_default():
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset_fn().with_options(options)
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
deltas = []
for _ in range(iters):
start = time.time()
for _ in range(num_elements):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
mean_wall_time = np.mean(deltas) / num_elements
self.report_benchmark(iters=iters, wall_time=mean_wall_time)
def benchmark_sequential_interleave(self):
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(
_make_fake_dataset_fn(), cycle_length=10)
self._benchmark(dataset_fn=dataset_fn, iters=10, num_elements=100)
def benchmark_parallel_interleave_v1(self):
"""Benchmark for parallel interleave that does not support autotuning."""
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().apply(
interleave_ops.parallel_interleave(
_make_fake_dataset_fn(), cycle_length=10))
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
def benchmark_parallel_interleave_v2(self):
"""Benchmark for parallel interleave that supports autotuning."""
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(
_make_fake_dataset_fn(),
cycle_length=10, num_parallel_calls=dataset_ops.AUTOTUNE)
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
if __name__ == "__main__":
test.main()
| 36.780952
| 80
| 0.71854
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import sleep
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
def _make_fake_dataset_fn():
def fake_dataset_fn(unused):
del unused
def make_dataset(time_us, num_elements):
return dataset_ops.Dataset.range(num_elements).apply(sleep.sleep(time_us))
return make_dataset(1000 * 1000, 0).concatenate(make_dataset(1000,
100)).take(100)
return fake_dataset_fn
class ParallelInterleaveBenchmark(test.Benchmark):
def _benchmark(self, dataset_fn, iters, num_elements):
with ops.Graph().as_default():
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset_fn().with_options(options)
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
deltas = []
for _ in range(iters):
start = time.time()
for _ in range(num_elements):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
mean_wall_time = np.mean(deltas) / num_elements
self.report_benchmark(iters=iters, wall_time=mean_wall_time)
def benchmark_sequential_interleave(self):
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(
_make_fake_dataset_fn(), cycle_length=10)
self._benchmark(dataset_fn=dataset_fn, iters=10, num_elements=100)
def benchmark_parallel_interleave_v1(self):
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().apply(
interleave_ops.parallel_interleave(
_make_fake_dataset_fn(), cycle_length=10))
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
def benchmark_parallel_interleave_v2(self):
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(
_make_fake_dataset_fn(),
cycle_length=10, num_parallel_calls=dataset_ops.AUTOTUNE)
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
if __name__ == "__main__":
test.main()
| true
| true
|
790230d92747e5bba04351a3b19b3db38b41c4ac
| 2,361
|
py
|
Python
|
v1/v1.0.0/gui/control_button_frame.py
|
vt-gs/tracking_client
|
81abc803766f935118ad37fa7492a8ab1f7c3582
|
[
"MIT"
] | null | null | null |
v1/v1.0.0/gui/control_button_frame.py
|
vt-gs/tracking_client
|
81abc803766f935118ad37fa7492a8ab1f7c3582
|
[
"MIT"
] | null | null | null |
v1/v1.0.0/gui/control_button_frame.py
|
vt-gs/tracking_client
|
81abc803766f935118ad37fa7492a8ab1f7c3582
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#version 2.1
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
from PyQt4.QtCore import pyqtSignal
class control_button_frame(QtGui.QFrame):
def __init__(self, parent=None, az_el = None):
super(control_button_frame, self).__init__()
self.parent = parent
self.az_el = az_el
self.initUI()
def initUI(self):
self.setFrameShape(QtGui.QFrame.StyledPanel)
self.init_widgets()
self.connect_signals()
def init_widgets(self):
self.MinusTenButton = QtGui.QPushButton(self)
self.MinusTenButton.setText("-10.0")
self.MinusTenButton.setMinimumWidth(45)
self.MinusOneButton = QtGui.QPushButton(self)
self.MinusOneButton.setText("-1.0")
self.MinusOneButton.setMinimumWidth(45)
self.MinusPtOneButton = QtGui.QPushButton(self)
self.MinusPtOneButton.setText("-0.1")
self.MinusPtOneButton.setMinimumWidth(45)
self.PlusPtOneButton = QtGui.QPushButton(self)
self.PlusPtOneButton.setText("+0.1")
self.PlusPtOneButton.setMinimumWidth(45)
self.PlusOneButton = QtGui.QPushButton(self)
self.PlusOneButton.setText("+1.0")
self.PlusOneButton.setMinimumWidth(45)
self.PlusTenButton = QtGui.QPushButton(self)
self.PlusTenButton.setText("+10.0")
self.PlusTenButton.setMinimumWidth(45)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.MinusTenButton)
hbox1.addWidget(self.MinusOneButton)
hbox1.addWidget(self.MinusPtOneButton)
hbox1.addWidget(self.PlusPtOneButton)
hbox1.addWidget(self.PlusOneButton)
hbox1.addWidget(self.PlusTenButton)
self.setLayout(hbox1)
def connect_signals(self):
self.PlusPtOneButton.clicked.connect(self.button_clicked)
self.PlusOneButton.clicked.connect(self.button_clicked)
self.PlusTenButton.clicked.connect(self.button_clicked)
self.MinusPtOneButton.clicked.connect(self.button_clicked)
self.MinusOneButton.clicked.connect(self.button_clicked)
self.MinusTenButton.clicked.connect(self.button_clicked)
def button_clicked(self):
sender = self.sender()
self.parent.increment_target_angle(self.az_el,float(sender.text()))
| 34.217391
| 83
| 0.694197
|
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
from PyQt4.QtCore import pyqtSignal
class control_button_frame(QtGui.QFrame):
def __init__(self, parent=None, az_el = None):
super(control_button_frame, self).__init__()
self.parent = parent
self.az_el = az_el
self.initUI()
def initUI(self):
self.setFrameShape(QtGui.QFrame.StyledPanel)
self.init_widgets()
self.connect_signals()
def init_widgets(self):
self.MinusTenButton = QtGui.QPushButton(self)
self.MinusTenButton.setText("-10.0")
self.MinusTenButton.setMinimumWidth(45)
self.MinusOneButton = QtGui.QPushButton(self)
self.MinusOneButton.setText("-1.0")
self.MinusOneButton.setMinimumWidth(45)
self.MinusPtOneButton = QtGui.QPushButton(self)
self.MinusPtOneButton.setText("-0.1")
self.MinusPtOneButton.setMinimumWidth(45)
self.PlusPtOneButton = QtGui.QPushButton(self)
self.PlusPtOneButton.setText("+0.1")
self.PlusPtOneButton.setMinimumWidth(45)
self.PlusOneButton = QtGui.QPushButton(self)
self.PlusOneButton.setText("+1.0")
self.PlusOneButton.setMinimumWidth(45)
self.PlusTenButton = QtGui.QPushButton(self)
self.PlusTenButton.setText("+10.0")
self.PlusTenButton.setMinimumWidth(45)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.MinusTenButton)
hbox1.addWidget(self.MinusOneButton)
hbox1.addWidget(self.MinusPtOneButton)
hbox1.addWidget(self.PlusPtOneButton)
hbox1.addWidget(self.PlusOneButton)
hbox1.addWidget(self.PlusTenButton)
self.setLayout(hbox1)
def connect_signals(self):
self.PlusPtOneButton.clicked.connect(self.button_clicked)
self.PlusOneButton.clicked.connect(self.button_clicked)
self.PlusTenButton.clicked.connect(self.button_clicked)
self.MinusPtOneButton.clicked.connect(self.button_clicked)
self.MinusOneButton.clicked.connect(self.button_clicked)
self.MinusTenButton.clicked.connect(self.button_clicked)
def button_clicked(self):
sender = self.sender()
self.parent.increment_target_angle(self.az_el,float(sender.text()))
| true
| true
|
790232e93462bb29267dbaff4e7e4e9e469fbbfc
| 22,553
|
py
|
Python
|
python/sls.py
|
DuttaAbhigyan/robust-adaptive-lqr
|
89d5ff606806a389a1ec4026bc5c17fb51573ae6
|
[
"MIT"
] | 26
|
2018-06-12T07:58:13.000Z
|
2022-03-06T19:45:25.000Z
|
python/sls.py
|
DuttaAbhigyan/robust-adaptive-lqr
|
89d5ff606806a389a1ec4026bc5c17fb51573ae6
|
[
"MIT"
] | null | null | null |
python/sls.py
|
DuttaAbhigyan/robust-adaptive-lqr
|
89d5ff606806a389a1ec4026bc5c17fb51573ae6
|
[
"MIT"
] | 7
|
2019-05-21T15:47:18.000Z
|
2021-12-11T11:13:43.000Z
|
"""sls.py
An implementation of the robust adaptive controller.
Both FIR SLS version with CVXPY and the common
Lyapunov relaxation.
"""
import numpy as np
import cvxpy as cvx
import utils
import logging
import math
import scipy.linalg
import scipy.optimize  # used by SLS_FIRStrategy for the gamma bisection (fminbound)
from abc import ABC, abstractmethod
from adaptive import AdaptiveMethod
class SLSInfeasibleException(Exception):
def __init__(self, msg=None):
super().__init__(msg)
def make_state_space_controller(Phi_x, Phi_u, n, p):
"""
Converts FIR transfer functions to a state
space realization of the dynamic controller,
mapping states to inputs.
"""
assert len(Phi_x.shape) == 2
assert len(Phi_u.shape) == 2
assert Phi_x.shape[1] == n
assert Phi_u.shape[1] == n
nT, _ = Phi_x.shape
pT, _ = Phi_u.shape
assert (nT % n) == 0
assert (pT % p) == 0
T = nT // n
assert T == (pT // p)
# See Theorem 2 of:
# https://nikolaimatni.github.io/papers/sls_state_space.pdf
Z = np.diag(np.ones(n*(T-2)), k=-n)
assert Z.shape == ((T-1)*n, (T-1)*n)
calI = np.zeros((n*(T-1), n))
calI[:n, :] = np.eye(n)
Rhat = np.hstack([Phi_x[n*k:n*(k+1), :] for k in range(1, T)])
Mhat = np.hstack([Phi_u[p*k:p*(k+1), :] for k in range(1, T)])
M1 = Phi_u[:p, :]
R1 = Phi_x[:n, :]
A = Z - calI.dot(Rhat)
B = -calI
C = M1.dot(Rhat) - Mhat
D = M1
return (A, B, C, D)
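# Shape summary for the realization above (following the referenced Theorem 2):
# with FIR horizon T, n states and p inputs, the controller state psi has
# dimension n*(T-1), and
#     A: (n*(T-1), n*(T-1)),  B: (n*(T-1), n),
#     C: (p, n*(T-1)),        D: (p, n),
# so that psi_{t+1} = A psi_t + B x_t and u_t = C psi_t + D x_t, which is the
# recursion implemented by roll_forward() below.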
def h2_squared_norm(A, B, Phi_x, Phi_u, Q, R, sigma_w):
"""
Gets the squared infinite horizon LQR cost for system
(A,B) in feedback with the controller defined by Phi_x
and Phi_u.
"""
n, p = B.shape
A_k, B_k, C_k, D_k = make_state_space_controller(Phi_x, Phi_u, n, p)
A_cl = np.block([
[A + B.dot(D_k), B.dot(C_k)],
[B_k, A_k]
])
Q_sqrt = utils.psd_sqrt(Q)
R_sqrt = utils.psd_sqrt(R)
C_cl = np.block([
[Q_sqrt, np.zeros((n, A_k.shape[0]))],
[R_sqrt.dot(D_k), R_sqrt.dot(C_k)]
])
B_cl = np.vstack((np.eye(n), np.zeros((A_k.shape[0], n))))
P = utils.solve_discrete_lyapunov(A_cl.T, B_cl.dot(B_cl.T))
return (sigma_w ** 2) * np.trace(C_cl.dot(P).dot(C_cl.T))
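# In other words, the quantity returned above is
#     J = sigma_w^2 * trace(C_cl P C_cl^T),
# where P is the closed-loop Gramian obtained from utils.solve_discrete_lyapunov;
# this is the steady-state LQR cost of the closed loop driven by white process
# noise with standard deviation sigma_w.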
def _assert_AB_consistent(A, B):
assert len(A.shape) == 2 and A.shape[0] == A.shape[1]
assert len(B.shape) == 2
assert A.shape[0] == B.shape[0]
def _assert_ABCD_consistent(A, B, C, D):
_assert_AB_consistent(A, B)
assert len(C.shape) == 2
assert len(D.shape) == 2
assert C.shape[1] == A.shape[0]
assert C.shape[0] == D.shape[0]
assert D.shape[1] == B.shape[1]
def roll_forward(A, B, K, x0, psi0, sigma_w, horizon, rng=None):
"""Apply an LTI controller K = (A_k,B_k,C_k,D_k)
Roll the true system (A, B) forward with the SS realization of the LTI
controller given. horizon is the length of the trajectory, and
sigma_w is the stddev of the Gaussian process noise.
"""
if rng is None:
rng = np.random
_assert_AB_consistent(A, B)
A_k, B_k, C_k, D_k = K
_assert_ABCD_consistent(A_k, B_k, C_k, D_k)
state_dim, input_dim = B.shape
psi_dim = A_k.shape[0]
assert C_k.shape[0] == input_dim
assert B_k.shape[1] == state_dim
if x0 is None:
x0 = np.zeros((state_dim,))
if psi0 is None:
psi0 = np.zeros((psi_dim,))
assert x0.shape == (state_dim,)
assert psi0.shape == (psi_dim,)
process = sigma_w*rng.normal(size=(horizon, state_dim))
xt = np.array(x0)
psit = np.array(psi0)
states = np.zeros((horizon+1, state_dim))
inputs = np.zeros((horizon, input_dim))
controller_states = np.zeros((horizon+1, psi_dim))
states[0, :] = x0
controller_states[0, :] = psi0
for t in range(horizon):
psitp1 = A_k.dot(psit) + B_k.dot(xt)
ut = C_k.dot(psit) + D_k.dot(xt)
xtp1 = A.dot(xt) + B.dot(ut) + process[t]
inputs[t, :] = ut
states[t+1, :] = xtp1
controller_states[t+1, :] = psitp1
xt = xtp1
psit = psitp1
return states, inputs, controller_states
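# Sketch of how these pieces compose (the names below are placeholders for the
# cost matrices, nominal/true models, and dimensions):
#
#     ok, _, Phi_x, Phi_u = sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B,
#                                     T=15, gamma=0.98, alpha=0.5)
#     K = make_state_space_controller(Phi_x, Phi_u, n, p)
#     states, inputs, _ = roll_forward(A_star, B_star, K, None, None,
#                                      sigma_w=1.0, horizon=500)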
def sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B, T, gamma, alpha, logger=None):
"""
Solves the SLS synthesis problem for length T FIR filters
using CVXPY
"""
assert len(Q.shape) == 2 and Q.shape[0] == Q.shape[1]
assert len(R.shape) == 2 and R.shape[0] == R.shape[1]
assert len(Ahat.shape) == 2 and Ahat.shape[0] == Ahat.shape[1]
assert len(Bhat.shape) == 2 and Bhat.shape[0] == Ahat.shape[0]
assert Q.shape[0] == Ahat.shape[0]
assert R.shape[0] == Bhat.shape[1]
assert eps_A >= 0
assert eps_B >= 0
assert T >= 1
assert gamma > 0 and gamma < 1
assert alpha > 0 and alpha < 1
if logger is None:
logger = logging.getLogger(__name__)
n, p = Bhat.shape
Q_sqrt = utils.psd_sqrt(Q)
R_sqrt = utils.psd_sqrt(R)
# Phi_x = \sum_{k=1}^{T} Phi_x[k] z^{-k}
Phi_x = cvx.Variable(T*n, n, name="Phi_x")
# Phi_u = \sum_{k=1}^{T} Phi_u[k] z^{-k}
Phi_u = cvx.Variable(T*p, n, name="Phi_u")
# htwo_cost
htwo_cost = cvx.Variable(name="htwo_cost")
# subspace constraint:
# [zI - Ah, -Bh] * [Phi_x; Phi_u] = I
#
# Note that:
# z Phi_x = \sum_{k=0}^{T-1} Phi_x[k+1] z^{-k}
#
# This means that:
# 1) Phi_x[1] = I
# 2) Phi_x[k+1] = Ah*Phi_x[k] + Bh*Phi_u[k] for k=1, ..., T-1
# 3) Ah*Phi_x[T] + Bh*Phi_u[T] = 0
constr = []
constr.append(Phi_x[:n, :] == np.eye(n))
for k in range(T-1):
constr.append(Phi_x[n*(k+1):n*(k+1+1), :] == Ahat*Phi_x[n*k:n*(k+1), :] + Bhat*Phi_u[p*k:p*(k+1), :])
constr.append(Ahat*Phi_x[n*(T-1):, :] + Bhat*Phi_u[p*(T-1):, :] == 0)
# H2 constraint:
# By Parseval's identity, this is equal (up to constants) to
#
# frobenius_norm(
# [ Q_sqrt*Phi_x[1] ;
# ...
# Q_sqrt*Phi_x[T] ;
# R_sqrt*Phi_u[1] ;
# ...
# R_sqrt*Phi_u[T]
# ]
# ) <= htwo_cost
# TODO: what is the best way to implement this in cvxpy?
constr.append(
cvx.norm(
cvx.bmat(
[[Q_sqrt*Phi_x[n*k:n*(k+1), :]] for k in range(T)] +
[[R_sqrt*Phi_u[p*k:p*(k+1), :]] for k in range(T)]),
'fro') <= htwo_cost)
# H-infinity constraint
#
# We want to enforce ||H(z)||_inf <= gamma, where
#
# H(z) = \sum_{k=1}^{T} [ mult_x * Phi_x[k] ; mult_u * Phi_u[k] ] z^{-k}.
#
# Here, each of the FIR coefficients has size (n+p) x n. Since n+p>n, we enforce
# the constraint on the transpose system H^T(z). The LMI constraint
# for this comes from Theorem 5.8 of
# Positive trigonometric polynomials and signal processing applications (2007) by
# B. Dumitrescu.
#
# Here is a table to map the variable names in the text to this program
#
# Text Program Comment
# -------------------------------------------------------------
# p n Output dim
# m n+p Input dim
# n T FIR horizon
# p(n+1) n(T+1) SDP variable size
# p(n+1) x m n(T+1) x (n+p)
mult_x = eps_A/np.sqrt(alpha)
mult_u = eps_B/np.sqrt(1-alpha)
# Hbar has size (T+1)*n x (n+p)
Hbar = cvx.bmat(
[[np.zeros((n, n)), np.zeros((n, p))]] +
[[mult_x*Phi_x[n*k:n*(k+1), :].T, mult_u*Phi_u[p*k:p*(k+1), :].T] for k in range(T)])
Q = cvx.Semidef(n*(T+1), name="Q")
# Constraint (5.44)
# Case k==0: the block diag of Q has to sum to gamma^2 * eye(n)
gamma_sq = gamma ** 2
constr.append(
sum([Q[n*t:n*(t+1), n*t:n*(t+1)] for t in range(T+1)]) == gamma_sq*np.eye(n))
# Case k>0: the block off-diag of Q has to sum to zero
for k in range(1, T+1):
constr.append(
sum([Q[n*t:n*(t+1), n*(t+k):n*(t+1+k)] for t in range(T+1-k)]) == np.zeros((n, n)))
# Constraint (5.45)
constr.append(
cvx.bmat([
[Q, Hbar],
[Hbar.T, np.eye(n+p)]]) == cvx.Semidef(n*(T+1) + (n+p)))
prob = cvx.Problem(cvx.Minimize(htwo_cost), constr)
prob.solve(solver=cvx.SCS)
if prob.status == cvx.OPTIMAL:
logging.debug("successfully solved!")
Phi_x = np.array(Phi_x.value)
Phi_u = np.array(Phi_u.value)
return (True, prob.value, Phi_x, Phi_u)
else:
logging.debug("could not solve: {}".format(prob.status))
return (False, None, None, None)
def sls_common_lyapunov(A, B, Q, R, eps_A, eps_B, tau, logger=None):
"""
Solves the common Lyapunov relaxation to the robust
synthesis problem.
Taken from
lstd-lqr/blob/master/code/policy_iteration.ipynb
learning-lqr/experiments/matlab/sls_synth_yalmip/common_lyap_synth_var2_alpha.m
"""
if logger is None:
logger = logging.getLogger(__name__)
d, p = B.shape
X = cvx.Symmetric(d) # inverse Lyapunov function
Z = cvx.Variable(p, d) # -K*X
W_11 = cvx.Symmetric(d)
W_12 = cvx.Variable(d, p)
W_22 = cvx.Symmetric(p)
alph = cvx.Variable() # scalar for tuning the H_inf constraint
constraints = []
# H2 cost: trace(W)=H2 cost
mat1 = cvx.bmat([
[X, X, Z.T],
[X, W_11, W_12],
[Z, W_12.T, W_22]])
constraints.append(mat1 == cvx.Semidef(2*d + p))
# H_infinity constraint
mat2 = cvx.bmat([
[X-np.eye(d), (A*X+B*Z), np.zeros((d, d)), np.zeros((d, p))],
[(X*A.T+Z.T*B.T), X, eps_A*X, eps_B*Z.T],
[np.zeros((d, d)), eps_A*X, alph*(tau**2)*np.eye(d), np.zeros((d, p))],
[np.zeros((p, d)), eps_B*Z, np.zeros((p, d)), (1-alph)*(tau**2)*np.eye(p)]])
constraints.append(mat2 == cvx.Semidef(3*d + p))
# constrain alpha to be in [0,1]:
constraints.append(alph >= 0)
constraints.append(alph <= 1)
# Solve!
objective = cvx.Minimize(cvx.trace(Q*W_11) + cvx.trace(R*W_22))
prob = cvx.Problem(objective, constraints)
try:
obj = prob.solve(solver=cvx.MOSEK)
except cvx.SolverError:
logger.warn("SolverError encountered")
return (False, None, None, None)
if prob.status == cvx.OPTIMAL:
logging.debug("common_lyapunov: found optimal solution")
X_value = np.array(X.value)
P_value = scipy.linalg.solve(X_value, np.eye(d), sym_pos=True)
# NOTE: the K returned here is meant to be used
# as A + BK **NOT** A - BK
K_value = np.array(Z.value).dot(P_value)
return (True, obj, P_value, K_value)
else:
logging.debug("common_lyapunov: could not solve (status={})".format(prob.status))
return (False, None, None, None)
class SLS_Implementation(ABC):
@abstractmethod
def open(self):
"""
"""
pass
@abstractmethod
def synth(self, Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger):
"""
"""
pass
class SLS_CVXPY(SLS_Implementation):
def open(self):
pass
def synth(self, Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger):
return sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger)
class SLS_FIRStrategy(AdaptiveMethod):
"""Adaptive control based on FIR truncated SLS
"""
def __init__(self, Q, R, A_star, B_star, sigma_w, rls_lam,
sigma_explore, reg, epoch_multiplier,
truncation_length, actual_error_multiplier,
use_gamma=0.98, sls_impl=None):
super().__init__(Q, R, A_star, B_star, sigma_w, rls_lam)
self._sigma_explore = sigma_explore
self._reg = reg
self._epoch_multiplier = epoch_multiplier
# TODO(stephentu):
# the truncation length should grow with time, but for now
# we keep it constant
# Additionally, gamma should be searched over as an optimization
# variable. For now, we fix the value.
# Finally, the optimization problem should be modified
# to involve the variable V as in https://arxiv.org/abs/1805.09388
self._truncation_length = truncation_length
self._actual_error_multiplier = actual_error_multiplier
self._sls_impl = sls_impl if sls_impl is not None else SLS_CVXPY()
self._logger = logging.getLogger(__name__)
self._use_gamma = use_gamma
self._controller_state = None
def _get_logger(self):
return self._logger
def reset(self, rng):
super().reset(rng)
self._sls_impl.open()
self._midway_infeasible = 0
def _design_controller(self, states, inputs, transitions, rng):
logger = self._get_logger()
Anom, Bnom, _ = utils.solve_least_squares(states, inputs, transitions, reg=self._reg)
eps_A = np.linalg.norm(Anom - self._A_star, ord=2)
eps_B = np.linalg.norm(Bnom - self._B_star, ord=2)
effective_eps_A = self._actual_error_multiplier * eps_A
effective_eps_B = self._actual_error_multiplier * eps_B
epoch_id = self._epoch_idx + 1 if self._has_primed else 0
logger.info("_design_controller(epoch={}): effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
# if SLS is not feasible, we fallback to the current
# control policy if it exists, otherwise we throw an SLSInfeasibleException
if self._use_gamma is None:
# bisect for gamma
logger.info("_design_controller(epoch={}): bisecting for gamma".format(epoch_id))
INF = 1e12
def fn(gamma):
is_feasible, obj, _, _ = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
effective_eps_A, effective_eps_B, self._truncation_length,
gamma=gamma, alpha=0.5, logger=logger)
if not is_feasible:
return INF
else:
return 1/(1-gamma) * obj
disp_lvl = 3 if logger.isEnabledFor(logging.DEBUG) else 0
gamma_star, _, error_flag, _ = scipy.optimize.fminbound(fn, 0, 1 - 1e-5, xtol=1e-2, maxfun=20, full_output=True, disp=disp_lvl)
if error_flag:
logger.warn("_design_controller(epoch={}): maxfun exceeded during bisection, gamma_star={}".format(epoch_id, gamma_star))
logger.info("_design_controller(epoch={}): using gamma_star={}".format(epoch_id, gamma_star))
is_feasible, _, Phi_x, Phi_u = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
effective_eps_A, effective_eps_B, self._truncation_length,
gamma=gamma_star, alpha=0.5, logger=logger)
else:
assert self._use_gamma > 0 and self._use_gamma < 1
logger.info("_design_controller(epoch={}): using fixed gamma={}".format(epoch_id, self._use_gamma))
is_feasible, _, Phi_x, Phi_u = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
effective_eps_A, effective_eps_B, self._truncation_length,
gamma=self._use_gamma, alpha=0.5, logger=logger)
if not is_feasible:
logger.info("_design_controller(epoch={}): SLS was not feasible...".format(epoch_id))
try:
self._current_K
# keep current controller
assert self._current_K is not None
logger.warn("_design_controller(epoch={}): SLS not feasible: keeping current controller".format(epoch_id))
self._midway_infeasible += 1
except AttributeError:
logger.warn("_design_controller(epoch={}): SLS not feasible: no existing controller to fallback on, effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
raise SLSInfeasibleException()
else:
logger.info("_design_controller(epoch={}): SLS was feasible. updating controller".format(epoch_id))
self._Phi_x = Phi_x
self._Phi_u = Phi_u
self._current_K = make_state_space_controller(Phi_x, Phi_u, self._n, self._p)
# compute the infinite horizon cost of this controller
Jnom = h2_squared_norm(self._A_star,
self._B_star,
self._Phi_x,
self._Phi_u,
self._Q,
self._R,
self._sigma_w)
return Anom, Bnom, Jnom
def _should_terminate_epoch(self):
if (self._iteration_within_epoch_idx >=
self._epoch_multiplier * (self._epoch_idx + 1)):
logger = self._get_logger()
logger.debug("terminating epoch... exploration noise will now have stddev {}".format(
self._sigma_explore * 1/math.pow(self._epoch_idx + 2, 1/3)))
return True
else:
return False
def _get_input(self, state, rng):
rng = self._get_rng(rng)
A_k, B_k, C_k, D_k = self._current_K
psit = self._controller_state
if psit is None:
psit = np.zeros((A_k.shape[0],))
psitp1 = A_k.dot(psit) + B_k.dot(state)
ctrl_input = C_k.dot(psit) + D_k.dot(state)
self._controller_state = psitp1
sigma_explore_decay = 1/math.pow(self._epoch_idx + 1, 1/3)
explore_input = self._sigma_explore * sigma_explore_decay * rng.normal(size=(self._p,))
return ctrl_input + explore_input
class SLS_CommonLyapunovStrategy(AdaptiveMethod):
"""
Adaptive control based on common Lyapunov relaxation
of robust control problem
"""
def __init__(self, Q, R, A_star, B_star, sigma_w, rls_lam,
sigma_explore, reg, epoch_multiplier, actual_error_multiplier):
super().__init__(Q, R, A_star, B_star, sigma_w, rls_lam)
self._sigma_explore = sigma_explore
self._reg = reg
self._epoch_multiplier = epoch_multiplier
self._actual_error_multiplier = actual_error_multiplier
self._logger = logging.getLogger(__name__)
self._midway_infeasible = 0
def reset(self, rng):
super().reset(rng)
self._midway_infeasible = 0
def _get_logger(self):
return self._logger
def _design_controller(self, states, inputs, transitions, rng):
logger = self._get_logger()
Anom, Bnom, _ = utils.solve_least_squares(states, inputs, transitions, reg=self._reg)
eps_A = np.linalg.norm(Anom - self._A_star, ord=2)
eps_B = np.linalg.norm(Bnom - self._B_star, ord=2)
effective_eps_A = self._actual_error_multiplier * eps_A
effective_eps_B = self._actual_error_multiplier * eps_B
epoch_id = self._epoch_idx + 1 if self._has_primed else 0
logger.info("_design_controller(epoch={}): effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
is_feasible, _, _, K = sls_common_lyapunov(
Anom, Bnom, self._Q, self._R,
effective_eps_A, effective_eps_B, tau=0.999, logger=logger)
if not is_feasible:
try:
self._current_K
# keep current controller
assert self._current_K is not None
logger.warn("_design_controller(epoch={}): SLS not feasible: keeping current controller".format(epoch_id))
self._midway_infeasible += 1
except AttributeError:
logger.warn("_design_controller(epoch={}): SLS not feasible: no existing controller to fallback on, effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
raise SLSInfeasibleException()
else:
logger.info("_design_controller(epoch={}): SLS was feasible. updating controller".format(epoch_id))
self._current_K = K
# compute the infinite horizon cost of this controller
Jnom = utils.LQR_cost(self._A_star, self._B_star, self._current_K, self._Q, self._R, self._sigma_w)
return Anom, Bnom, Jnom
def _should_terminate_epoch(self):
if (self._iteration_within_epoch_idx >=
self._epoch_multiplier * (self._epoch_idx + 1)):
logger = self._get_logger()
logger.debug("terminating epoch... exploration noise will now have stddev {}".format(
self._sigma_explore * 1/math.pow(self._epoch_idx + 2, 1/3)))
return True
else:
return False
def _get_input(self, state, rng):
rng = self._get_rng(rng)
ctrl_input = self._current_K.dot(state)
sigma_explore_decay = 1/math.pow(self._epoch_idx + 1, 1/3)
explore_input = self._sigma_explore * sigma_explore_decay * rng.normal(size=(self._p,))
return ctrl_input + explore_input
def _main():
import examples
A_star, B_star = examples.unstable_laplacian_dynamics()
# define costs
Q = 1e-3 * np.eye(3)
R = np.eye(3)
# initial controller
_, K_init = utils.dlqr(A_star, B_star, 1e-3*np.eye(3), np.eye(3))
rng = np.random
env = SLS_FIRStrategy(Q=Q,
R=R,
A_star=A_star,
B_star=B_star,
sigma_w=1,
sigma_explore=0.1,
reg=1e-5,
epoch_multiplier=10,
truncation_length=12,
actual_error_multiplier=1,
rls_lam=None)
env.reset(rng)
env.prime(250, K_init, 0.5, rng)
for idx in range(500):
env.step(rng)
env = SLS_CommonLyapunovStrategy(Q=Q,
R=R,
A_star=A_star,
B_star=B_star,
sigma_w=1,
sigma_explore=0.1,
reg=1e-5,
epoch_multiplier=10,
actual_error_multiplier=1,
rls_lam=None)
env.reset(rng)
env.prime(250, K_init, 0.5, rng)
for idx in range(500):
env.step(rng)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
np.set_printoptions(linewidth=200)
_main()
| 33.166176
| 207
| 0.579479
|
import numpy as np
import cvxpy as cvx
import utils
import logging
import math
import scipy.linalg
import scipy.optimize
from abc import ABC, abstractmethod
from adaptive import AdaptiveMethod
class SLSInfeasibleException(Exception):
def __init__(self, msg=None):
super().__init__(msg)
def make_state_space_controller(Phi_x, Phi_u, n, p):
assert len(Phi_x.shape) == 2
assert len(Phi_u.shape) == 2
assert Phi_x.shape[1] == n
assert Phi_u.shape[1] == n
nT, _ = Phi_x.shape
pT, _ = Phi_u.shape
assert (nT % n) == 0
assert (pT % p) == 0
T = nT // n
assert T == (pT // p)
Z = np.diag(np.ones(n*(T-2)), k=-n)
assert Z.shape == ((T-1)*n, (T-1)*n)
calI = np.zeros((n*(T-1), n))
calI[:n, :] = np.eye(n)
Rhat = np.hstack([Phi_x[n*k:n*(k+1), :] for k in range(1, T)])
Mhat = np.hstack([Phi_u[p*k:p*(k+1), :] for k in range(1, T)])
M1 = Phi_u[:p, :]
R1 = Phi_x[:n, :]
A = Z - calI.dot(Rhat)
B = -calI
C = M1.dot(Rhat) - Mhat
D = M1
return (A, B, C, D)
def h2_squared_norm(A, B, Phi_x, Phi_u, Q, R, sigma_w):
n, p = B.shape
A_k, B_k, C_k, D_k = make_state_space_controller(Phi_x, Phi_u, n, p)
A_cl = np.block([
[A + B.dot(D_k), B.dot(C_k)],
[B_k, A_k]
])
Q_sqrt = utils.psd_sqrt(Q)
R_sqrt = utils.psd_sqrt(R)
C_cl = np.block([
[Q_sqrt, np.zeros((n, A_k.shape[0]))],
[R_sqrt.dot(D_k), R_sqrt.dot(C_k)]
])
B_cl = np.vstack((np.eye(n), np.zeros((A_k.shape[0], n))))
P = utils.solve_discrete_lyapunov(A_cl.T, B_cl.dot(B_cl.T))
return (sigma_w ** 2) * np.trace(C_cl.dot(P).dot(C_cl.T))
def _assert_AB_consistent(A, B):
assert len(A.shape) == 2 and A.shape[0] == A.shape[1]
assert len(B.shape) == 2
assert A.shape[0] == B.shape[0]
def _assert_ABCD_consistent(A, B, C, D):
_assert_AB_consistent(A, B)
assert len(C.shape) == 2
assert len(D.shape) == 2
assert C.shape[1] == A.shape[0]
assert C.shape[0] == D.shape[0]
assert D.shape[1] == B.shape[1]
def roll_forward(A, B, K, x0, psi0, sigma_w, horizon, rng=None):
if rng is None:
rng = np.random
_assert_AB_consistent(A, B)
A_k, B_k, C_k, D_k = K
_assert_ABCD_consistent(A_k, B_k, C_k, D_k)
state_dim, input_dim = B.shape
psi_dim = A_k.shape[0]
assert C_k.shape[0] == input_dim
assert B_k.shape[1] == state_dim
if x0 is None:
x0 = np.zeros((state_dim,))
if psi0 is None:
psi0 = np.zeros((psi_dim,))
assert x0.shape == (state_dim,)
assert psi0.shape == (psi_dim,)
process = sigma_w*rng.normal(size=(horizon, state_dim))
xt = np.array(x0)
psit = np.array(psi0)
states = np.zeros((horizon+1, state_dim))
inputs = np.zeros((horizon, input_dim))
controller_states = np.zeros((horizon+1, psi_dim))
states[0, :] = x0
controller_states[0, :] = psi0
for t in range(horizon):
psitp1 = A_k.dot(psit) + B_k.dot(xt)
ut = C_k.dot(psit) + D_k.dot(xt)
xtp1 = A.dot(xt) + B.dot(ut) + process[t]
inputs[t, :] = ut
states[t+1, :] = xtp1
controller_states[t+1, :] = psitp1
xt = xtp1
psit = psitp1
return states, inputs, controller_states
def sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B, T, gamma, alpha, logger=None):
assert len(Q.shape) == 2 and Q.shape[0] == Q.shape[1]
assert len(R.shape) == 2 and R.shape[0] == R.shape[1]
assert len(Ahat.shape) == 2 and Ahat.shape[0] == Ahat.shape[1]
assert len(Bhat.shape) == 2 and Bhat.shape[0] == Ahat.shape[0]
assert Q.shape[0] == Ahat.shape[0]
assert R.shape[0] == Bhat.shape[1]
assert eps_A >= 0
assert eps_B >= 0
assert T >= 1
assert gamma > 0 and gamma < 1
assert alpha > 0 and alpha < 1
if logger is None:
logger = logging.getLogger(__name__)
n, p = Bhat.shape
Q_sqrt = utils.psd_sqrt(Q)
R_sqrt = utils.psd_sqrt(R)
Phi_x = cvx.Variable(T*n, n, name="Phi_x")
Phi_u = cvx.Variable(T*p, n, name="Phi_u")
htwo_cost = cvx.Variable(name="htwo_cost")
constr = []
constr.append(Phi_x[:n, :] == np.eye(n))
for k in range(T-1):
constr.append(Phi_x[n*(k+1):n*(k+1+1), :] == Ahat*Phi_x[n*k:n*(k+1), :] + Bhat*Phi_u[p*k:p*(k+1), :])
constr.append(Ahat*Phi_x[n*(T-1):, :] + Bhat*Phi_u[p*(T-1):, :] == 0)
#
# frobenius_norm(
# [ Q_sqrt*Phi_x[1] ;
# ...
# Q_sqrt*Phi_x[T] ;
# R_sqrt*Phi_u[1] ;
# ...
# R_sqrt*Phi_u[T]
# ]
# ) <= htwo_cost
# TODO: what is the best way to implement this in cvxpy?
constr.append(
cvx.norm(
cvx.bmat(
[[Q_sqrt*Phi_x[n*k:n*(k+1), :]] for k in range(T)] +
[[R_sqrt*Phi_u[p*k:p*(k+1), :]] for k in range(T)]),
'fro') <= htwo_cost)
# H-infinity constraint
#
# We want to enforce ||H(z)||_inf <= gamma, where
#
# H(z) = \sum_{k=1}^{T} [ mult_x * Phi_x[k] ; mult_u * Phi_u[k] ] z^{-k}.
#
# Here, each of the FIR coefficients has size (n+p) x n. Since n+p>n, we enforce
# the constraint on the transpose system H^T(z). The LMI constraint
# for this comes from Theorem 5.8 of
# Positive trigonometric polynomials and signal processing applications (2007) by
# B. Dumitrescu.
#
# Here is a table to map the variable names in the text to this program
#
# Text Program Comment
# -------------------------------------------------------------
# p n Output dim
# m n+p Input dim
# n T FIR horizon
# p(n+1) n(T+1) SDP variable size
# p(n+1) x m n(T+1) x (n+p)
mult_x = eps_A/np.sqrt(alpha)
mult_u = eps_B/np.sqrt(1-alpha)
# Hbar has size (T+1)*n x (n+p)
Hbar = cvx.bmat(
[[np.zeros((n, n)), np.zeros((n, p))]] +
[[mult_x*Phi_x[n*k:n*(k+1), :].T, mult_u*Phi_u[p*k:p*(k+1), :].T] for k in range(T)])
Q = cvx.Semidef(n*(T+1), name="Q")
# Constraint (5.44)
# Case k==0: the block diag of Q has to sum to gamma^2 * eye(n)
gamma_sq = gamma ** 2
constr.append(
sum([Q[n*t:n*(t+1), n*t:n*(t+1)] for t in range(T+1)]) == gamma_sq*np.eye(n))
# Case k>0: the block off-diag of Q has to sum to zero
for k in range(1, T+1):
constr.append(
sum([Q[n*t:n*(t+1), n*(t+k):n*(t+1+k)] for t in range(T+1-k)]) == np.zeros((n, n)))
# Constraint (5.45)
constr.append(
cvx.bmat([
[Q, Hbar],
[Hbar.T, np.eye(n+p)]]) == cvx.Semidef(n*(T+1) + (n+p)))
prob = cvx.Problem(cvx.Minimize(htwo_cost), constr)
prob.solve(solver=cvx.SCS)
if prob.status == cvx.OPTIMAL:
logging.debug("successfully solved!")
Phi_x = np.array(Phi_x.value)
Phi_u = np.array(Phi_u.value)
return (True, prob.value, Phi_x, Phi_u)
else:
logging.debug("could not solve: {}".format(prob.status))
return (False, None, None, None)
def sls_common_lyapunov(A, B, Q, R, eps_A, eps_B, tau, logger=None):
if logger is None:
logger = logging.getLogger(__name__)
d, p = B.shape
X = cvx.Symmetric(d) # inverse Lyapunov function
Z = cvx.Variable(p, d) # -K*X
W_11 = cvx.Symmetric(d)
W_12 = cvx.Variable(d, p)
W_22 = cvx.Symmetric(p)
alph = cvx.Variable() # scalar for tuning the H_inf constraint
constraints = []
# H2 cost: trace(W)=H2 cost
mat1 = cvx.bmat([
[X, X, Z.T],
[X, W_11, W_12],
[Z, W_12.T, W_22]])
constraints.append(mat1 == cvx.Semidef(2*d + p))
# H_infinity constraint
mat2 = cvx.bmat([
[X-np.eye(d), (A*X+B*Z), np.zeros((d, d)), np.zeros((d, p))],
[(X*A.T+Z.T*B.T), X, eps_A*X, eps_B*Z.T],
[np.zeros((d, d)), eps_A*X, alph*(tau**2)*np.eye(d), np.zeros((d, p))],
[np.zeros((p, d)), eps_B*Z, np.zeros((p, d)), (1-alph)*(tau**2)*np.eye(p)]])
constraints.append(mat2 == cvx.Semidef(3*d + p))
# constrain alpha to be in [0,1]:
constraints.append(alph >= 0)
constraints.append(alph <= 1)
# Solve!
objective = cvx.Minimize(cvx.trace(Q*W_11) + cvx.trace(R*W_22))
prob = cvx.Problem(objective, constraints)
try:
obj = prob.solve(solver=cvx.MOSEK)
except cvx.SolverError:
logger.warn("SolverError encountered")
return (False, None, None, None)
if prob.status == cvx.OPTIMAL:
logging.debug("common_lyapunov: found optimal solution")
X_value = np.array(X.value)
P_value = scipy.linalg.solve(X_value, np.eye(d), sym_pos=True)
# NOTE: the K returned here is meant to be used
# as A + BK **NOT** A - BK
K_value = np.array(Z.value).dot(P_value)
return (True, obj, P_value, K_value)
else:
logging.debug("common_lyapunov: could not solve (status={})".format(prob.status))
return (False, None, None, None)
class SLS_Implementation(ABC):
@abstractmethod
def open(self):
pass
@abstractmethod
def synth(self, Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger):
pass
class SLS_CVXPY(SLS_Implementation):
def open(self):
pass
def synth(self, Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger):
return sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger)
class SLS_FIRStrategy(AdaptiveMethod):
def __init__(self, Q, R, A_star, B_star, sigma_w, rls_lam,
sigma_explore, reg, epoch_multiplier,
truncation_length, actual_error_multiplier,
use_gamma=0.98, sls_impl=None):
super().__init__(Q, R, A_star, B_star, sigma_w, rls_lam)
self._sigma_explore = sigma_explore
self._reg = reg
self._epoch_multiplier = epoch_multiplier
# TODO(stephentu):
# the truncation length should grow with time, but for now
# we keep it constant
# Additionally, gamma should be searched over as an optimization
# variable. For now, we fix the value.
# Finally, the optimization problem should be modified
# to involve the variable V as in https://arxiv.org/abs/1805.09388
self._truncation_length = truncation_length
self._actual_error_multiplier = actual_error_multiplier
self._sls_impl = sls_impl if sls_impl is not None else SLS_CVXPY()
self._logger = logging.getLogger(__name__)
self._use_gamma = use_gamma
self._controller_state = None
def _get_logger(self):
return self._logger
def reset(self, rng):
super().reset(rng)
self._sls_impl.open()
self._midway_infeasible = 0
def _design_controller(self, states, inputs, transitions, rng):
logger = self._get_logger()
Anom, Bnom, _ = utils.solve_least_squares(states, inputs, transitions, reg=self._reg)
eps_A = np.linalg.norm(Anom - self._A_star, ord=2)
eps_B = np.linalg.norm(Bnom - self._B_star, ord=2)
effective_eps_A = self._actual_error_multiplier * eps_A
effective_eps_B = self._actual_error_multiplier * eps_B
epoch_id = self._epoch_idx + 1 if self._has_primed else 0
logger.info("_design_controller(epoch={}): effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
# if SLS is not feasible, we fallback to the current
# control policy if it exists, otherwise we throw an SLSInfeasibleException
if self._use_gamma is None:
# bisect for gamma
logger.info("_design_controller(epoch={}): bisecting for gamma".format(epoch_id))
INF = 1e12
def fn(gamma):
is_feasible, obj, _, _ = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
effective_eps_A, effective_eps_B, self._truncation_length,
gamma=gamma, alpha=0.5, logger=logger)
if not is_feasible:
return INF
else:
return 1/(1-gamma) * obj
disp_lvl = 3 if logger.isEnabledFor(logging.DEBUG) else 0
gamma_star, _, error_flag, _ = scipy.optimize.fminbound(fn, 0, 1 - 1e-5, xtol=1e-2, maxfun=20, full_output=True, disp=disp_lvl)
if error_flag:
logger.warn("_design_controller(epoch={}): maxfun exceeded during bisection, gamma_star={}".format(epoch_id, gamma_star))
logger.info("_design_controller(epoch={}): using gamma_star={}".format(epoch_id, gamma_star))
is_feasible, _, Phi_x, Phi_u = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
effective_eps_A, effective_eps_B, self._truncation_length,
gamma=gamma_star, alpha=0.5, logger=logger)
else:
assert self._use_gamma > 0 and self._use_gamma < 1
logger.info("_design_controller(epoch={}): using fixed gamma={}".format(epoch_id, self._use_gamma))
is_feasible, _, Phi_x, Phi_u = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
effective_eps_A, effective_eps_B, self._truncation_length,
gamma=self._use_gamma, alpha=0.5, logger=logger)
if not is_feasible:
logger.info("_design_controller(epoch={}): SLS was not feasible...".format(epoch_id))
try:
self._current_K
# keep current controller
assert self._current_K is not None
logger.warn("_design_controller(epoch={}): SLS not feasible: keeping current controller".format(epoch_id))
self._midway_infeasible += 1
except AttributeError:
logger.warn("_design_controller(epoch={}): SLS not feasible: no existing controller to fallback on, effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
raise SLSInfeasibleException()
else:
logger.info("_design_controller(epoch={}): SLS was feasible. updating controller".format(epoch_id))
self._Phi_x = Phi_x
self._Phi_u = Phi_u
self._current_K = make_state_space_controller(Phi_x, Phi_u, self._n, self._p)
# compute the infinite horizon cost of this controller
Jnom = h2_squared_norm(self._A_star,
self._B_star,
self._Phi_x,
self._Phi_u,
self._Q,
self._R,
self._sigma_w)
return Anom, Bnom, Jnom
def _should_terminate_epoch(self):
if (self._iteration_within_epoch_idx >=
self._epoch_multiplier * (self._epoch_idx + 1)):
logger = self._get_logger()
logger.debug("terminating epoch... exploration noise will now have stddev {}".format(
self._sigma_explore * 1/math.pow(self._epoch_idx + 2, 1/3)))
return True
else:
return False
def _get_input(self, state, rng):
rng = self._get_rng(rng)
A_k, B_k, C_k, D_k = self._current_K
psit = self._controller_state
if psit is None:
psit = np.zeros((A_k.shape[0],))
psitp1 = A_k.dot(psit) + B_k.dot(state)
ctrl_input = C_k.dot(psit) + D_k.dot(state)
self._controller_state = psitp1
sigma_explore_decay = 1/math.pow(self._epoch_idx + 1, 1/3)
explore_input = self._sigma_explore * sigma_explore_decay * rng.normal(size=(self._p,))
return ctrl_input + explore_input
class SLS_CommonLyapunovStrategy(AdaptiveMethod):
def __init__(self, Q, R, A_star, B_star, sigma_w, rls_lam,
sigma_explore, reg, epoch_multiplier, actual_error_multiplier):
super().__init__(Q, R, A_star, B_star, sigma_w, rls_lam)
self._sigma_explore = sigma_explore
self._reg = reg
self._epoch_multiplier = epoch_multiplier
self._actual_error_multiplier = actual_error_multiplier
self._logger = logging.getLogger(__name__)
self._midway_infeasible = 0
def reset(self, rng):
super().reset(rng)
self._midway_infeasible = 0
def _get_logger(self):
return self._logger
def _design_controller(self, states, inputs, transitions, rng):
logger = self._get_logger()
Anom, Bnom, _ = utils.solve_least_squares(states, inputs, transitions, reg=self._reg)
eps_A = np.linalg.norm(Anom - self._A_star, ord=2)
eps_B = np.linalg.norm(Bnom - self._B_star, ord=2)
effective_eps_A = self._actual_error_multiplier * eps_A
effective_eps_B = self._actual_error_multiplier * eps_B
epoch_id = self._epoch_idx + 1 if self._has_primed else 0
logger.info("_design_controller(epoch={}): effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
is_feasible, _, _, K = sls_common_lyapunov(
Anom, Bnom, self._Q, self._R,
effective_eps_A, effective_eps_B, tau=0.999, logger=logger)
if not is_feasible:
try:
self._current_K
# keep current controller
assert self._current_K is not None
logger.warn("_design_controller(epoch={}): SLS not feasible: keeping current controller".format(epoch_id))
self._midway_infeasible += 1
except AttributeError:
logger.warn("_design_controller(epoch={}): SLS not feasible: no existing controller to fallback on, effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
raise SLSInfeasibleException()
else:
logger.info("_design_controller(epoch={}): SLS was feasible. updating controller".format(epoch_id))
self._current_K = K
# compute the infinite horizon cost of this controller
Jnom = utils.LQR_cost(self._A_star, self._B_star, self._current_K, self._Q, self._R, self._sigma_w)
return Anom, Bnom, Jnom
def _should_terminate_epoch(self):
if (self._iteration_within_epoch_idx >=
self._epoch_multiplier * (self._epoch_idx + 1)):
logger = self._get_logger()
logger.debug("terminating epoch... exploration noise will now have stddev {}".format(
self._sigma_explore * 1/math.pow(self._epoch_idx + 2, 1/3)))
return True
else:
return False
def _get_input(self, state, rng):
rng = self._get_rng(rng)
ctrl_input = self._current_K.dot(state)
sigma_explore_decay = 1/math.pow(self._epoch_idx + 1, 1/3)
explore_input = self._sigma_explore * sigma_explore_decay * rng.normal(size=(self._p,))
return ctrl_input + explore_input
def _main():
import examples
A_star, B_star = examples.unstable_laplacian_dynamics()
# define costs
Q = 1e-3 * np.eye(3)
R = np.eye(3)
# initial controller
_, K_init = utils.dlqr(A_star, B_star, 1e-3*np.eye(3), np.eye(3))
rng = np.random
env = SLS_FIRStrategy(Q=Q,
R=R,
A_star=A_star,
B_star=B_star,
sigma_w=1,
sigma_explore=0.1,
reg=1e-5,
epoch_multiplier=10,
truncation_length=12,
actual_error_multiplier=1,
rls_lam=None)
env.reset(rng)
env.prime(250, K_init, 0.5, rng)
for idx in range(500):
env.step(rng)
env = SLS_CommonLyapunovStrategy(Q=Q,
R=R,
A_star=A_star,
B_star=B_star,
sigma_w=1,
sigma_explore=0.1,
reg=1e-5,
epoch_multiplier=10,
actual_error_multiplier=1,
rls_lam=None)
env.reset(rng)
env.prime(250, K_init, 0.5, rng)
for idx in range(500):
env.step(rng)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
np.set_printoptions(linewidth=200)
_main()
| true
| true
|
790232f83ba7b743ce4f67462f236a3d4e6f1ec2
| 288
|
py
|
Python
|
ChatApp/settings.py
|
xckomorebi/ChatApp
|
cc59208e4d31391ea7be5075d629479a4fffd2b6
|
[
"MIT"
] | null | null | null |
ChatApp/settings.py
|
xckomorebi/ChatApp
|
cc59208e4d31391ea7be5075d629479a4fffd2b6
|
[
"MIT"
] | null | null | null |
ChatApp/settings.py
|
xckomorebi/ChatApp
|
cc59208e4d31391ea7be5075d629479a4fffd2b6
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
DB_NAME = "chatapp.db"
PROJECT_PATH = Path(__file__).parents[1]
DB_PATH = os.path.join(PROJECT_PATH, "resource", DB_NAME)
PORT_MIN = 1024
PORT_MAX = 65535
DEBUG = os.getenv("CHAT_APP_DEBUG", False)
if DEBUG:
TIMEOUT = 30
else:
TIMEOUT = 0.5
| 16.941176
| 57
| 0.71875
|
import os
from pathlib import Path
DB_NAME = "chatapp.db"
PROJECT_PATH = Path(__file__).parents[1]
DB_PATH = os.path.join(PROJECT_PATH, "resource", DB_NAME)
PORT_MIN = 1024
PORT_MAX = 65535
DEBUG = os.getenv("CHAT_APP_DEBUG", False)
if DEBUG:
TIMEOUT = 30
else:
TIMEOUT = 0.5
| true
| true
|
79023304d3fe4dbdbaea5c808ee5263b5eee52d1
| 628
|
py
|
Python
|
socialpages/migrations/0002_auto_20200808_1457.py
|
OjureFred/SocialGram
|
37afc8cabff9cccfd7f0577d182b13ed463e7c6e
|
[
"MIT"
] | null | null | null |
socialpages/migrations/0002_auto_20200808_1457.py
|
OjureFred/SocialGram
|
37afc8cabff9cccfd7f0577d182b13ed463e7c6e
|
[
"MIT"
] | null | null | null |
socialpages/migrations/0002_auto_20200808_1457.py
|
OjureFred/SocialGram
|
37afc8cabff9cccfd7f0577d182b13ed463e7c6e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1 on 2020-08-08 11:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('socialpages', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='tags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.AlterModelOptions(
name='editor',
options={'ordering': ['first_name']},
),
]
| 25.12
| 114
| 0.555732
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('socialpages', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='tags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.AlterModelOptions(
name='editor',
options={'ordering': ['first_name']},
),
]
| true
| true
|
79023356e1315490b562a5a13758e54ec31ee7cb
| 10,545
|
py
|
Python
|
doc/source/notebooks/understanding/models.pct.py
|
christabella/GPflow
|
30824d289f8ee3f58d4249238c8b7267e6a0b2fc
|
[
"Apache-2.0"
] | null | null | null |
doc/source/notebooks/understanding/models.pct.py
|
christabella/GPflow
|
30824d289f8ee3f58d4249238c8b7267e6a0b2fc
|
[
"Apache-2.0"
] | null | null | null |
doc/source/notebooks/understanding/models.pct.py
|
christabella/GPflow
|
30824d289f8ee3f58d4249238c8b7267e6a0b2fc
|
[
"Apache-2.0"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Manipulating GPflow models
#
# One of the key ingredients in GPflow is the model class, which enables you to carefully control parameters. This notebook shows how some of these parameter control features work, and how to build your own model with GPflow. First we'll look at:
#
# - how to view models and parameters
# - how to set parameter values
# - how to constrain parameters (for example, variance > 0)
# - how to fix model parameters
# - how to apply priors to parameters
# - how to optimize models
#
# Then we'll show how to build a simple logistic regression model, demonstrating the ease of the parameter framework.
#
# GPy users should feel right at home, but there are some small differences.
#
# First, let's deal with the usual notebook boilerplate and make a simple GP regression model. See [Basic (Gaussian likelihood) GP regression model](../basics/regression.ipynb) for specifics of the model; we just want some parameters to play with.
# %%
import numpy as np
import gpflow
import tensorflow_probability as tfp
from gpflow.utilities import print_summary, set_trainable, to_default_float
# %% [markdown]
# We begin by creating a very simple GP regression model:
# %%
# generate toy data
np.random.seed(1)
X = np.random.rand(20, 1)
Y = np.sin(12 * X) + 0.66 * np.cos(25 * X) + np.random.randn(20, 1) * 0.01
m = gpflow.models.GPR((X, Y), kernel=gpflow.kernels.Matern32() + gpflow.kernels.Linear())
# %% [markdown]
# ## Viewing, getting, and setting parameters
# You can display the state of the model in a terminal by using `print_summary(m)`. You can change the display format using the `fmt` keyword argument, e.g. `'html'`. In a notebook, you can also use `fmt='notebook'` or set the default printing format as `notebook`:
# %%
print_summary(m, fmt="notebook")
# %%
gpflow.config.set_default_summary_fmt("notebook")
# %% [markdown]
# This model has four parameters. The kernel is made of the sum of two parts. The first (counting from zero) is a Matern32 kernel that has a variance parameter and a lengthscales parameter; the second is a linear kernel that has only a variance parameter. There is also a parameter that controls the variance of the noise, as part of the likelihood.
#
# All the model variables have been initialized at `1.0`. You can access individual parameters in the same way that you display the state of the model in a terminal; for example, to see all the parameters that are part of the likelihood, run:
# %%
print_summary(m.likelihood)
# %% [markdown]
# This gets more useful with more complex models!
# %% [markdown]
# To set the value of a parameter, just use `assign()`:
# %%
m.kernel.kernels[0].lengthscales.assign(0.5)
m.likelihood.variance.assign(0.01)
print_summary(m, fmt="notebook")
# %% [markdown]
# ## Constraints and trainable variables
#
# GPflow helpfully creates an unconstrained representation of all the variables. In the previous example, all the variables are constrained positively (see the **transform** column in the table); the unconstrained representation is given by $\alpha = \log(\exp(\theta)-1)$. The `trainable_parameters` property returns the constrained values:
# %%
m.trainable_parameters
# %% [markdown]
# Each parameter has an `unconstrained_variable` attribute that enables you to access the unconstrained value as a TensorFlow `Variable`.
# %%
p = m.kernel.kernels[0].lengthscales
p.unconstrained_variable
# %% [markdown]
# You can also check the unconstrained value as follows:
# %%
p.transform.inverse(p)
# %% [markdown]
# Constraints are handled by the Bijector classes from the `tensorflow_probability` package. You might prefer to use the constraint $\alpha = \log(\theta)$; this is easily done by replacing the parameter with one that has a different `transform` attribute (here we make sure to copy all other attributes across from the old parameter; this is not necessary when there is no `prior` and the `trainable` state is still the default of `True`):
# %%
old_parameter = m.kernel.kernels[0].lengthscales
new_parameter = gpflow.Parameter(
old_parameter,
trainable=old_parameter.trainable,
prior=old_parameter.prior,
name=old_parameter.name.split(":")[0], # tensorflow is weird and adds ':0' to the name
transform=tfp.bijectors.Exp(),
)
m.kernel.kernels[0].lengthscales = new_parameter
# %% [markdown]
# Though the lengthscale itself remains the same, the unconstrained lengthscale has changed:
# %%
p.transform.inverse(p)
# %% [markdown]
# You can also change the `transform` attribute in place:
# %%
m.kernel.kernels[0].variance.transform = tfp.bijectors.Exp()
# %%
print_summary(m, fmt="notebook")
# %% [markdown]
# ## Changing whether a parameter will be trained in optimization
#
# Another helpful feature is the ability to fix parameters. To do this, simply set the `trainable` attribute to `False`; this is shown in the **trainable** column of the representation, and the corresponding variable is removed from the free state.
# %%
set_trainable(m.kernel.kernels[1].variance, False)
print_summary(m)
# %%
m.trainable_parameters
# %% [markdown]
# To unfix a parameter, just set the `trainable` attribute to `True` again.
# %%
set_trainable(m.kernel.kernels[1].variance, True)
print_summary(m)
# %% [markdown]
# **NOTE:** If you want to recursively change the `trainable` status of an object that *contains* parameters, you **must** use the `set_trainable()` utility function.
#
# A module (e.g. a model, kernel, likelihood, ... instance) does not have a `trainable` attribute:
# %%
try:
m.kernel.trainable
except AttributeError:
print(f"{m.kernel.__class__.__name__} does not have a trainable attribute")
# %%
set_trainable(m.kernel, False)
print_summary(m)
# %% [markdown]
# ## Priors
#
# You can set priors in the same way as transforms and trainability, by using `tensorflow_probability` distribution objects. Let's set a Gamma prior on the variance of the Matern32 kernel.
# %%
k = gpflow.kernels.Matern32()
k.variance.prior = tfp.distributions.Gamma(to_default_float(2), to_default_float(3))
print_summary(k)
# %%
m.kernel.kernels[0].variance.prior = tfp.distributions.Gamma(
to_default_float(2), to_default_float(3)
)
print_summary(m)
# %% [markdown]
# ## Optimization
#
# To optimize your model, first create an instance of an optimizer (in this case, `gpflow.optimizers.Scipy`), which has optional arguments that are passed to `scipy.optimize.minimize` (we minimize the negative log likelihood). Then, call the `minimize` method of that optimizer, with your model as the optimization target. Variables that have priors are maximum a priori (MAP) estimated, that is, we add the log prior to the log likelihood, and otherwise use Maximum Likelihood.
# %%
def closure():
return -m.log_marginal_likelihood()
opt = gpflow.optimizers.Scipy()
opt.minimize(closure, variables=m.trainable_variables)
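# %% [markdown]
# For illustration (a sketch, not part of the original notebook), the MAP objective can also be written out by hand. This assumes every `prior` is a `tensorflow_probability` distribution exposing `log_prob`:
# %%
import tensorflow as tf
def log_map_objective(model):
    # log marginal likelihood plus the log prior of every parameter that defines one
    log_prior = sum(
        tf.reduce_sum(param.prior.log_prob(param))
        for param in model.trainable_parameters
        if param.prior is not None
    )
    return model.log_marginal_likelihood() + log_prior
# log_map_objective(m)  # evaluate the current MAP objective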
# %% [markdown]
# ## Building new models
#
# To build new models, you'll need to inherit from `gpflow.models.BayesianModel`. Parameters are instantiated with `gpflow.Parameter`. You might also be interested in `tf.Module`, which acts as a 'container' for `Parameter`s (for example, kernels are `tf.Module`s).
#
# In this very simple demo, we'll implement linear multiclass classification.
#
# There are two parameters: a weight matrix and a bias (offset). The key thing is to implement the `log_likelihood` method, which returns a TensorFlow scalar that represents the (log) likelihood. You can use parameter objects inside `log_likelihood`.
#
# %%
import tensorflow as tf
class LinearMulticlass(gpflow.models.BayesianModel):
def __init__(self, X, Y, name=None):
super().__init__(name=name) # always call the parent constructor
self.X = X.copy() # X is a NumPy array of inputs
self.Y = Y.copy() # Y is a 1-of-k (one-hot) representation of the labels
self.num_data, self.input_dim = X.shape
_, self.num_classes = Y.shape
# make some parameters
self.W = gpflow.Parameter(np.random.randn(self.input_dim, self.num_classes))
self.b = gpflow.Parameter(np.random.randn(self.num_classes))
# ^^ You must make the parameters attributes of the class for
# them to be picked up by the model. i.e. this won't work:
#
# W = gpflow.Param(... <-- must be self.W
def log_likelihood(self): # takes no arguments
p = tf.nn.softmax(
tf.matmul(self.X, self.W) + self.b
) # Param variables are used as tensorflow arrays.
return tf.reduce_sum(tf.math.log(p) * self.Y) # be sure to return a scalar
# %% [markdown]
# ...and that's it. Let's build a really simple demo to show that it works.
# %%
np.random.seed(123)
X = np.vstack(
[
np.random.randn(10, 2) + [2, 2],
np.random.randn(10, 2) + [-2, 2],
np.random.randn(10, 2) + [2, -2],
]
)
Y = np.repeat(np.eye(3), 10, 0)
from matplotlib import pyplot as plt
plt.style.use("ggplot")
# %matplotlib inline
import matplotlib
matplotlib.rcParams["figure.figsize"] = (12, 6)
_ = plt.scatter(X[:, 0], X[:, 1], 100, np.argmax(Y, 1), lw=2, cmap=plt.cm.viridis)
# %%
m = LinearMulticlass(X, Y)
m
# %%
def closure():
return -m.log_marginal_likelihood()
opt = gpflow.optimizers.Scipy()
opt.minimize(closure, variables=m.trainable_variables)
# %%
xx, yy = np.mgrid[-4:4:200j, -4:4:200j]
X_test = np.vstack([xx.flatten(), yy.flatten()]).T
f_test = np.dot(X_test, m.W.read_value()) + m.b.read_value()
p_test = np.exp(f_test)
p_test /= p_test.sum(1)[:, None]
# %%
plt.figure(figsize=(12, 6))
for i in range(3):
plt.contour(xx, yy, p_test[:, i].reshape(200, 200), [0.5], colors="k", linewidths=1)
_ = plt.scatter(X[:, 0], X[:, 1], 100, np.argmax(Y, 1), lw=2, cmap=plt.cm.viridis)
# %% [markdown]
# That concludes the new model example and this notebook. You might want to see for yourself that the `LinearMulticlass` model and its parameters have all the functionality demonstrated here. You could also add some priors and run Hamiltonian Monte Carlo using the HMC optimizer `gpflow.train.HMC` and its `sample` method. See [Markov Chain Monte Carlo (MCMC)](../advanced/mcmc.ipynb) for more information on running the sampler.
| 36.870629
| 478
| 0.715789
|
and parameters
# - how to set parameter values
# - how to constrain parameters (for example, variance > 0)
# - how to fix model parameters
# - how to apply priors to parameters
# - how to optimize models
#
# Then we'll show how to build a simple logistic regression model, demonstrating the ease of the parameter framework.
# %%
import numpy as np
import gpflow
import tensorflow_probability as tfp
from gpflow.utilities import print_summary, set_trainable, to_default_float
# %% [markdown]
# We begin by creating a very simple GP regression model:
# %%
# generate toy data
np.random.seed(1)
X = np.random.rand(20, 1)
Y = np.sin(12 * X) + 0.66 * np.cos(25 * X) + np.random.randn(20, 1) * 0.01
m = gpflow.models.GPR((X, Y), kernel=gpflow.kernels.Matern32() + gpflow.kernels.Linear())
# %% [markdown]
# ## Viewing, getting, and setting parameters
# You can display the state of the model in a terminal by using `print_summary(m)`. You can change the display format using the `fmt` keyword argument, e.g. `'html'`. In a notebook, you can also use `fmt='notebook'` or set the default printing format as `notebook`:
# %%
print_summary(m, fmt="notebook")
# %%
gpflow.config.set_default_summary_fmt("notebook")
# %% [markdown]
# This model has four parameters. The kernel is made of the sum of two parts. The first (counting from zero) is a Matern32 kernel that has a variance parameter and a lengthscales parameter; the second is a linear kernel that has only a variance parameter. There is also a parameter that controls the variance of the noise, as part of the likelihood.
#
# All the model variables have been initialized at `1.0`. You can access individual parameters in the same way that you display the state of the model in a terminal; for example, to see all the parameters that are part of the likelihood, run:
# %%
print_summary(m.likelihood)
# %% [markdown]
# This gets more useful with more complex models!
# %% [markdown]
# To set the value of a parameter, just use `assign()`:
# %%
m.kernel.kernels[0].lengthscales.assign(0.5)
m.likelihood.variance.assign(0.01)
print_summary(m, fmt="notebook")
# %% [markdown]
# ## Constraints and trainable variables
#
# GPflow helpfully creates an unconstrained representation of all the variables. In the previous example, all the variables are constrained positively (see the **transform** column in the table); the unconstrained representation is given by $\alpha = \log(\exp(\theta)-1)$. The `trainable_parameters` property returns the constrained values:
# %%
m.trainable_parameters
# %% [markdown]
# Each parameter has an `unconstrained_variable` attribute that enables you to access the unconstrained value as a TensorFlow `Variable`.
# %%
p = m.kernel.kernels[0].lengthscales
p.unconstrained_variable
# %% [markdown]
# You can also check the unconstrained value as follows:
# %%
p.transform.inverse(p)
# %% [markdown]
# Constraints are handled by the Bijector classes from the `tensorflow_probability` package. You might prefer to use the constraint $\alpha = \log(\theta)$; this is easily done by replacing the parameter with one that has a different `transform` attribute (here we make sure to copy all other attributes across from the old parameter; this is not necessary when there is no `prior` and the `trainable` state is still the default of `True`):
# %%
old_parameter = m.kernel.kernels[0].lengthscales
new_parameter = gpflow.Parameter(
old_parameter,
trainable=old_parameter.trainable,
prior=old_parameter.prior,
name=old_parameter.name.split(":")[0], # tensorflow is weird and adds ':0' to the name
transform=tfp.bijectors.Exp(),
)
m.kernel.kernels[0].lengthscales = new_parameter
# %% [markdown]
# Though the lengthscale itself remains the same, the unconstrained lengthscale has changed:
# %%
p.transform.inverse(p)
# %% [markdown]
# You can also change the `transform` attribute in place:
# %%
m.kernel.kernels[0].variance.transform = tfp.bijectors.Exp()
# %%
print_summary(m, fmt="notebook")
# %% [markdown]
# ## Changing whether a parameter will be trained in optimization
#
# Another helpful feature is the ability to fix parameters. To do this, simply set the `trainable` attribute to `False`; this is shown in the **trainable** column of the representation, and the corresponding variable is removed from the free state.
# %%
set_trainable(m.kernel.kernels[1].variance, False)
print_summary(m)
# %%
m.trainable_parameters
# %% [markdown]
# To unfix a parameter, just set the `trainable` attribute to `True` again.
# %%
set_trainable(m.kernel.kernels[1].variance, True)
print_summary(m)
# %% [markdown]
# **NOTE:** If you want to recursively change the `trainable` status of an object that *contains* parameters, you **must** use the `set_trainable()` utility function.
#
# A module (e.g. a model, kernel, likelihood, ... instance) does not have a `trainable` attribute:
# %%
try:
m.kernel.trainable
except AttributeError:
print(f"{m.kernel.__class__.__name__} does not have a trainable attribute")
# %%
set_trainable(m.kernel, False)
print_summary(m)
# %% [markdown]
# ## Priors
#
# You can set priors in the same way as transforms and trainability, by using `tensorflow_probability` distribution objects. Let's set a Gamma prior on the variance of the Matern32 kernel.
k = gpflow.kernels.Matern32()
k.variance.prior = tfp.distributions.Gamma(to_default_float(2), to_default_float(3))
print_summary(k)
m.kernel.kernels[0].variance.prior = tfp.distributions.Gamma(
to_default_float(2), to_default_float(3)
)
print_summary(m)
def closure():
    return -m.log_marginal_likelihood()
opt = gpflow.optimizers.Scipy()
opt.minimize(closure, variables=m.trainable_variables)
import tensorflow as tf
class LinearMulticlass(gpflow.models.BayesianModel):
def __init__(self, X, Y, name=None):
super().__init__(name=name)
self.X = X.copy()
self.Y = Y.copy()
self.num_data, self.input_dim = X.shape
_, self.num_classes = Y.shape
self.W = gpflow.Parameter(np.random.randn(self.input_dim, self.num_classes))
self.b = gpflow.Parameter(np.random.randn(self.num_classes))
def log_likelihood(self): # takes no arguments
p = tf.nn.softmax(
tf.matmul(self.X, self.W) + self.b
) # Param variables are used as tensorflow arrays.
return tf.reduce_sum(tf.math.log(p) * self.Y) # be sure to return a scalar
# %% [markdown]
# ...and that's it. Let's build a really simple demo to show that it works.
# %%
np.random.seed(123)
X = np.vstack(
[
np.random.randn(10, 2) + [2, 2],
np.random.randn(10, 2) + [-2, 2],
np.random.randn(10, 2) + [2, -2],
]
)
Y = np.repeat(np.eye(3), 10, 0)
from matplotlib import pyplot as plt
plt.style.use("ggplot")
# %matplotlib inline
import matplotlib
matplotlib.rcParams["figure.figsize"] = (12, 6)
_ = plt.scatter(X[:, 0], X[:, 1], 100, np.argmax(Y, 1), lw=2, cmap=plt.cm.viridis)
# %%
m = LinearMulticlass(X, Y)
m
# %%
def closure():
return -m.log_marginal_likelihood()
opt = gpflow.optimizers.Scipy()
opt.minimize(closure, variables=m.trainable_variables)
# %%
xx, yy = np.mgrid[-4:4:200j, -4:4:200j]
X_test = np.vstack([xx.flatten(), yy.flatten()]).T
f_test = np.dot(X_test, m.W.read_value()) + m.b.read_value()
p_test = np.exp(f_test)
p_test /= p_test.sum(1)[:, None]
# %%
plt.figure(figsize=(12, 6))
for i in range(3):
plt.contour(xx, yy, p_test[:, i].reshape(200, 200), [0.5], colors="k", linewidths=1)
_ = plt.scatter(X[:, 0], X[:, 1], 100, np.argmax(Y, 1), lw=2, cmap=plt.cm.viridis)
# %% [markdown]
# That concludes the new model example and this notebook. You might want to see for yourself that the `LinearMulticlass` model and its parameters have all the functionality demonstrated here. You could also add some priors and run Hamiltonian Monte Carlo using the HMC optimizer `gpflow.train.HMC` and its `sample` method. See [Markov Chain Monte Carlo (MCMC)](../advanced/mcmc.ipynb) for more information on running the sampler.
| true
| true
|
790233e50052c02361257b8c9267aae19af093d6
| 773
|
py
|
Python
|
src/data/data/__init__.py
|
autt/gathering-leto
|
37894d8d8ad0381a2aacbb38593325a882b030f5
|
[
"MIT"
] | null | null | null |
src/data/data/__init__.py
|
autt/gathering-leto
|
37894d8d8ad0381a2aacbb38593325a882b030f5
|
[
"MIT"
] | null | null | null |
src/data/data/__init__.py
|
autt/gathering-leto
|
37894d8d8ad0381a2aacbb38593325a882b030f5
|
[
"MIT"
] | null | null | null |
import github
import pandas as pd
def get_issues(repo_addr):
g = github.Github()
repo = g.get_repo(repo_addr)
return repo.get_issues()
def fetch_issue_activity(repo_addr):
    g = github.Github()
    issues = g.get_repo(repo_addr).get_issues(state="all")
    events = []
    for issue in issues:
        # skip pull requests, which the GitHub issues API also returns
        if issue.pull_request is not None:
            continue
        events.append((issue.created_at, 1))
        if issue.state == "closed":
            events.append((issue.closed_at, -1))
    df = pd.DataFrame(events, columns=["date", "action"])
    df.sort_values("date", inplace=True)
    # running count of open issues: +1 for each open event, -1 for each close
    df["open"] = df["action"].cumsum()
    df["total_events"] = abs(df["action"]).cumsum()
    # every closed issue contributes exactly one open and one close event
    df["closed"] = (df["total_events"] - df["open"]) // 2
    return df
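# Illustrative usage (a sketch; the repository name is an assumption and
# unauthenticated GitHub API access is rate-limited):
#
#     df = fetch_issue_activity("pandas-dev/pandas")
#     df.plot(x="date", y=["open", "closed"])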
| 24.15625
| 58
| 0.614489
|
import github
import pandas as pd
def get_issues(repo_addr):
g = github.Github()
repo = g.get_repo(repo_addr)
return repo.get_issues()
def fetch_issue_activity(repo_addr):
g = github.Github()
issues = g.get_repo(repo_addr).get_issues(state="all")
events = []
for issue in issues:
if issue.pull_request is not None:
continue
events.append((issue.created_at, 1))
if issue.state == "closed":
events.append((issue.closed_at, -1))
df = pd.DataFrame(events, columns=["date", "action"])
df.sort_values("date", inplace=True)
df["open"] = df["action"].cumsum()
df["total_events"] = abs(df["action"]).cumsum()
df["closed"] = (df["total_events"] - df["open"]) // 2
return df
| true
| true
|
7902343dc241ceeebd3d11296237b38ae4cfc1d6
| 1,034
|
py
|
Python
|
wiki/views.py
|
ebonnecab/makewiki
|
c1f83be59730ac1dd0343ffaadeb0c5a4152ecab
|
[
"MIT"
] | null | null | null |
wiki/views.py
|
ebonnecab/makewiki
|
c1f83be59730ac1dd0343ffaadeb0c5a4152ecab
|
[
"MIT"
] | 5
|
2021-03-19T08:24:16.000Z
|
2022-02-10T14:13:59.000Z
|
wiki/views.py
|
ebonnecab/makewiki
|
c1f83be59730ac1dd0343ffaadeb0c5a4152ecab
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from wiki.models import Page
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.shortcuts import get_object_or_404,render
class PageList(ListView):
"""
    This view grabs all the pages out of the database and
    returns a list of each unique wiki page for the
    user to access on the website through 'list.html'.
"""
model = Page
def get(self, request):
""" Returns a list of wiki pages. """
pages = Page.objects.all()
context = {'pages': pages}
return render(request, 'list.html', context=context)
class PageDetailView(DetailView):
"""
    This view returns a page for a unique wiki entry, using its slug as an
    identifier, or a 404 message if the page does not exist.
"""
model = Page
def get(self, request, slug):
wiki = get_object_or_404(Page, slug=slug)
return render(request, 'page.html', {'wiki': wiki})
def post(self, request, slug):
pass
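# Hypothetical URL wiring for these views (a sketch; the route names and the
# urls.py module are assumptions, not part of this file):
#
#     from django.urls import path
#     urlpatterns = [
#         path('', PageList.as_view(), name='wiki-list'),
#         path('<slug:slug>/', PageDetailView.as_view(), name='wiki-detail'),
#     ]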
| 29.542857
| 79
| 0.675048
|
from django.shortcuts import render
from wiki.models import Page
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.shortcuts import get_object_or_404,render
class PageList(ListView):
model = Page
def get(self, request):
pages = Page.objects.all()
context = {'pages': pages}
return render(request, 'list.html', context=context)
class PageDetailView(DetailView):
model = Page
def get(self, request, slug):
wiki = get_object_or_404(Page, slug=slug)
return render(request, 'page.html', {'wiki': wiki})
def post(self, request, slug):
pass
| true
| true
|
790234a17a1f74001dc6b1699b4850177c06f8f0
| 354
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractReMonsterWiki.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractReMonsterWiki.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractReMonsterWiki.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractReMonsterWiki(item):
"""
Parser for 'Re:Monster Wiki'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False
| 32.181818
| 89
| 0.714689
|
def extractReMonsterWiki(item):
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False
| true
| true
|
7902350138dbedc0ed6f27a1448c4d252b4b5c83
| 745
|
py
|
Python
|
providers.py
|
kwanj-k/sibsco
|
c5642ea0c908457cd145d4df0a485bedd5f88166
|
[
"MIT"
] | null | null | null |
providers.py
|
kwanj-k/sibsco
|
c5642ea0c908457cd145d4df0a485bedd5f88166
|
[
"MIT"
] | 1
|
2021-06-02T00:26:43.000Z
|
2021-06-02T00:26:43.000Z
|
providers.py
|
kwanj-k/sibsco
|
c5642ea0c908457cd145d4df0a485bedd5f88166
|
[
"MIT"
] | null | null | null |
""" Third party api wrappers"""
import os
import json
import nexmo
import africastalking
username = os.getenv('africastalking_username')
api_key = os.getenv('africastalking_api_key')
africastalking.initialize(username, api_key)
sms = africastalking.SMS
class ProvidersWrapper:
""" Class with all the thirdy party helper functions"""
    @staticmethod
    def send_message(number, message):
client = nexmo.Client(key=os.getenv('nexmokey'), secret=os.getenv('nexmosecret'))
response = client.send_message({
'from': 'Nexmo',
'to': number,
'text': message,
})
if response["messages"][0]["status"] != "0":
response = sms.send(message, ['+' + number])
return response
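# Illustrative usage (a sketch; valid Nexmo and Africa's Talking credentials
# are assumed to be set in the environment):
#
#     ProvidersWrapper.send_message("254700000000", "Your order has shipped")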
| 27.592593
| 89
| 0.641611
|
import os
import json
import nexmo
import africastalking
username = os.getenv('africastalking_username')
api_key = os.getenv('africastalking_api_key')
africastalking.initialize(username, api_key)
sms = africastalking.SMS
class ProvidersWrapper:
    @staticmethod
    def send_message(number, message):
client = nexmo.Client(key=os.getenv('nexmokey'), secret=os.getenv('nexmosecret'))
response = client.send_message({
'from': 'Nexmo',
'to': number,
'text': message,
})
if response["messages"][0]["status"] != "0":
response = sms.send(message, ['+' + number])
return response
| true
| true
|
7902350dee2a3d3b50074578b7417126b6253c64
| 3,756
|
py
|
Python
|
external/sbml/bindings/python/test/sbml/TestConstraint_newSetters.py
|
dchandran/evolvenetworks
|
072f9e1292552f691a86457ffd16a5743724fb5e
|
[
"BSD-3-Clause"
] | 1
|
2019-08-22T17:17:41.000Z
|
2019-08-22T17:17:41.000Z
|
external/sbml/bindings/python/test/sbml/TestConstraint_newSetters.py
|
dchandran/evolvenetworks
|
072f9e1292552f691a86457ffd16a5743724fb5e
|
[
"BSD-3-Clause"
] | null | null | null |
external/sbml/bindings/python/test/sbml/TestConstraint_newSetters.py
|
dchandran/evolvenetworks
|
072f9e1292552f691a86457ffd16a5743724fb5e
|
[
"BSD-3-Clause"
] | null | null | null |
#
# @file TestConstraint_newSetters.py
# @brief Constraint unit tests for new set function API
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# $Id$
# $HeadURL$
#
# This test file was converted from src/sbml/test/TestConstraint_newSetters.c
# with the help of a conversion script (ctest_converter.pl).
#
#<!---------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2009 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
#--------------------------------------------------------------------------->*/
import sys
import unittest
import libsbml
class TestConstraint_newSetters(unittest.TestCase):
C = None
def setUp(self):
self.C = libsbml.Constraint(2,4)
if (self.C == None):
pass
pass
def tearDown(self):
self.C = None
pass
def test_Constraint_setMath1(self):
math = libsbml.parseFormula("2 * k")
i = self.C.setMath(math)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.getMath() != math )
self.assertEqual( True, self.C.isSetMath() )
i = self.C.setMath(None)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.getMath() == None )
self.assertEqual( False, self.C.isSetMath() )
math = None
pass
def test_Constraint_setMath2(self):
math = libsbml.ASTNode(libsbml.AST_TIMES)
i = self.C.setMath(math)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assertEqual( False, self.C.isSetMath() )
math = None
pass
def test_Constraint_setMessage1(self):
node = libsbml.XMLNode()
i = self.C.setMessage(node)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assert_( self.C.isSetMessage() == False )
i = self.C.unsetMessage()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.C.isSetMessage() )
if (self.C.getMessage() != None):
pass
node = None
pass
def test_Constraint_setMessage2(self):
text = libsbml.XMLNode.convertStringToXMLNode(" Some text ",None)
triple = libsbml.XMLTriple("p", "http://www.w3.org/1999/xhtml", "")
att = libsbml.XMLAttributes()
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.w3.org/1999/xhtml", "")
p = libsbml.XMLNode(triple,att,xmlns)
p.addChild(text)
triple1 = libsbml.XMLTriple("message", "", "")
att1 = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple1,att1)
node.addChild(p)
i = self.C.setMessage(node)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.isSetMessage() == True )
i = self.C.unsetMessage()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.C.isSetMessage() )
if (self.C.getMessage() != None):
pass
node = None
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestConstraint_newSetters))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| 32.947368
| 79
| 0.660011
|
import sys
import unittest
import libsbml
class TestConstraint_newSetters(unittest.TestCase):
C = None
def setUp(self):
self.C = libsbml.Constraint(2,4)
if (self.C == None):
pass
pass
def tearDown(self):
self.C = None
pass
def test_Constraint_setMath1(self):
math = libsbml.parseFormula("2 * k")
i = self.C.setMath(math)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.getMath() != math )
self.assertEqual( True, self.C.isSetMath() )
i = self.C.setMath(None)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.getMath() == None )
self.assertEqual( False, self.C.isSetMath() )
math = None
pass
def test_Constraint_setMath2(self):
math = libsbml.ASTNode(libsbml.AST_TIMES)
i = self.C.setMath(math)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assertEqual( False, self.C.isSetMath() )
math = None
pass
def test_Constraint_setMessage1(self):
node = libsbml.XMLNode()
i = self.C.setMessage(node)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assert_( self.C.isSetMessage() == False )
i = self.C.unsetMessage()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.C.isSetMessage() )
if (self.C.getMessage() != None):
pass
node = None
pass
def test_Constraint_setMessage2(self):
text = libsbml.XMLNode.convertStringToXMLNode(" Some text ",None)
triple = libsbml.XMLTriple("p", "http://www.w3.org/1999/xhtml", "")
att = libsbml.XMLAttributes()
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.w3.org/1999/xhtml", "")
p = libsbml.XMLNode(triple,att,xmlns)
p.addChild(text)
triple1 = libsbml.XMLTriple("message", "", "")
att1 = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple1,att1)
node.addChild(p)
i = self.C.setMessage(node)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.isSetMessage() == True )
i = self.C.unsetMessage()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.C.isSetMessage() )
if (self.C.getMessage() != None):
pass
node = None
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestConstraint_newSetters))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| true
| true
|
7902351414bba97b6a86f9d7dd4e4476e060249d
| 4,785
|
py
|
Python
|
pymatgen/io/lammps/sets.py
|
frssp/pymatgen
|
bdd977f065b66191557c7398b31a1571bc541fdb
|
[
"MIT"
] | 5
|
2019-04-11T20:57:38.000Z
|
2021-12-01T05:00:42.000Z
|
pymatgen/io/lammps/sets.py
|
frssp/pymatgen
|
bdd977f065b66191557c7398b31a1571bc541fdb
|
[
"MIT"
] | null | null | null |
pymatgen/io/lammps/sets.py
|
frssp/pymatgen
|
bdd977f065b66191557c7398b31a1571bc541fdb
|
[
"MIT"
] | 3
|
2019-10-14T19:47:34.000Z
|
2020-07-02T08:10:45.000Z
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module implements classes for reading and generating LAMMPS input sets.
For ease of management we divide LAMMPS input into 2 files:
    1. Data file: All structure-related settings such as the atomic positions,
        bonds, angles, dihedrals, corresponding parametrizations etc. are
        set in the data file.
    2. Control/input file: This is the main input file that should be fed to the
        lammps binary. The main input file consists of the path to the
        afore-mentioned data file and the job control parameters such as
        the ensemble type (NVT, NPT etc.), max number of iterations etc.
"""
import os
import six
from monty.json import MSONable, MontyDecoder
from pymatgen.io.lammps.data import LammpsData
from pymatgen.io.lammps.input import LammpsInput
__author__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
class LammpsInputSet(MSONable):
def __init__(self, name, lammps_input, lammps_data=None,
data_filename="in.data", user_lammps_settings=None):
"""
Implementation of LammpsInputSet that is initialized from a dict
settings. It is typically used by other LammpsInputSets for
initialization from json or yaml source files.
Args:
name (str): A name for the input set.
lammps_input (LammpsInput): The config dictionary to use.
lammps_data (LammpsData): LammpsData object
            data_filename (str): name of the lammps data file.
Note: this will override the value for 'data_file' key in lammps_input
user_lammps_settings (dict): User lammps settings. This allows a user
to override lammps settings, e.g., setting a different force field
or bond type.
"""
self.name = name
self.lines = []
self.lammps_input = lammps_input
self.lammps_data = lammps_data
self.data_filename = data_filename
self.lammps_input.settings["data_file"] = data_filename
self.user_lammps_settings = user_lammps_settings or {}
self.lammps_input.settings.update(self.user_lammps_settings)
def write_input(self, input_filename, data_filename=None):
"""
Get the string representation of the main input file and write it.
Also writes the data file if the lammps_data attribute is set.
Args:
input_filename (string): name of the input file
data_filename (string): override the data file name with this
"""
if data_filename:
data_filename = os.path.abspath(os.path.join(os.getcwd(), data_filename))
if data_filename and ("data_file" in self.lammps_input.settings):
self.lammps_input.settings["data_file"] = data_filename
self.data_filename = data_filename
self.lammps_input.write_file(input_filename)
# write the data file if present
if self.lammps_data:
self.lammps_data.write_file(filename=self.data_filename)
@classmethod
def from_file(cls, name, input_template, user_settings,
lammps_data=None, data_filename="in.data"):
"""
Returns LammpsInputSet from input file template and input data.
Args:
name (str)
input_template (string): path to the input template file.
user_settings (dict): User lammps settings, the keys must
correspond to the keys in the template.
lammps_data (string/LammpsData): path to the
data file or an appropriate object
            data_filename (string): name of the lammps data file.
Returns:
LammpsInputSet
"""
user_settings["data_file"] = data_filename
lammps_input = LammpsInput.from_file(input_template, user_settings)
if isinstance(lammps_data, six.string_types):
lammps_data = LammpsData.from_file(lammps_data)
return cls(name, lammps_input, lammps_data=lammps_data,
data_filename=data_filename)
def as_dict(self):
d = MSONable.as_dict(self)
if hasattr(self, "kwargs"):
d.update(**self.kwargs)
d["lammps_input"] = self.lammps_input.as_dict()
return d
@classmethod
def from_dict(cls, d):
decoded = {k: MontyDecoder().process_decoded(v) for k, v in d.items()
if k not in ["@module", "@class", "lammps_input"]}
decoded["lammps_input"] = LammpsInput.from_dict(d["lammps_input"])
return cls(**decoded)
| 40.210084
| 86
| 0.660815
|
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import six
from monty.json import MSONable, MontyDecoder
from pymatgen.io.lammps.data import LammpsData
from pymatgen.io.lammps.input import LammpsInput
__author__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
class LammpsInputSet(MSONable):
def __init__(self, name, lammps_input, lammps_data=None,
data_filename="in.data", user_lammps_settings=None):
self.name = name
self.lines = []
self.lammps_input = lammps_input
self.lammps_data = lammps_data
self.data_filename = data_filename
self.lammps_input.settings["data_file"] = data_filename
self.user_lammps_settings = user_lammps_settings or {}
self.lammps_input.settings.update(self.user_lammps_settings)
def write_input(self, input_filename, data_filename=None):
if data_filename:
data_filename = os.path.abspath(os.path.join(os.getcwd(), data_filename))
if data_filename and ("data_file" in self.lammps_input.settings):
self.lammps_input.settings["data_file"] = data_filename
self.data_filename = data_filename
self.lammps_input.write_file(input_filename)
if self.lammps_data:
self.lammps_data.write_file(filename=self.data_filename)
@classmethod
def from_file(cls, name, input_template, user_settings,
lammps_data=None, data_filename="in.data"):
user_settings["data_file"] = data_filename
lammps_input = LammpsInput.from_file(input_template, user_settings)
if isinstance(lammps_data, six.string_types):
lammps_data = LammpsData.from_file(lammps_data)
return cls(name, lammps_input, lammps_data=lammps_data,
data_filename=data_filename)
def as_dict(self):
d = MSONable.as_dict(self)
if hasattr(self, "kwargs"):
d.update(**self.kwargs)
d["lammps_input"] = self.lammps_input.as_dict()
return d
@classmethod
def from_dict(cls, d):
decoded = {k: MontyDecoder().process_decoded(v) for k, v in d.items()
if k not in ["@module", "@class", "lammps_input"]}
decoded["lammps_input"] = LammpsInput.from_dict(d["lammps_input"])
return cls(**decoded)
| true
| true
|
790235b18d33cb3f0ac0276aa588cf0afb57320e
| 10,714
|
py
|
Python
|
data.py
|
gasteigerjo/gdc
|
996bc47acffd86bc9bb1df3293c87a3c7573744f
|
[
"MIT"
] | 2
|
2022-03-14T11:54:20.000Z
|
2022-03-23T20:25:08.000Z
|
data.py
|
gasteigerjo/gdc
|
996bc47acffd86bc9bb1df3293c87a3c7573744f
|
[
"MIT"
] | null | null | null |
data.py
|
gasteigerjo/gdc
|
996bc47acffd86bc9bb1df3293c87a3c7573744f
|
[
"MIT"
] | null | null | null |
__author__ = "Stefan Weißenberger and Johannes Gasteiger"
__license__ = "MIT"
import os
import numpy as np
from scipy.linalg import expm
import torch
from torch_geometric.data import Data, InMemoryDataset
from torch_geometric.datasets import Planetoid, Amazon, Coauthor
from seeds import development_seed
DATA_PATH = 'data'
def get_dataset(name: str, use_lcc: bool = True) -> InMemoryDataset:
path = os.path.join(DATA_PATH, name)
if name in ['Cora', 'Citeseer', 'Pubmed']:
dataset = Planetoid(path, name)
elif name in ['Computers', 'Photo']:
dataset = Amazon(path, name)
elif name == 'CoauthorCS':
dataset = Coauthor(path, 'CS')
else:
raise Exception('Unknown dataset.')
if use_lcc:
lcc = get_largest_connected_component(dataset)
x_new = dataset.data.x[lcc]
y_new = dataset.data.y[lcc]
row, col = dataset.data.edge_index.numpy()
edges = [[i, j] for i, j in zip(row, col) if i in lcc and j in lcc]
edges = remap_edges(edges, get_node_mapper(lcc))
data = Data(
x=x_new,
edge_index=torch.LongTensor(edges),
y=y_new,
train_mask=torch.zeros(y_new.size()[0], dtype=torch.bool),
test_mask=torch.zeros(y_new.size()[0], dtype=torch.bool),
val_mask=torch.zeros(y_new.size()[0], dtype=torch.bool)
)
dataset.data = data
return dataset
def get_component(dataset: InMemoryDataset, start: int = 0) -> set:
visited_nodes = set()
queued_nodes = set([start])
row, col = dataset.data.edge_index.numpy()
while queued_nodes:
current_node = queued_nodes.pop()
visited_nodes.update([current_node])
neighbors = col[np.where(row == current_node)[0]]
neighbors = [n for n in neighbors if n not in visited_nodes and n not in queued_nodes]
queued_nodes.update(neighbors)
return visited_nodes
def get_largest_connected_component(dataset: InMemoryDataset) -> np.ndarray:
remaining_nodes = set(range(dataset.data.x.shape[0]))
comps = []
while remaining_nodes:
start = min(remaining_nodes)
comp = get_component(dataset, start)
comps.append(comp)
remaining_nodes = remaining_nodes.difference(comp)
return np.array(list(comps[np.argmax(list(map(len, comps)))]))
def get_node_mapper(lcc: np.ndarray) -> dict:
mapper = {}
counter = 0
for node in lcc:
mapper[node] = counter
counter += 1
return mapper
def remap_edges(edges: list, mapper: dict) -> list:
row = [e[0] for e in edges]
col = [e[1] for e in edges]
row = list(map(lambda x: mapper[x], row))
col = list(map(lambda x: mapper[x], col))
return [row, col]
def get_adj_matrix(dataset: InMemoryDataset) -> np.ndarray:
num_nodes = dataset.data.x.shape[0]
adj_matrix = np.zeros(shape=(num_nodes, num_nodes))
for i, j in zip(dataset.data.edge_index[0], dataset.data.edge_index[1]):
adj_matrix[i, j] = 1.
return adj_matrix
def get_ppr_matrix(
adj_matrix: np.ndarray,
alpha: float = 0.1) -> np.ndarray:
num_nodes = adj_matrix.shape[0]
A_tilde = adj_matrix + np.eye(num_nodes)
D_tilde = np.diag(1/np.sqrt(A_tilde.sum(axis=1)))
H = D_tilde @ A_tilde @ D_tilde
return alpha * np.linalg.inv(np.eye(num_nodes) - (1 - alpha) * H)
def get_heat_matrix(
adj_matrix: np.ndarray,
t: float = 5.0) -> np.ndarray:
num_nodes = adj_matrix.shape[0]
A_tilde = adj_matrix + np.eye(num_nodes)
D_tilde = np.diag(1/np.sqrt(A_tilde.sum(axis=1)))
H = D_tilde @ A_tilde @ D_tilde
return expm(-t * (np.eye(num_nodes) - H))
def get_top_k_matrix(A: np.ndarray, k: int = 128) -> np.ndarray:
num_nodes = A.shape[0]
row_idx = np.arange(num_nodes)
A[A.argsort(axis=0)[:num_nodes - k], row_idx] = 0.
norm = A.sum(axis=0)
norm[norm <= 0] = 1 # avoid dividing by zero
return A/norm
def get_clipped_matrix(A: np.ndarray, eps: float = 0.01) -> np.ndarray:
num_nodes = A.shape[0]
A[A < eps] = 0.
norm = A.sum(axis=0)
norm[norm <= 0] = 1 # avoid dividing by zero
return A/norm
def set_train_val_test_split(
seed: int,
data: Data,
num_development: int = 1500,
num_per_class: int = 20) -> Data:
rnd_state = np.random.RandomState(development_seed)
num_nodes = data.y.shape[0]
development_idx = rnd_state.choice(num_nodes, num_development, replace=False)
test_idx = [i for i in np.arange(num_nodes) if i not in development_idx]
train_idx = []
rnd_state = np.random.RandomState(seed)
for c in range(data.y.max() + 1):
class_idx = development_idx[np.where(data.y[development_idx].cpu() == c)[0]]
train_idx.extend(rnd_state.choice(class_idx, num_per_class, replace=False))
val_idx = [i for i in development_idx if i not in train_idx]
def get_mask(idx):
mask = torch.zeros(num_nodes, dtype=torch.bool)
mask[idx] = 1
return mask
data.train_mask = get_mask(train_idx)
data.val_mask = get_mask(val_idx)
data.test_mask = get_mask(test_idx)
return data
class PPRDataset(InMemoryDataset):
"""
Dataset preprocessed with GDC using PPR diffusion.
    Note that this implementation is not scalable
since we directly invert the adjacency matrix.
"""
def __init__(self,
name: str = 'Cora',
use_lcc: bool = True,
alpha: float = 0.1,
k: int = 16,
eps: float = None):
self.name = name
self.use_lcc = use_lcc
self.alpha = alpha
self.k = k
self.eps = eps
super(PPRDataset, self).__init__(DATA_PATH)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self) -> list:
return []
@property
def processed_file_names(self) -> list:
return [str(self) + '.pt']
def download(self):
pass
def process(self):
base = get_dataset(name=self.name, use_lcc=self.use_lcc)
# generate adjacency matrix from sparse representation
adj_matrix = get_adj_matrix(base)
# obtain exact PPR matrix
ppr_matrix = get_ppr_matrix(adj_matrix,
alpha=self.alpha)
if self.k:
print(f'Selecting top {self.k} edges per node.')
ppr_matrix = get_top_k_matrix(ppr_matrix, k=self.k)
elif self.eps:
print(f'Selecting edges with weight greater than {self.eps}.')
ppr_matrix = get_clipped_matrix(ppr_matrix, eps=self.eps)
else:
raise ValueError
# create PyG Data object
edges_i = []
edges_j = []
edge_attr = []
for i, row in enumerate(ppr_matrix):
for j in np.where(row > 0)[0]:
edges_i.append(i)
edges_j.append(j)
edge_attr.append(ppr_matrix[i, j])
edge_index = [edges_i, edges_j]
data = Data(
x=base.data.x,
edge_index=torch.LongTensor(edge_index),
edge_attr=torch.FloatTensor(edge_attr),
y=base.data.y,
train_mask=torch.zeros(base.data.train_mask.size()[0], dtype=torch.bool),
test_mask=torch.zeros(base.data.test_mask.size()[0], dtype=torch.bool),
val_mask=torch.zeros(base.data.val_mask.size()[0], dtype=torch.bool)
)
data, slices = self.collate([data])
torch.save((data, slices), self.processed_paths[0])
def __str__(self) -> str:
return f'{self.name}_ppr_alpha={self.alpha}_k={self.k}_eps={self.eps}_lcc={self.use_lcc}'
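# Illustrative usage (a sketch; processing downloads the dataset and inverts its
# adjacency matrix on first use, so it can take a while):
#
#     dataset = PPRDataset(name='Cora', use_lcc=True, alpha=0.1, k=16)
#     data = set_train_val_test_split(seed=12345, data=dataset.data)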
class HeatDataset(InMemoryDataset):
"""
Dataset preprocessed with GDC using heat kernel diffusion.
    Note that this implementation is not scalable
since we directly calculate the matrix exponential
of the adjacency matrix.
"""
def __init__(self,
name: str = 'Cora',
use_lcc: bool = True,
t: float = 5.0,
k: int = 16,
eps: float = None):
self.name = name
self.use_lcc = use_lcc
self.t = t
self.k = k
self.eps = eps
super(HeatDataset, self).__init__(DATA_PATH)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self) -> list:
return []
@property
def processed_file_names(self) -> list:
return [str(self) + '.pt']
def download(self):
pass
def process(self):
base = get_dataset(name=self.name, use_lcc=self.use_lcc)
# generate adjacency matrix from sparse representation
adj_matrix = get_adj_matrix(base)
# get heat matrix as described in Berberidis et al., 2019
heat_matrix = get_heat_matrix(adj_matrix,
t=self.t)
if self.k:
print(f'Selecting top {self.k} edges per node.')
heat_matrix = get_top_k_matrix(heat_matrix, k=self.k)
elif self.eps:
print(f'Selecting edges with weight greater than {self.eps}.')
heat_matrix = get_clipped_matrix(heat_matrix, eps=self.eps)
else:
raise ValueError
# create PyG Data object
edges_i = []
edges_j = []
edge_attr = []
for i, row in enumerate(heat_matrix):
for j in np.where(row > 0)[0]:
edges_i.append(i)
edges_j.append(j)
edge_attr.append(heat_matrix[i, j])
edge_index = [edges_i, edges_j]
data = Data(
x=base.data.x,
edge_index=torch.LongTensor(edge_index),
edge_attr=torch.FloatTensor(edge_attr),
y=base.data.y,
train_mask=torch.zeros(base.data.train_mask.size()[0], dtype=torch.bool),
test_mask=torch.zeros(base.data.test_mask.size()[0], dtype=torch.bool),
val_mask=torch.zeros(base.data.val_mask.size()[0], dtype=torch.bool)
)
data, slices = self.collate([data])
torch.save((data, slices), self.processed_paths[0])
def __str__(self) -> str:
return f'{self.name}_heat_t={self.t}_k={self.k}_eps={self.eps}_lcc={self.use_lcc}'
| 33.376947
| 98
| 0.589696
|
__author__ = "Stefan Weißenberger and Johannes Gasteiger"
__license__ = "MIT"
import os
import numpy as np
from scipy.linalg import expm
import torch
from torch_geometric.data import Data, InMemoryDataset
from torch_geometric.datasets import Planetoid, Amazon, Coauthor
from seeds import development_seed
DATA_PATH = 'data'
def get_dataset(name: str, use_lcc: bool = True) -> InMemoryDataset:
path = os.path.join(DATA_PATH, name)
if name in ['Cora', 'Citeseer', 'Pubmed']:
dataset = Planetoid(path, name)
elif name in ['Computers', 'Photo']:
dataset = Amazon(path, name)
elif name == 'CoauthorCS':
dataset = Coauthor(path, 'CS')
else:
raise Exception('Unknown dataset.')
if use_lcc:
lcc = get_largest_connected_component(dataset)
x_new = dataset.data.x[lcc]
y_new = dataset.data.y[lcc]
row, col = dataset.data.edge_index.numpy()
edges = [[i, j] for i, j in zip(row, col) if i in lcc and j in lcc]
edges = remap_edges(edges, get_node_mapper(lcc))
data = Data(
x=x_new,
edge_index=torch.LongTensor(edges),
y=y_new,
train_mask=torch.zeros(y_new.size()[0], dtype=torch.bool),
test_mask=torch.zeros(y_new.size()[0], dtype=torch.bool),
val_mask=torch.zeros(y_new.size()[0], dtype=torch.bool)
)
dataset.data = data
return dataset
def get_component(dataset: InMemoryDataset, start: int = 0) -> set:
visited_nodes = set()
queued_nodes = set([start])
row, col = dataset.data.edge_index.numpy()
while queued_nodes:
current_node = queued_nodes.pop()
visited_nodes.update([current_node])
neighbors = col[np.where(row == current_node)[0]]
neighbors = [n for n in neighbors if n not in visited_nodes and n not in queued_nodes]
queued_nodes.update(neighbors)
return visited_nodes
def get_largest_connected_component(dataset: InMemoryDataset) -> np.ndarray:
remaining_nodes = set(range(dataset.data.x.shape[0]))
comps = []
while remaining_nodes:
start = min(remaining_nodes)
comp = get_component(dataset, start)
comps.append(comp)
remaining_nodes = remaining_nodes.difference(comp)
return np.array(list(comps[np.argmax(list(map(len, comps)))]))
def get_node_mapper(lcc: np.ndarray) -> dict:
mapper = {}
counter = 0
for node in lcc:
mapper[node] = counter
counter += 1
return mapper
def remap_edges(edges: list, mapper: dict) -> list:
row = [e[0] for e in edges]
col = [e[1] for e in edges]
row = list(map(lambda x: mapper[x], row))
col = list(map(lambda x: mapper[x], col))
return [row, col]
def get_adj_matrix(dataset: InMemoryDataset) -> np.ndarray:
num_nodes = dataset.data.x.shape[0]
adj_matrix = np.zeros(shape=(num_nodes, num_nodes))
for i, j in zip(dataset.data.edge_index[0], dataset.data.edge_index[1]):
adj_matrix[i, j] = 1.
return adj_matrix
def get_ppr_matrix(
adj_matrix: np.ndarray,
alpha: float = 0.1) -> np.ndarray:
num_nodes = adj_matrix.shape[0]
A_tilde = adj_matrix + np.eye(num_nodes)
D_tilde = np.diag(1/np.sqrt(A_tilde.sum(axis=1)))
H = D_tilde @ A_tilde @ D_tilde
return alpha * np.linalg.inv(np.eye(num_nodes) - (1 - alpha) * H)
def get_heat_matrix(
adj_matrix: np.ndarray,
t: float = 5.0) -> np.ndarray:
num_nodes = adj_matrix.shape[0]
A_tilde = adj_matrix + np.eye(num_nodes)
D_tilde = np.diag(1/np.sqrt(A_tilde.sum(axis=1)))
H = D_tilde @ A_tilde @ D_tilde
return expm(-t * (np.eye(num_nodes) - H))
def get_top_k_matrix(A: np.ndarray, k: int = 128) -> np.ndarray:
num_nodes = A.shape[0]
row_idx = np.arange(num_nodes)
A[A.argsort(axis=0)[:num_nodes - k], row_idx] = 0.
norm = A.sum(axis=0)
norm[norm <= 0] = 1
return A/norm
def get_clipped_matrix(A: np.ndarray, eps: float = 0.01) -> np.ndarray:
num_nodes = A.shape[0]
A[A < eps] = 0.
norm = A.sum(axis=0)
norm[norm <= 0] = 1
return A/norm
def set_train_val_test_split(
seed: int,
data: Data,
num_development: int = 1500,
num_per_class: int = 20) -> Data:
rnd_state = np.random.RandomState(development_seed)
num_nodes = data.y.shape[0]
development_idx = rnd_state.choice(num_nodes, num_development, replace=False)
test_idx = [i for i in np.arange(num_nodes) if i not in development_idx]
train_idx = []
rnd_state = np.random.RandomState(seed)
for c in range(data.y.max() + 1):
class_idx = development_idx[np.where(data.y[development_idx].cpu() == c)[0]]
train_idx.extend(rnd_state.choice(class_idx, num_per_class, replace=False))
val_idx = [i for i in development_idx if i not in train_idx]
def get_mask(idx):
mask = torch.zeros(num_nodes, dtype=torch.bool)
mask[idx] = 1
return mask
data.train_mask = get_mask(train_idx)
data.val_mask = get_mask(val_idx)
data.test_mask = get_mask(test_idx)
return data
class PPRDataset(InMemoryDataset):
def __init__(self,
name: str = 'Cora',
use_lcc: bool = True,
alpha: float = 0.1,
k: int = 16,
eps: float = None):
self.name = name
self.use_lcc = use_lcc
self.alpha = alpha
self.k = k
self.eps = eps
super(PPRDataset, self).__init__(DATA_PATH)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self) -> list:
return []
@property
def processed_file_names(self) -> list:
return [str(self) + '.pt']
def download(self):
pass
def process(self):
base = get_dataset(name=self.name, use_lcc=self.use_lcc)
adj_matrix = get_adj_matrix(base)
ppr_matrix = get_ppr_matrix(adj_matrix,
alpha=self.alpha)
if self.k:
print(f'Selecting top {self.k} edges per node.')
ppr_matrix = get_top_k_matrix(ppr_matrix, k=self.k)
elif self.eps:
print(f'Selecting edges with weight greater than {self.eps}.')
ppr_matrix = get_clipped_matrix(ppr_matrix, eps=self.eps)
else:
raise ValueError
edges_i = []
edges_j = []
edge_attr = []
for i, row in enumerate(ppr_matrix):
for j in np.where(row > 0)[0]:
edges_i.append(i)
edges_j.append(j)
edge_attr.append(ppr_matrix[i, j])
edge_index = [edges_i, edges_j]
data = Data(
x=base.data.x,
edge_index=torch.LongTensor(edge_index),
edge_attr=torch.FloatTensor(edge_attr),
y=base.data.y,
train_mask=torch.zeros(base.data.train_mask.size()[0], dtype=torch.bool),
test_mask=torch.zeros(base.data.test_mask.size()[0], dtype=torch.bool),
val_mask=torch.zeros(base.data.val_mask.size()[0], dtype=torch.bool)
)
data, slices = self.collate([data])
torch.save((data, slices), self.processed_paths[0])
def __str__(self) -> str:
return f'{self.name}_ppr_alpha={self.alpha}_k={self.k}_eps={self.eps}_lcc={self.use_lcc}'
class HeatDataset(InMemoryDataset):
def __init__(self,
name: str = 'Cora',
use_lcc: bool = True,
t: float = 5.0,
k: int = 16,
eps: float = None):
self.name = name
self.use_lcc = use_lcc
self.t = t
self.k = k
self.eps = eps
super(HeatDataset, self).__init__(DATA_PATH)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self) -> list:
return []
@property
def processed_file_names(self) -> list:
return [str(self) + '.pt']
def download(self):
pass
def process(self):
base = get_dataset(name=self.name, use_lcc=self.use_lcc)
adj_matrix = get_adj_matrix(base)
heat_matrix = get_heat_matrix(adj_matrix,
t=self.t)
if self.k:
print(f'Selecting top {self.k} edges per node.')
heat_matrix = get_top_k_matrix(heat_matrix, k=self.k)
elif self.eps:
print(f'Selecting edges with weight greater than {self.eps}.')
heat_matrix = get_clipped_matrix(heat_matrix, eps=self.eps)
else:
raise ValueError
edges_i = []
edges_j = []
edge_attr = []
for i, row in enumerate(heat_matrix):
for j in np.where(row > 0)[0]:
edges_i.append(i)
edges_j.append(j)
edge_attr.append(heat_matrix[i, j])
edge_index = [edges_i, edges_j]
data = Data(
x=base.data.x,
edge_index=torch.LongTensor(edge_index),
edge_attr=torch.FloatTensor(edge_attr),
y=base.data.y,
train_mask=torch.zeros(base.data.train_mask.size()[0], dtype=torch.bool),
test_mask=torch.zeros(base.data.test_mask.size()[0], dtype=torch.bool),
val_mask=torch.zeros(base.data.val_mask.size()[0], dtype=torch.bool)
)
data, slices = self.collate([data])
torch.save((data, slices), self.processed_paths[0])
def __str__(self) -> str:
return f'{self.name}_heat_t={self.t}_k={self.k}_eps={self.eps}_lcc={self.use_lcc}'
| true
| true
|
790235b86183ea460af3ada26fdfb231b0d56329
| 3,509
|
py
|
Python
|
src/genie/libs/parser/dnac/interface.py
|
kacann/genieparser
|
76e19003199c393c59a33546726de3ff5486da80
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/dnac/interface.py
|
kacann/genieparser
|
76e19003199c393c59a33546726de3ff5486da80
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/dnac/interface.py
|
kacann/genieparser
|
76e19003199c393c59a33546726de3ff5486da80
|
[
"Apache-2.0"
] | 1
|
2021-07-07T18:07:56.000Z
|
2021-07-07T18:07:56.000Z
|
"""
interface.py
DNAC parsers for the following show commands:
* /dna/intent/api/v1/interface
"""
import os
import logging
import pprint
import re
import unittest
from genie import parsergen
from collections import defaultdict
from ats.log.utils import banner
from genie.metaparser import MetaParser
from genie.metaparser.util import merge_dict, keynames_convert
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional, \
Or, \
And, \
Default, \
Use
# import parser utils
from genie.libs.parser.utils.common import Common
logger = logging.getLogger(__name__)
# ============================================
# Schema for '/dna/intent/api/v1/interface'
# ============================================
class InterfaceSchema(MetaParser):
"""schema for /dna/intent/api/v1/interface, /dna/intent/api/v1/interface/{interface}"""
schema = {
Any(): {
"adminStatus": str,
Optional("className"): str,
Optional("description"): str,
"deviceId": str,
Optional("duplex"): str,
Optional("id"): str,
"ifIndex": str,
Optional("instanceTenantId"): str,
Optional("instanceUuid"): str,
"interfaceType": str,
Optional("ipv4Address"): str,
Optional("ipv4Mask"): str,
"isisSupport": str,
"lastUpdated": str,
Optional("macAddress"): str,
Optional("mappedPhysicalInterfaceId"): str,
Optional("mappedPhysicalInterfaceName"): str,
Optional("mediaType"): str,
Optional("nativeVlanId"): str,
"ospfSupport": str,
"pid": str,
"portMode": str,
"portName": str,
Optional("portType"): str,
"serialNo": str,
"series": str,
Optional("speed"): str,
"status": str,
Optional("vlanId"): str,
Optional("voiceVlan"): str
}
}
# ============================================
# Parser for '/dna/intent/api/v1/interface'
# ============================================
class Interface(InterfaceSchema):
"""parser for /dna/intent/api/v1/interface, /dna/intent/api/v1/interface/{interface}"""
cli_command = ['/dna/intent/api/v1/interface', '/dna/intent/api/v1/interface/{interface}']
def cli(self,interface="", output=None):
if output is None:
if interface:
cmd = self.cli_command[1].format(interface=interface)
else:
cmd = self.cli_command[0]
out = self.device.get(cmd).json()['response']
else:
out = output
result_dict={}
for intf_dict in out:
# remove None values
result_dict[intf_dict['portName']] = {k: v for k, v in intf_dict.items() if v is not None}
return result_dict
| 35.806122
| 102
| 0.455685
|
import os
import logging
import pprint
import re
import unittest
from genie import parsergen
from collections import defaultdict
from ats.log.utils import banner
from genie.metaparser import MetaParser
from genie.metaparser.util import merge_dict, keynames_convert
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional, \
Or, \
And, \
Default, \
Use
from genie.libs.parser.utils.common import Common
logger = logging.getLogger(__name__)
class InterfaceSchema(MetaParser):
schema = {
Any(): {
"adminStatus": str,
Optional("className"): str,
Optional("description"): str,
"deviceId": str,
Optional("duplex"): str,
Optional("id"): str,
"ifIndex": str,
Optional("instanceTenantId"): str,
Optional("instanceUuid"): str,
"interfaceType": str,
Optional("ipv4Address"): str,
Optional("ipv4Mask"): str,
"isisSupport": str,
"lastUpdated": str,
Optional("macAddress"): str,
Optional("mappedPhysicalInterfaceId"): str,
Optional("mappedPhysicalInterfaceName"): str,
Optional("mediaType"): str,
Optional("nativeVlanId"): str,
"ospfSupport": str,
"pid": str,
"portMode": str,
"portName": str,
Optional("portType"): str,
"serialNo": str,
"series": str,
Optional("speed"): str,
"status": str,
Optional("vlanId"): str,
Optional("voiceVlan"): str
}
}
class Interface(InterfaceSchema):
cli_command = ['/dna/intent/api/v1/interface', '/dna/intent/api/v1/interface/{interface}']
def cli(self,interface="", output=None):
if output is None:
if interface:
cmd = self.cli_command[1].format(interface=interface)
else:
cmd = self.cli_command[0]
out = self.device.get(cmd).json()['response']
else:
out = output
result_dict={}
for intf_dict in out:
result_dict[intf_dict['portName']] = {k: v for k, v in intf_dict.items() if v is not None}
return result_dict
| true
| true
|
790236a048130bda458133b39792d5a4070de4f2
| 409
|
py
|
Python
|
weblog/migrations/0012_userdetail_phone.py
|
mmohajer9/Resumo
|
625c279e71e98f0d461679d75c6c464f6afcf437
|
[
"MIT"
] | 1
|
2019-07-28T10:09:26.000Z
|
2019-07-28T10:09:26.000Z
|
weblog/migrations/0012_userdetail_phone.py
|
mmohajer9/Resumo
|
625c279e71e98f0d461679d75c6c464f6afcf437
|
[
"MIT"
] | 8
|
2021-04-08T22:03:32.000Z
|
2022-02-10T09:35:46.000Z
|
weblog/migrations/0012_userdetail_phone.py
|
mmohajer9/resumo
|
625c279e71e98f0d461679d75c6c464f6afcf437
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-07-18 19:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('weblog', '0011_auto_20190718_1829'),
]
operations = [
migrations.AddField(
model_name='userdetail',
name='phone',
field=models.CharField(blank=True, max_length=15, null=True),
),
]
| 21.526316
| 73
| 0.606357
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('weblog', '0011_auto_20190718_1829'),
]
operations = [
migrations.AddField(
model_name='userdetail',
name='phone',
field=models.CharField(blank=True, max_length=15, null=True),
),
]
| true
| true
|
7902379c4506ba6e2265e4edddb03e6451a04607
| 20
|
py
|
Python
|
tests/__init__.py
|
igorcoding/os-simulation
|
1e76fdda75c138025950876a2e7b68e99a55c54a
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
igorcoding/os-simulation
|
1e76fdda75c138025950876a2e7b68e99a55c54a
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
igorcoding/os-simulation
|
1e76fdda75c138025950876a2e7b68e99a55c54a
|
[
"MIT"
] | null | null | null |
__author__ = 'igor'
| 10
| 19
| 0.7
|
__author__ = 'igor'
| true
| true
|
79023899cfd004871ee353f9f8be7fd27b9fb485
| 8,357
|
py
|
Python
|
homework_3/main.py
|
showerhhh/ComplexNetwork
|
344fadee4e85924f45263a43d2110dae2a9394fe
|
[
"MIT"
] | null | null | null |
homework_3/main.py
|
showerhhh/ComplexNetwork
|
344fadee4e85924f45263a43d2110dae2a9394fe
|
[
"MIT"
] | null | null | null |
homework_3/main.py
|
showerhhh/ComplexNetwork
|
344fadee4e85924f45263a43d2110dae2a9394fe
|
[
"MIT"
] | null | null | null |
import functools
import os
import random
import matplotlib.pyplot as plt
import networkx as nx
def make_graph(path):
G = nx.DiGraph()
with open(path, 'r') as f:
lines = f.readlines()
# random.seed(0)
sample_nums = int(len(lines) * 0.00006)
lines = random.sample(lines, sample_nums)
lines = [line.strip() for line in lines]
for line in lines:
edge_node = line.split(' ')
source = int(edge_node[0])
target = int(edge_node[1])
G.add_edge(source, target)
return G
def degree_centrality(G):
    # degree centrality of each node
if len(G) <= 1:
return {n: 1 for n in G}
s = 1.0 / (len(G) - 1.0)
centrality = {n: d * s for n, d in G.degree()}
return centrality
def closeness_centrality(G, u=None, distance=None, wf_improved=True):
    # closeness centrality of each node
if G.is_directed():
G = G.reverse()
if distance is not None:
path_length = functools.partial(
nx.single_source_dijkstra_path_length, weight=distance
)
else:
path_length = nx.single_source_shortest_path_length
if u is None:
nodes = G.nodes
else:
nodes = [u]
closeness_centrality = {}
for n in nodes:
sp = path_length(G, n)
totsp = sum(sp.values())
len_G = len(G)
_closeness_centrality = 0.0
if totsp > 0.0 and len_G > 1:
_closeness_centrality = (len(sp) - 1.0) / totsp
if wf_improved:
s = (len(sp) - 1.0) / (len_G - 1)
_closeness_centrality *= s
closeness_centrality[n] = _closeness_centrality
if u is not None:
return closeness_centrality[u]
else:
return closeness_centrality
def core_number(G):
    # Core number (k-core) of each node
degrees = dict(G.degree())
nodes = sorted(degrees, key=degrees.get)
bin_boundaries = [0]
curr_degree = 0
for i, v in enumerate(nodes):
if degrees[v] > curr_degree:
bin_boundaries.extend([i] * (degrees[v] - curr_degree))
curr_degree = degrees[v]
node_pos = {v: pos for pos, v in enumerate(nodes)}
core = degrees
nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}
for v in nodes:
for u in nbrs[v]:
if core[u] > core[v]:
nbrs[u].remove(v)
pos = node_pos[u]
bin_start = bin_boundaries[core[u]]
node_pos[u] = bin_start
node_pos[nodes[bin_start]] = pos
nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]
bin_boundaries[core[u]] += 1
core[u] -= 1
return core
def pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1.0e-6, nstart=None, weight="weight",
dangling=None):
    # PageRank value of each node
if len(G) == 0:
return {}
if not G.is_directed():
D = G.to_directed()
else:
D = G
W = nx.stochastic_graph(D, weight=weight)
N = W.number_of_nodes()
if nstart is None:
x = dict.fromkeys(W, 1.0 / N)
else:
s = float(sum(nstart.values()))
x = {k: v / s for k, v in nstart.items()}
if personalization is None:
p = dict.fromkeys(W, 1.0 / N)
else:
s = float(sum(personalization.values()))
p = {k: v / s for k, v in personalization.items()}
if dangling is None:
dangling_weights = p
else:
s = float(sum(dangling.values()))
dangling_weights = {k: v / s for k, v in dangling.items()}
dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
for _ in range(max_iter):
xlast = x
x = dict.fromkeys(xlast.keys(), 0)
danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
for n in x:
for nbr in W[n]:
x[nbr] += alpha * xlast[n] * W[n][nbr][weight]
x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0)
err = sum([abs(x[n] - xlast[n]) for n in x])
if err < N * tol:
return x
raise nx.PowerIterationFailedConvergence(max_iter)
def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):
    # Hub and authority values of each node
if len(G) == 0:
return {}, {}
if nstart is None:
h = dict.fromkeys(G, 1.0 / G.number_of_nodes())
else:
h = nstart
s = 1.0 / sum(h.values())
for k in h:
h[k] *= s
for _ in range(max_iter):
hlast = h
h = dict.fromkeys(hlast.keys(), 0)
a = dict.fromkeys(hlast.keys(), 0)
for n in h:
for nbr in G[n]:
a[nbr] += hlast[n] * G[n][nbr].get("weight", 1)
for n in h:
for nbr in G[n]:
h[n] += a[nbr] * G[n][nbr].get("weight", 1)
s = 1.0 / max(h.values())
for n in h:
h[n] *= s
s = 1.0 / max(a.values())
for n in a:
a[n] *= s
err = sum([abs(h[n] - hlast[n]) for n in h])
if err < tol:
break
else:
raise nx.PowerIterationFailedConvergence(max_iter)
if normalized:
s = 1.0 / sum(a.values())
for n in a:
a[n] *= s
s = 1.0 / sum(h.values())
for n in h:
h[n] *= s
return h, a
def metrics_fuse(G):
degree = degree_centrality(G)
closeness = closeness_centrality(G)
    betweenness = nx.betweenness_centrality(G)  # Betweenness centrality of each node
core = core_number(G)
pageranks = pagerank(G)
hubs, authorities = hits(G)
fused = dict()
for node in G.nodes:
deg = degree[node]
cl = closeness[node]
bet = betweenness[node]
co = core[node]
pr = pageranks[node]
auth = authorities[node]
M = 0.05 * deg + 0.15 * cl + 0.1 * bet + 0.3 * co + 0.25 * pr + 0.15 * auth
fused[node] = M
pageranks = sorted(pageranks.items(), key=lambda x: x[1], reverse=True)
    print("Top 10 most influential nodes by PageRank:")
    for i in range(10):
        print("Node {}".format(pageranks[i][0]))
pos = nx.random_layout(G)
top_nodes = [k for k, v in pageranks[:10]]
other_nodes = [k for k, v in pageranks[10:]]
nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
nx.draw_networkx_edges(G, pos)
labels = dict()
for k, v in pageranks[:10]:
labels[k] = k
nx.draw_networkx_labels(G, pos, labels=labels)
plt.savefig("./pagerank_result.png")
plt.show()
print("---------------------------------------------")
authorities = sorted(authorities.items(), key=lambda x: x[1], reverse=True)
    print("Top 10 most influential nodes by HITS:")
    for i in range(10):
        print("Node {}".format(authorities[i][0]))
pos = nx.random_layout(G)
top_nodes = [k for k, v in authorities[:10]]
other_nodes = [k for k, v in authorities[10:]]
nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
nx.draw_networkx_edges(G, pos)
labels = dict()
for k, v in authorities[:10]:
labels[k] = k
nx.draw_networkx_labels(G, pos, labels=labels)
plt.savefig("./hits_result.png")
plt.show()
print("---------------------------------------------")
fused = sorted(fused.items(), key=lambda x: x[1], reverse=True)
    print("Top 10 most influential nodes by the fused metric:")
    for i in range(10):
        print("Node {}".format(fused[i][0]))
pos = nx.random_layout(G)
top_nodes = [k for k, v in fused[:10]]
other_nodes = [k for k, v in fused[10:]]
nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
nx.draw_networkx_edges(G, pos)
labels = dict()
for k, v in fused[:10]:
labels[k] = k
nx.draw_networkx_labels(G, pos, labels=labels)
plt.savefig("./fused_result.png")
plt.show()
print("---------------------------------------------")
return fused
if __name__ == '__main__':
path = './课程设计数据集.txt'
if not os.path.exists(path):
        print('Dataset not found')
exit(1)
G = make_graph(path)
metrics_fuse(G)
| 31.066914
| 105
| 0.551514
|
import functools
import os
import random
import matplotlib.pyplot as plt
import networkx as nx
def make_graph(path):
G = nx.DiGraph()
with open(path, 'r') as f:
lines = f.readlines()
sample_nums = int(len(lines) * 0.00006)
lines = random.sample(lines, sample_nums)
lines = [line.strip() for line in lines]
for line in lines:
edge_node = line.split(' ')
source = int(edge_node[0])
target = int(edge_node[1])
G.add_edge(source, target)
return G
def degree_centrality(G):
if len(G) <= 1:
return {n: 1 for n in G}
s = 1.0 / (len(G) - 1.0)
centrality = {n: d * s for n, d in G.degree()}
return centrality
def closeness_centrality(G, u=None, distance=None, wf_improved=True):
if G.is_directed():
G = G.reverse()
if distance is not None:
path_length = functools.partial(
nx.single_source_dijkstra_path_length, weight=distance
)
else:
path_length = nx.single_source_shortest_path_length
if u is None:
nodes = G.nodes
else:
nodes = [u]
closeness_centrality = {}
for n in nodes:
sp = path_length(G, n)
totsp = sum(sp.values())
len_G = len(G)
_closeness_centrality = 0.0
if totsp > 0.0 and len_G > 1:
_closeness_centrality = (len(sp) - 1.0) / totsp
if wf_improved:
s = (len(sp) - 1.0) / (len_G - 1)
_closeness_centrality *= s
closeness_centrality[n] = _closeness_centrality
if u is not None:
return closeness_centrality[u]
else:
return closeness_centrality
def core_number(G):
degrees = dict(G.degree())
nodes = sorted(degrees, key=degrees.get)
bin_boundaries = [0]
curr_degree = 0
for i, v in enumerate(nodes):
if degrees[v] > curr_degree:
bin_boundaries.extend([i] * (degrees[v] - curr_degree))
curr_degree = degrees[v]
node_pos = {v: pos for pos, v in enumerate(nodes)}
core = degrees
nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}
for v in nodes:
for u in nbrs[v]:
if core[u] > core[v]:
nbrs[u].remove(v)
pos = node_pos[u]
bin_start = bin_boundaries[core[u]]
node_pos[u] = bin_start
node_pos[nodes[bin_start]] = pos
nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]
bin_boundaries[core[u]] += 1
core[u] -= 1
return core
def pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1.0e-6, nstart=None, weight="weight",
dangling=None):
if len(G) == 0:
return {}
if not G.is_directed():
D = G.to_directed()
else:
D = G
W = nx.stochastic_graph(D, weight=weight)
N = W.number_of_nodes()
if nstart is None:
x = dict.fromkeys(W, 1.0 / N)
else:
s = float(sum(nstart.values()))
x = {k: v / s for k, v in nstart.items()}
if personalization is None:
p = dict.fromkeys(W, 1.0 / N)
else:
s = float(sum(personalization.values()))
p = {k: v / s for k, v in personalization.items()}
if dangling is None:
dangling_weights = p
else:
s = float(sum(dangling.values()))
dangling_weights = {k: v / s for k, v in dangling.items()}
dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
for _ in range(max_iter):
xlast = x
x = dict.fromkeys(xlast.keys(), 0)
danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
for n in x:
for nbr in W[n]:
x[nbr] += alpha * xlast[n] * W[n][nbr][weight]
x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0)
err = sum([abs(x[n] - xlast[n]) for n in x])
if err < N * tol:
return x
raise nx.PowerIterationFailedConvergence(max_iter)
def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):
if len(G) == 0:
return {}, {}
if nstart is None:
h = dict.fromkeys(G, 1.0 / G.number_of_nodes())
else:
h = nstart
s = 1.0 / sum(h.values())
for k in h:
h[k] *= s
for _ in range(max_iter):
hlast = h
h = dict.fromkeys(hlast.keys(), 0)
a = dict.fromkeys(hlast.keys(), 0)
for n in h:
for nbr in G[n]:
a[nbr] += hlast[n] * G[n][nbr].get("weight", 1)
for n in h:
for nbr in G[n]:
h[n] += a[nbr] * G[n][nbr].get("weight", 1)
s = 1.0 / max(h.values())
for n in h:
h[n] *= s
s = 1.0 / max(a.values())
for n in a:
a[n] *= s
err = sum([abs(h[n] - hlast[n]) for n in h])
if err < tol:
break
else:
raise nx.PowerIterationFailedConvergence(max_iter)
if normalized:
s = 1.0 / sum(a.values())
for n in a:
a[n] *= s
s = 1.0 / sum(h.values())
for n in h:
h[n] *= s
return h, a
def metrics_fuse(G):
degree = degree_centrality(G)
closeness = closeness_centrality(G)
betweenness = nx.betweenness_centrality(G)
core = core_number(G)
pageranks = pagerank(G)
hubs, authorities = hits(G)
fused = dict()
for node in G.nodes:
deg = degree[node]
cl = closeness[node]
bet = betweenness[node]
co = core[node]
pr = pageranks[node]
auth = authorities[node]
M = 0.05 * deg + 0.15 * cl + 0.1 * bet + 0.3 * co + 0.25 * pr + 0.15 * auth
fused[node] = M
pageranks = sorted(pageranks.items(), key=lambda x: x[1], reverse=True)
    print("Top 10 most influential nodes by PageRank:")
    for i in range(10):
        print("Node {}".format(pageranks[i][0]))
pos = nx.random_layout(G)
top_nodes = [k for k, v in pageranks[:10]]
other_nodes = [k for k, v in pageranks[10:]]
nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
nx.draw_networkx_edges(G, pos)
labels = dict()
for k, v in pageranks[:10]:
labels[k] = k
nx.draw_networkx_labels(G, pos, labels=labels)
plt.savefig("./pagerank_result.png")
plt.show()
print("---------------------------------------------")
authorities = sorted(authorities.items(), key=lambda x: x[1], reverse=True)
    print("Top 10 most influential nodes by HITS:")
    for i in range(10):
        print("Node {}".format(authorities[i][0]))
pos = nx.random_layout(G)
top_nodes = [k for k, v in authorities[:10]]
other_nodes = [k for k, v in authorities[10:]]
nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
nx.draw_networkx_edges(G, pos)
labels = dict()
for k, v in authorities[:10]:
labels[k] = k
nx.draw_networkx_labels(G, pos, labels=labels)
plt.savefig("./hits_result.png")
plt.show()
print("---------------------------------------------")
fused = sorted(fused.items(), key=lambda x: x[1], reverse=True)
    print("Top 10 most influential nodes by the fused metric:")
    for i in range(10):
        print("Node {}".format(fused[i][0]))
pos = nx.random_layout(G)
top_nodes = [k for k, v in fused[:10]]
other_nodes = [k for k, v in fused[10:]]
nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
nx.draw_networkx_edges(G, pos)
labels = dict()
for k, v in fused[:10]:
labels[k] = k
nx.draw_networkx_labels(G, pos, labels=labels)
plt.savefig("./fused_result.png")
plt.show()
print("---------------------------------------------")
return fused
if __name__ == '__main__':
path = './课程设计数据集.txt'
if not os.path.exists(path):
        print('Dataset not found')
exit(1)
G = make_graph(path)
metrics_fuse(G)
| true
| true
|
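A minimal sketch of the weighted fusion computed by `metrics_fuse` in the record above, run on a toy graph with the stock networkx implementations instead of the hand-rolled ones; the weights are taken from the record, while the toy edges are illustrative only.

import networkx as nx

# Toy directed graph; edges are illustrative only.
G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])

deg = nx.degree_centrality(G)
clo = nx.closeness_centrality(G)
bet = nx.betweenness_centrality(G)
core = nx.core_number(G)
pr = nx.pagerank(G)
hubs, auth = nx.hits(G)

# Same weights as in metrics_fuse above: 0.05/0.15/0.1/0.3/0.25/0.15.
fused = {n: 0.05 * deg[n] + 0.15 * clo[n] + 0.1 * bet[n]
            + 0.3 * core[n] + 0.25 * pr[n] + 0.15 * auth[n]
         for n in G.nodes}
print(sorted(fused.items(), key=lambda kv: kv[1], reverse=True))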
790239ddc17a2951e332a4c62c08477ad95f345b
| 481
|
py
|
Python
|
retry.py
|
IdeaBot/dev-addons
|
7dab098908ba335e3ef7470d7619939e1adf7ed3
|
[
"MIT"
] | null | null | null |
retry.py
|
IdeaBot/dev-addons
|
7dab098908ba335e3ef7470d7619939e1adf7ed3
|
[
"MIT"
] | null | null | null |
retry.py
|
IdeaBot/dev-addons
|
7dab098908ba335e3ef7470d7619939e1adf7ed3
|
[
"MIT"
] | null | null | null |
from libs import reaction as reactioncommand
class Reaction(reactioncommand.AdminReactionAddCommand):
'''Retries a text command
**Usage**
React to the message you want to re-run with the retry emoji
(The emoji is server-defined; ask your fellow server members for the correct emoji)'''
def matches(self, reaction, user):
return user == reaction.message.author
def action(self, reaction, user, client):
yield from client.on_message(reaction.message)
| 32.066667
| 86
| 0.742204
|
from libs import reaction as reactioncommand
class Reaction(reactioncommand.AdminReactionAddCommand):
def matches(self, reaction, user):
return user == reaction.message.author
def action(self, reaction, user, client):
yield from client.on_message(reaction.message)
| true
| true
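A tiny self-contained sketch of the `matches` predicate from the retry reaction above, using SimpleNamespace stand-ins instead of the IdeaBot framework's reaction and message objects; the stubs are assumptions for illustration only.

from types import SimpleNamespace

def matches(reaction, user):
    # Same rule as Reaction.matches above: only the message author may retry it.
    return user == reaction.message.author

author = SimpleNamespace(name='alice')
other = SimpleNamespace(name='bob')
reaction = SimpleNamespace(message=SimpleNamespace(author=author))
assert matches(reaction, author)
assert not matches(reaction, other)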
|
79023a24d090139dbf75e46e1e692fe8d55cbcfe
| 15,128
|
py
|
Python
|
main.py
|
crowdbreaks/preprocess
|
7d4d375eebf9a36bfbfc166e49fc63e72eb41e12
|
[
"MIT"
] | 1
|
2020-06-08T13:43:53.000Z
|
2020-06-08T13:43:53.000Z
|
main.py
|
crowdbreaks/preprocess
|
7d4d375eebf9a36bfbfc166e49fc63e72eb41e12
|
[
"MIT"
] | 1
|
2020-12-23T09:41:42.000Z
|
2020-12-23T09:41:42.000Z
|
main.py
|
crowdbreaks/preprocess
|
7d4d375eebf9a36bfbfc166e49fc63e72eb41e12
|
[
"MIT"
] | null | null | null |
import argparse
import sys, os
import logging
from utils.misc import ArgParseDefault, add_bool_arg
USAGE_DESC = """
python main.py <command> [<args>]
Available commands:
init Initialize project
sync Sync project data from S3
parse Preprocessing of data to generate `/data/1_parsed`
sample Sample cleaned data to generate `data/2_sampled`
batch Creates a new batch of tweets from a sampled file in `/data/2_sampled`
clean_labels Clean labels generated from (`data/3_labelled`) and merge/clean to generate `/data/4_cleaned_labels`
stats Output various stats about project
split Splits data into training, dev and test data
prepare_predict Prepares parsed data for prediction with txcl
"""
STATS_USAGE_DESC = """
python main.py stats <command> [<args>]
Available commands:
all Run all
overview Show overview
sample Show sampling stats
annotation Show annotation summary
annotation_cleaned Show cleaned annotation summary
annotator_outliers Show annotator outliers
"""
class ArgParse(object):
def __init__(self):
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-5.5s] [%(name)-12.12s]: %(message)s')
parser = ArgParseDefault(
description='',
usage=USAGE_DESC)
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
sys.exit(1)
getattr(self, args.command)()
def init(self):
from utils.task_helpers import init
parser = ArgParseDefault(description='Initialize project')
parser.add_argument('-p', '--project', type=str, required=False, default='', dest='project', help='Name of project to initialize')
parser.add_argument('--template', dest='template', action='store_true', default=False, help='Initialize project manually.')
args = parser.parse_args(sys.argv[2:])
init(args.project, args.template)
def sync(self):
from utils.task_helpers import sync
parser = ArgParseDefault(description='Sync project data from S3')
parser.add_argument('-s', '--source', choices=['all', 'streaming', 'annotation', 'media'], required=False, default='all', help='Type of data to be synced. By default sync all data belonging to this project.')
parser.add_argument('-l', '--last', required=False, type=int, help='Sync streaming data of last n days')
args = parser.parse_args(sys.argv[2:])
sync(data_type=args.source, last_n_days=args.last)
def parse(self):
import utils.processing.parse_tweets as parse_tweets
parser = ArgParseDefault(description='Preprocess raw data to create parquet files in `data/1_parsed`')
parser.add_argument('--no-parallel', dest='no_parallel', action='store_true', default=False, help='Do not run in parallel')
parser.add_argument('--extend', dest='extend', action='store_true', default=False, help='Extend existing parsed data')
parser.add_argument('--ray_num_cpus', type=int, default=None, help='Limit the number of worker processes for Ray during the memory intensive merge phase (by default using maximum worker processes)')
add_bool_arg(parser, 'extract_retweets', default=True, help='Extract top-level retweets')
add_bool_arg(parser, 'extract_quotes', default=True, help='Extract top-level quotes')
add_bool_arg(parser, 'omit_last_day', default=True, help='Omit parsing data from the last day')
args = parser.parse_args(sys.argv[2:])
parse_tweets.run(no_parallel=args.no_parallel, extract_retweets=args.extract_retweets, extract_quotes=args.extract_quotes, extend=args.extend, omit_last_day=args.omit_last_day, ray_num_cpus=args.ray_num_cpus)
def sample(self):
import utils.processing.sample_tweets as sample_tweets
parser = ArgParseDefault(description='Sample cleaned data to generate `data/2_sampled`')
parser.add_argument('-s', '--size', type=int, required=True, dest='size', help='Number of tweets to sample')
parser.add_argument('-bs', '--bin_size', type=int, required=False, help='Number of tweets per bin')
parser.add_argument('-m', '--mode', choices=['monthly', 'random'], required=False, default='random', help='Sampling mode. Random: Sample randomly. Monthly: Try to sample evenly within months.')
parser.add_argument('-l', '--langs', default=[], nargs='+', required=False, help='Filter by language(s)')
parser.add_argument('--contains_keywords', default=False, action='store_true', help='Only sample from tweets which include keywords')
parser.add_argument('--min_token_count', default=3, type=int, required=False, help='Minimum number of tokens')
parser.add_argument('--include_replies', default=False, action='store_true', help='Include replies')
parser.add_argument('--seed', type=int, required=False, default=None, help='Random state split')
        parser.add_argument('--extend', action='store_true', help='Extend existing sample given by seed by removing already labelled tweets. If size is <= original sample size this has no effect except removing labelled tweets')
add_bool_arg(parser, 'anonymize', default=True, help='Replace usernames and URLs with filler (@user and <url>)')
parser.add_argument('--max_date', required=False, default=None, help='Sample until date (YYYY-MM-DD), default: No max')
parser.add_argument('--min_date', required=False, default=None, help='Sample from date (YYYY-MM-DD), default: No min')
args = parser.parse_args(sys.argv[2:])
sample_tweets.run(size=args.size, contains_keywords=args.contains_keywords, anonymize=args.anonymize, min_token_count=args.min_token_count, langs=args.langs, include_replies=args.include_replies, mode=args.mode, seed=args.seed, extend=args.extend, bin_size=args.bin_size, min_date=args.min_date, max_date=args.max_date)
def batch(self):
from utils.processing.sample_tweets import SampleGenerator
parser = ArgParseDefault(description='Generate new batch for labelling. As a result a new csv will be created in `data/2_sampled/batch_{batch_id}/`')
parser.add_argument('-N', '--num_tweets', type=int, default=None, help='The number of tweets to be generated in new batch')
parser.add_argument('-b', '--batch', type=int, default=None, help='The batch id to be generated, default: Automatically find next batch')
parser.add_argument('--ignore-previous', dest='ignore_previous', action='store_true', default=False, help='Also sample tweets from old batches which were not annotated')
parser.add_argument('--stats-only', dest='stats_only', action='store_true', default=False, help='Show stats only')
args = parser.parse_args(sys.argv[2:])
s = SampleGenerator()
if args.stats_only:
s.stats(ignore_previous=args.ignore_previous)
else:
s.generate_batch(num_tweets=args.num_tweets, batch_id=args.batch, ignore_previous=args.ignore_previous)
def clean_labels(self):
import utils.processing.clean_labels as clean_labels
parser = ArgParseDefault(description='Clean/merge labels from different batches to generate final training input')
parser.add_argument('-s', '--selection-criterion', dest='selection_criterion', choices=['majority', 'unanimous'], required=False, default='majority', help='Can be "majority" (use majority vote) or "unanimous" (only select tweets with perfect agreement)')
parser.add_argument('-l', '--min-labels-cutoff', dest='min_labels_cutoff', type=int, required=False, default=3, help='Discard all tweets having less than min_labels_cutoff annotations')
parser.add_argument('-a', '--selection-agreement', dest='selection_agreement', type=float, required=False, default=None, help='Consider only tweets with a certain level of annotation agreement. If provided overwrites selection_criterion param.')
parser.add_argument('-m', '--mode', choices=['mturk', 'local', 'public', 'other', 'all'], type=str, required=False, default='all', help='Annotation mode which was used. Can be `mturk`, `local`, `public`, `other` or `all`')
parser.add_argument('--is-relevant', dest='is_relevant', action='store_true', help='Filter tweets which have been annotated as relevant/related')
parser.add_argument('--exclude-incorrect', dest='exclude_incorrect', action='store_true', help='Remove annotations which have been manually flagged as incorrect')
parser.add_argument('--cutoff-worker-outliers', dest='cutoff_worker_outliers', type=float, default=None, help='Remove all annotations by workers who have agreement scores below certain Z-score threshold (a reasonable value would be 2 or 3)')
parser.add_argument('--allow-nan', dest='allow_nan', nargs='+', choices=['id', 'text', 'question_id', 'answer_id'], default=[], required=False, help='Allow certain fields to be NaN/empty (by default each annotation has to have the fields id, text, answer_id and question_id)')
parser.add_argument('--contains-keywords', dest='contains_keywords', default=False, action='store_true', help='Remove annotations in which text does not contain keywords')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='Verbose output')
args = parser.parse_args(sys.argv[2:])
clean_labels.run_clean_labels(args.selection_criterion, args.min_labels_cutoff, args.selection_agreement, args.mode, args.is_relevant, args.exclude_incorrect, args.cutoff_worker_outliers, args.allow_nan, args.contains_keywords, args.verbose)
def stats(self):
from utils.task_helpers import stats
parser = ArgParseDefault(description='Output various stats about project', usage=STATS_USAGE_DESC)
parser.add_argument('command', choices=['all', 'overview', 'sample', 'annotation', 'annotator_outliers', 'annotation_cleaned'], help='Subcommand to run')
args = parser.parse_args(sys.argv[2:3])
if args.command == 'annotation':
parser = ArgParseDefault(description='Print stats about annotations')
parser.add_argument('-m', '--mode', choices=['all', 'mturk', 'local', 'public', 'other', '*'], type=str, required=False, default='all', help='Print stats for certain annotation modes only.')
args = parser.parse_args(sys.argv[3:])
stats('annotation', **vars(args))
elif args.command == 'annotator_outliers':
parser = ArgParseDefault(description='Find annotators which have under-performed compared to others')
parser.add_argument('-m', '--mode', choices=['mturk', 'local', 'public', 'other'], type=str, required=False, default='mturk', help='Print stats for certain annotation modes only.')
parser.add_argument('-b', '--batch-name', type=str, required=False, dest='batch_name', default='*', help='Only analyse for specific local/mturk batch name (this looks for a pattern in filename). Default: All data')
parser.add_argument('--agreement-cutoff', dest='agreement_cutoff', type=float, required=False, default=3, help='Z-value cutoff for inter-worker agreement deviation')
parser.add_argument('--time-cutoff', dest='time_cutoff', type=float, required=False, default=3, help='Z-value cutoff for average task duration per worker')
parser.add_argument('--min-tasks', dest='min_tasks', type=int, required=False, default=3, help='Min tasks for worker to have completed before considered as outlier')
parser.add_argument('--min-comparisons-count', dest='min_comparisons_count', type=int, required=False, default=20, help='Min number of questions to compare for a worker needed to compute agreement score')
args = parser.parse_args(sys.argv[3:])
stats('annotator_outliers', **vars(args))
else:
stats(args.command)
def split(self):
from utils.task_helpers import train_dev_test_split
parser = ArgParseDefault(description='Split annotated data into training and test data set')
parser.add_argument('--question', type=str, required=False, default='sentiment', help='Which data to load (has to be a valid question tag)')
parser.add_argument('--name', type=str, required=False, default='', help='In case there are multiple cleaned labelled data output files give name of file (without csv ending), default: No name provided (works only if a single file is present).')
parser.add_argument('--balanced-labels', dest='balanced_labels', action='store_true', default=False, help='Ensure equal label balance')
parser.add_argument('--all-questions', dest='all_questions', action='store_true', default=False, help='Generate files for all available question tags. This overwrites the `question` argument. Default: False.')
parser.add_argument('--label-tags', dest='label_tags', required=False, default=[], nargs='+', help='Only select examples with certain label tags')
parser.add_argument('--has-label', dest='has_label', required=False, default='', help='Only select examples which have also been tagged with certain label')
parser.add_argument('--dev-size', dest='dev_size', type=float, required=False, default=0.2, help='Fraction of dev size')
parser.add_argument('--test-size', dest='test_size', type=float, required=False, default=0.2, help='Fraction of test size')
parser.add_argument('--seed', type=int, required=False, default=42, help='Random state split')
args = parser.parse_args(sys.argv[2:])
train_dev_test_split(question=args.question, dev_size=args.dev_size, test_size=args.test_size, seed=args.seed, name=args.name, balanced_labels=args.balanced_labels, all_questions=args.all_questions, label_tags=args.label_tags, has_label=args.has_label)
def prepare_predict(self):
from utils.task_helpers import prepare_predict
parser = ArgParseDefault(description='Prepare data for prediction with the text-classification library. \
This function generates two files (1 for text 1 for IDs/created_at) under data/other. The text.csv file can then be predicted.')
parser.add_argument('--start_date', required=False, default=None, help='Filter start date')
parser.add_argument('--end_date', required=False, default=None, help='Filter end date')
add_bool_arg(parser, 'anonymize', default=True, help='Replace usernames and URLs with filler (@user and <url>)')
parser.add_argument('--url_filler', required=False, default='<url>', help='Filler for urls (if anonymize)')
parser.add_argument('--user_filler', required=False, default='@user', help='Filler for user names (if anonymize)')
args = parser.parse_args(sys.argv[2:])
prepare_predict(args)
if __name__ == '__main__':
ArgParse()
| 85.954545
| 327
| 0.708554
|
import argparse
import sys, os
import logging
from utils.misc import ArgParseDefault, add_bool_arg
USAGE_DESC = """
python main.py <command> [<args>]
Available commands:
init Initialize project
sync Sync project data from S3
parse Preprocessing of data to generate `/data/1_parsed`
sample Sample cleaned data to generate `data/2_sampled`
batch Creates a new batch of tweets from a sampled file in `/data/2_sampled`
clean_labels Clean labels generated from (`data/3_labelled`) and merge/clean to generate `/data/4_cleaned_labels`
stats Output various stats about project
split Splits data into training, dev and test data
prepare_predict Prepares parsed data for prediction with txcl
"""
STATS_USAGE_DESC = """
python main.py stats <command> [<args>]
Available commands:
all Run all
overview Show overview
sample Show sampling stats
annotation Show annotation summary
annotation_cleaned Show cleaned annotation summary
annotator_outliers Show annotator outliers
"""
class ArgParse(object):
def __init__(self):
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-5.5s] [%(name)-12.12s]: %(message)s')
parser = ArgParseDefault(
description='',
usage=USAGE_DESC)
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
sys.exit(1)
getattr(self, args.command)()
def init(self):
from utils.task_helpers import init
parser = ArgParseDefault(description='Initialize project')
parser.add_argument('-p', '--project', type=str, required=False, default='', dest='project', help='Name of project to initialize')
parser.add_argument('--template', dest='template', action='store_true', default=False, help='Initialize project manually.')
args = parser.parse_args(sys.argv[2:])
init(args.project, args.template)
def sync(self):
from utils.task_helpers import sync
parser = ArgParseDefault(description='Sync project data from S3')
parser.add_argument('-s', '--source', choices=['all', 'streaming', 'annotation', 'media'], required=False, default='all', help='Type of data to be synced. By default sync all data belonging to this project.')
parser.add_argument('-l', '--last', required=False, type=int, help='Sync streaming data of last n days')
args = parser.parse_args(sys.argv[2:])
sync(data_type=args.source, last_n_days=args.last)
def parse(self):
import utils.processing.parse_tweets as parse_tweets
parser = ArgParseDefault(description='Preprocess raw data to create parquet files in `data/1_parsed`')
parser.add_argument('--no-parallel', dest='no_parallel', action='store_true', default=False, help='Do not run in parallel')
parser.add_argument('--extend', dest='extend', action='store_true', default=False, help='Extend existing parsed data')
parser.add_argument('--ray_num_cpus', type=int, default=None, help='Limit the number of worker processes for Ray during the memory intensive merge phase (by default using maximum worker processes)')
add_bool_arg(parser, 'extract_retweets', default=True, help='Extract top-level retweets')
add_bool_arg(parser, 'extract_quotes', default=True, help='Extract top-level quotes')
add_bool_arg(parser, 'omit_last_day', default=True, help='Omit parsing data from the last day')
args = parser.parse_args(sys.argv[2:])
parse_tweets.run(no_parallel=args.no_parallel, extract_retweets=args.extract_retweets, extract_quotes=args.extract_quotes, extend=args.extend, omit_last_day=args.omit_last_day, ray_num_cpus=args.ray_num_cpus)
def sample(self):
import utils.processing.sample_tweets as sample_tweets
parser = ArgParseDefault(description='Sample cleaned data to generate `data/2_sampled`')
parser.add_argument('-s', '--size', type=int, required=True, dest='size', help='Number of tweets to sample')
parser.add_argument('-bs', '--bin_size', type=int, required=False, help='Number of tweets per bin')
parser.add_argument('-m', '--mode', choices=['monthly', 'random'], required=False, default='random', help='Sampling mode. Random: Sample randomly. Monthly: Try to sample evenly within months.')
parser.add_argument('-l', '--langs', default=[], nargs='+', required=False, help='Filter by language(s)')
parser.add_argument('--contains_keywords', default=False, action='store_true', help='Only sample from tweets which include keywords')
parser.add_argument('--min_token_count', default=3, type=int, required=False, help='Minimum number of tokens')
parser.add_argument('--include_replies', default=False, action='store_true', help='Include replies')
parser.add_argument('--seed', type=int, required=False, default=None, help='Random state split')
        parser.add_argument('--extend', action='store_true', help='Extend existing sample given by seed by removing already labelled tweets. If size is <= original sample size this has no effect except removing labelled tweets')
add_bool_arg(parser, 'anonymize', default=True, help='Replace usernames and URLs with filler (@user and <url>)')
parser.add_argument('--max_date', required=False, default=None, help='Sample until date (YYYY-MM-DD), default: No max')
parser.add_argument('--min_date', required=False, default=None, help='Sample from date (YYYY-MM-DD), default: No min')
args = parser.parse_args(sys.argv[2:])
sample_tweets.run(size=args.size, contains_keywords=args.contains_keywords, anonymize=args.anonymize, min_token_count=args.min_token_count, langs=args.langs, include_replies=args.include_replies, mode=args.mode, seed=args.seed, extend=args.extend, bin_size=args.bin_size, min_date=args.min_date, max_date=args.max_date)
def batch(self):
from utils.processing.sample_tweets import SampleGenerator
parser = ArgParseDefault(description='Generate new batch for labelling. As a result a new csv will be created in `data/2_sampled/batch_{batch_id}/`')
parser.add_argument('-N', '--num_tweets', type=int, default=None, help='The number of tweets to be generated in new batch')
parser.add_argument('-b', '--batch', type=int, default=None, help='The batch id to be generated, default: Automatically find next batch')
parser.add_argument('--ignore-previous', dest='ignore_previous', action='store_true', default=False, help='Also sample tweets from old batches which were not annotated')
parser.add_argument('--stats-only', dest='stats_only', action='store_true', default=False, help='Show stats only')
args = parser.parse_args(sys.argv[2:])
s = SampleGenerator()
if args.stats_only:
s.stats(ignore_previous=args.ignore_previous)
else:
s.generate_batch(num_tweets=args.num_tweets, batch_id=args.batch, ignore_previous=args.ignore_previous)
def clean_labels(self):
import utils.processing.clean_labels as clean_labels
parser = ArgParseDefault(description='Clean/merge labels from different batches to generate final training input')
parser.add_argument('-s', '--selection-criterion', dest='selection_criterion', choices=['majority', 'unanimous'], required=False, default='majority', help='Can be "majority" (use majority vote) or "unanimous" (only select tweets with perfect agreement)')
parser.add_argument('-l', '--min-labels-cutoff', dest='min_labels_cutoff', type=int, required=False, default=3, help='Discard all tweets having less than min_labels_cutoff annotations')
parser.add_argument('-a', '--selection-agreement', dest='selection_agreement', type=float, required=False, default=None, help='Consider only tweets with a certain level of annotation agreement. If provided overwrites selection_criterion param.')
parser.add_argument('-m', '--mode', choices=['mturk', 'local', 'public', 'other', 'all'], type=str, required=False, default='all', help='Annotation mode which was used. Can be `mturk`, `local`, `public`, `other` or `all`')
parser.add_argument('--is-relevant', dest='is_relevant', action='store_true', help='Filter tweets which have been annotated as relevant/related')
parser.add_argument('--exclude-incorrect', dest='exclude_incorrect', action='store_true', help='Remove annotations which have been manually flagged as incorrect')
parser.add_argument('--cutoff-worker-outliers', dest='cutoff_worker_outliers', type=float, default=None, help='Remove all annotations by workers who have agreement scores below certain Z-score threshold (a reasonable value would be 2 or 3)')
parser.add_argument('--allow-nan', dest='allow_nan', nargs='+', choices=['id', 'text', 'question_id', 'answer_id'], default=[], required=False, help='Allow certain fields to be NaN/empty (by default each annotation has to have the fields id, text, answer_id and question_id)')
parser.add_argument('--contains-keywords', dest='contains_keywords', default=False, action='store_true', help='Remove annotations in which text does not contain keywords')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='Verbose output')
args = parser.parse_args(sys.argv[2:])
clean_labels.run_clean_labels(args.selection_criterion, args.min_labels_cutoff, args.selection_agreement, args.mode, args.is_relevant, args.exclude_incorrect, args.cutoff_worker_outliers, args.allow_nan, args.contains_keywords, args.verbose)
def stats(self):
from utils.task_helpers import stats
parser = ArgParseDefault(description='Output various stats about project', usage=STATS_USAGE_DESC)
parser.add_argument('command', choices=['all', 'overview', 'sample', 'annotation', 'annotator_outliers', 'annotation_cleaned'], help='Subcommand to run')
args = parser.parse_args(sys.argv[2:3])
if args.command == 'annotation':
parser = ArgParseDefault(description='Print stats about annotations')
parser.add_argument('-m', '--mode', choices=['all', 'mturk', 'local', 'public', 'other', '*'], type=str, required=False, default='all', help='Print stats for certain annotation modes only.')
args = parser.parse_args(sys.argv[3:])
stats('annotation', **vars(args))
elif args.command == 'annotator_outliers':
parser = ArgParseDefault(description='Find annotators which have under-performed compared to others')
parser.add_argument('-m', '--mode', choices=['mturk', 'local', 'public', 'other'], type=str, required=False, default='mturk', help='Print stats for certain annotation modes only.')
parser.add_argument('-b', '--batch-name', type=str, required=False, dest='batch_name', default='*', help='Only analyse for specific local/mturk batch name (this looks for a pattern in filename). Default: All data')
parser.add_argument('--agreement-cutoff', dest='agreement_cutoff', type=float, required=False, default=3, help='Z-value cutoff for inter-worker agreement deviation')
parser.add_argument('--time-cutoff', dest='time_cutoff', type=float, required=False, default=3, help='Z-value cutoff for average task duration per worker')
parser.add_argument('--min-tasks', dest='min_tasks', type=int, required=False, default=3, help='Min tasks for worker to have completed before considered as outlier')
parser.add_argument('--min-comparisons-count', dest='min_comparisons_count', type=int, required=False, default=20, help='Min number of questions to compare for a worker needed to compute agreement score')
args = parser.parse_args(sys.argv[3:])
stats('annotator_outliers', **vars(args))
else:
stats(args.command)
def split(self):
from utils.task_helpers import train_dev_test_split
parser = ArgParseDefault(description='Split annotated data into training and test data set')
parser.add_argument('--question', type=str, required=False, default='sentiment', help='Which data to load (has to be a valid question tag)')
parser.add_argument('--name', type=str, required=False, default='', help='In case there are multiple cleaned labelled data output files give name of file (without csv ending), default: No name provided (works only if a single file is present).')
parser.add_argument('--balanced-labels', dest='balanced_labels', action='store_true', default=False, help='Ensure equal label balance')
parser.add_argument('--all-questions', dest='all_questions', action='store_true', default=False, help='Generate files for all available question tags. This overwrites the `question` argument. Default: False.')
parser.add_argument('--label-tags', dest='label_tags', required=False, default=[], nargs='+', help='Only select examples with certain label tags')
parser.add_argument('--has-label', dest='has_label', required=False, default='', help='Only select examples which have also been tagged with certain label')
parser.add_argument('--dev-size', dest='dev_size', type=float, required=False, default=0.2, help='Fraction of dev size')
parser.add_argument('--test-size', dest='test_size', type=float, required=False, default=0.2, help='Fraction of test size')
parser.add_argument('--seed', type=int, required=False, default=42, help='Random state split')
args = parser.parse_args(sys.argv[2:])
train_dev_test_split(question=args.question, dev_size=args.dev_size, test_size=args.test_size, seed=args.seed, name=args.name, balanced_labels=args.balanced_labels, all_questions=args.all_questions, label_tags=args.label_tags, has_label=args.has_label)
def prepare_predict(self):
from utils.task_helpers import prepare_predict
parser = ArgParseDefault(description='Prepare data for prediction with the text-classification library. \
This function generates two files (1 for text 1 for IDs/created_at) under data/other. The text.csv file can then be predicted.')
parser.add_argument('--start_date', required=False, default=None, help='Filter start date')
parser.add_argument('--end_date', required=False, default=None, help='Filter end date')
add_bool_arg(parser, 'anonymize', default=True, help='Replace usernames and URLs with filler (@user and <url>)')
parser.add_argument('--url_filler', required=False, default='<url>', help='Filler for urls (if anonymize)')
parser.add_argument('--user_filler', required=False, default='@user', help='Filler for user names (if anonymize)')
args = parser.parse_args(sys.argv[2:])
prepare_predict(args)
if __name__ == '__main__':
ArgParse()
| true
| true
|
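The `ArgParse` class in the record above implements the common two-stage argparse subcommand pattern: parse only `sys.argv[1:2]` to pick the command, then let the chosen handler re-parse the remaining flags itself. A minimal sketch of that pattern with two stub commands (the stubs are placeholders, not the project's real handlers):

import argparse
import sys

class Dispatcher:
    def __init__(self):
        # Stage 1: parse only the subcommand name.
        parser = argparse.ArgumentParser(usage='prog <command> [<args>]')
        parser.add_argument('command', help='Subcommand to run')
        args = parser.parse_args(sys.argv[1:2])
        if not hasattr(self, args.command):
            print('Unrecognized command')
            parser.print_help()
            sys.exit(1)
        # Stage 2: dispatch; each handler parses sys.argv[2:] on its own.
        getattr(self, args.command)()

    def init(self):
        print('stub: would initialize a project')

    def sync(self):
        print('stub: would sync data')

if __name__ == '__main__':
    Dispatcher()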
79023abee7c53a229bdd39700606f299a74e565a
| 445
|
py
|
Python
|
funnyPython/test_wordcloud.py
|
comeCU/coding-python
|
3a35e67f5a92c32734b93b5503e5b08bc63b06bd
|
[
"MIT"
] | 3
|
2018-07-27T12:56:19.000Z
|
2019-10-05T03:48:52.000Z
|
funnyPython/test_wordcloud.py
|
comeCU/coding-python
|
3a35e67f5a92c32734b93b5503e5b08bc63b06bd
|
[
"MIT"
] | null | null | null |
funnyPython/test_wordcloud.py
|
comeCU/coding-python
|
3a35e67f5a92c32734b93b5503e5b08bc63b06bd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Generate a word cloud
'''
Reference:
https://amueller.github.io/word_cloud/
https://github.com/amueller/word_cloud
'''
from wordcloud import WordCloud
import matplotlib.pyplot as plt
filename = "***.txt"  # input text file
with open(filename) as f:
mytext = f.read()
# print(mytext)
wordcloud = WordCloud().generate(mytext)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")  # hide the axes
plt.show()
| 16.481481
| 47
| 0.678652
|
from wordcloud import WordCloud
import matplotlib.pyplot as plt
filename = "***.txt"
with open(filename) as f:
mytext = f.read()
wordcloud = WordCloud().generate(mytext)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
| true
| true
|
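A variant sketch of the same word-cloud flow that writes the image to disk with `WordCloud.to_file` and inlines the input text, so it runs without the placeholder `***.txt` file; dimensions and colors are arbitrary choices.

from wordcloud import WordCloud

# Inline text stands in for the placeholder "***.txt" input above.
text = "python wordcloud example python cloud word example python"

wc = WordCloud(width=400, height=200, background_color='white').generate(text)
wc.to_file('wordcloud.png')  # render and save the cloud as a PNG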
79023ad609037af95cf26b6ba28f4ebd1572a490
| 26,353
|
py
|
Python
|
tests/test_lazyfixture.py
|
TvoroG/pytest-lazy-fixture
|
5fd987dd2d539202dec23367291964a33004d3ee
|
[
"MIT"
] | 242
|
2016-11-22T12:59:58.000Z
|
2022-03-25T14:01:10.000Z
|
tests/test_lazyfixture.py
|
TvoroG/pytest-lazy-fixture
|
5fd987dd2d539202dec23367291964a33004d3ee
|
[
"MIT"
] | 45
|
2016-11-18T23:37:18.000Z
|
2022-02-25T13:45:28.000Z
|
tests/test_lazyfixture.py
|
TvoroG/pytest-lazy-fixture
|
5fd987dd2d539202dec23367291964a33004d3ee
|
[
"MIT"
] | 20
|
2016-11-24T21:57:24.000Z
|
2022-03-08T18:47:07.000Z
|
# -*- coding: utf-8 -*-
import pytest
from pytest_lazyfixture import sorted_by_dependency, lazy_fixture, _sorted_argnames
try:
import numpy
except ImportError:
numpy = None
def test_fixture_in_parametrize_with_params(testdir):
items = testdir.getitems("""
import pytest
@pytest.fixture(params=[1,2])
def one(request):
return request.param
@pytest.mark.parametrize('arg1,arg2', [
('val1', pytest.lazy_fixture('one')),
('val1', 'val2')
])
def test_func(arg1, arg2):
pass
""")
assert len(items) == 3
assert items[0].callspec.params['one'] == 1
assert items[1].callspec.params['one'] == 2
def test_several_fixtures_in_parametrize_with_params(testdir):
items = testdir.getitems("""
import pytest
@pytest.fixture(params=[1,2])
def one(request):
return request.param
@pytest.fixture(params=[3,4])
def two(request):
return request.param
@pytest.mark.parametrize('arg1,arg2,arg3', [
('val1', pytest.lazy_fixture('one'), pytest.lazy_fixture('two')),
])
def test_func(arg1, arg2, arg3):
pass
""")
assert len(items) == 4
expected_results = [
{'one': 1, 'two': 3},
{'one': 1, 'two': 4},
{'one': 2, 'two': 3},
{'one': 2, 'two': 4}
]
def is_subset(subset, superset):
return all(superset[k] == subset[k] for k in subset)
for item in items:
assert any(is_subset(result, item.callspec.params) for result in expected_results)
def test_fixtures_in_parametrize_with_indirect(testdir):
items = testdir.getitems("""
import pytest
@pytest.fixture
def one():
pass
@pytest.fixture
def two():
pass
@pytest.mark.parametrize('arg1,one', [
('val1', pytest.lazy_fixture('two')),
], indirect=['one'])
def test_func(arg1, one):
pass
""")
assert len(items) == 1
assert items[0].callspec.params['one'].name == 'two'
def test_fixtures_with_params_in_parametrize_with_indirect(testdir):
items = testdir.getitems("""
import pytest
@pytest.fixture
def one():
pass
@pytest.fixture(params=[1,2])
def two(request):
return request.param
@pytest.mark.parametrize('arg1,one', [
('val1', pytest.lazy_fixture('two')),
], indirect=['one'])
def test_func(arg1, one):
pass
""")
assert len(items) == 2
assert items[0].callspec.params['two'] == 1
assert items[1].callspec.params['two'] == 2
def test_lazy_fixture_is_value_in_parametrize(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return 1
@pytest.fixture
def two():
return 2
@pytest.mark.parametrize('arg1,arg2', [
pytest.lazy_fixture(('one', 'two'))
])
def test_func(arg1, arg2):
assert arg1 == 1
assert arg2 == 2
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=1)
def test_lazy_fixture_as_funcarg_in_parametrize_with_indirect(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return 1
@pytest.fixture
def two():
return 2
@pytest.fixture
def three(request):
return request.param
@pytest.mark.parametrize('arg1,arg2,three', [
(pytest.lazy_fixture('one'), pytest.lazy_fixture('two'), '3')
], indirect=['three'])
def test_func(arg1, arg2, three):
assert arg1 == 1
assert arg2 == 2
assert three == '3'
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=1)
def test_lazy_fixture_is_value_in_parametrize_with_indirect(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one(request):
return request.param
@pytest.fixture
def two():
return 2
@pytest.mark.parametrize('one', [
pytest.lazy_fixture('two')
], indirect=True)
def test_func(one):
assert one == 2
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_lazy_fixture_as_param_of_fixture(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
@pytest.fixture
def one():
return 1
@pytest.fixture
def two():
return 2
def test_func(some):
assert some in [1, 2]
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=2)
def test_lazy_fixture_in_params_which_has_params(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return str(request.param)
@pytest.fixture
def two():
return 4
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
def test_func(some):
assert some in {'1', '2', '3', 4}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=4)
def test_lazy_fixture_three_times_nested(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[
1, 2, pytest.lazy_fixture('three')])
def one(request):
return str(request.param)
@pytest.fixture
def two():
return 4
@pytest.fixture
def three():
return 3
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
def test_func(some):
assert some in {'1', '2', '3', 4}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=4)
def test_lazy_fixture_three_times_nested_with_one_failed(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[
1, 2, pytest.lazy_fixture('three')
])
def one(request):
return str(request.param)
@pytest.fixture
def two():
return 4
@pytest.fixture
def three():
return 5
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
def test_func(some):
assert some in {'1', '2', '3', 4}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=3, failed=1)
def test_lazy_fixture_common_dependency(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.param)
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_hex(request):
return hex(request.param)
def test_as_str(as_str):
assert as_str in {'1', '2', '3'}
def test_as_hex(as_hex):
assert as_hex in {'0x1', '0x2', '0x3'}
def test_as_hex_vs_as_str(as_str, as_hex):
assert int(as_hex, 16) == int(as_str)
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=9)
def test_lazy_fixture_common_dependency_with_getfixturevalue(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.getfixturevalue('one'))
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_hex(request):
return hex(request.getfixturevalue('one'))
def test_as_str(as_str):
assert as_str in {'1', '2', '3'}
def test_as_hex(as_hex):
assert as_hex in {'0x1', '0x2', '0x3'}
def test_as_hex_vs_as_str(as_str, as_hex):
assert int(as_hex, 16) == int(as_str)
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=9)
def test_issues2(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.getfixturevalue('one'))
@pytest.mark.parametrize('val', ('a', 'b', 'c'))
def test_as_str(val, as_str):
combined = ''.join((val, as_str))
assert combined in {'a1', 'a2', 'a3', 'b1', 'b2', 'b3', 'c1', 'c2', 'c3'}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=9)
def test_issues2_2(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.getfixturevalue('one'))
@pytest.mark.parametrize('val, one', (
('a', '1'), ('b', '2'), ('c', '3')
), indirect=['one'])
def test_as_str(val, one, as_str):
combined = ''.join((val, as_str))
assert combined in {'a1', 'b2', 'c3'}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=3)
def test_issues3_autouse_fixtures_should_run_first(testdir):
testdir.makepyfile("""
import pytest
gl = False
@pytest.fixture(autouse=True)
def auto_one():
global gl
gl = True
@pytest.fixture
def one():
return 1 if gl is True else -1
@pytest.mark.parametrize('arg1', [
pytest.lazy_fixture('one')
])
def test_some(arg1):
assert arg1 == 1
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=1)
def test_issues10_xfail(testdir):
testdir.makepyfile("""
import pytest
def division(a, b):
return a / b
@pytest.fixture(params=[0])
def zero(request):
return request.param
@pytest.mark.parametrize(('a', 'b'), [
pytest.param(1, pytest.lazy_fixture('zero'), marks=pytest.mark.xfail(reason=ZeroDivisionError))
])
def test_division(a, b):
division(a, b)
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(skipped=1)
def test_issues11_autouse_fixture_in_test_class(testdir):
testdir.makepyfile("""
import pytest
class TestModels(object):
@pytest.fixture(autouse=True)
def setup(self):
self.var = 15
def test_model_a(self):
assert self.var == 15
def test_model_b(self):
assert self.var == 15
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=2)
def test_issues12_skip_test_function(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return 1
@pytest.mark.parametrize('a', [
pytest.param(pytest.lazy_fixture('one'), marks=pytest.mark.skip(reason='skip'))
])
def test_skip1(a):
assert a == 1
@pytest.mark.skip(reason='skip')
@pytest.mark.parametrize('a', [
pytest.lazy_fixture('one')
])
def test_skip2(a):
assert a == 1
def test_after_skip(one):
assert one == 1
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(skipped=2, passed=1)
def test_issues12_skip_test_method(testdir):
testdir.makepyfile("""
import pytest
class TestModels:
@pytest.fixture
def one(self):
return 1
@pytest.mark.skip(reason='skip this')
@pytest.mark.parametrize('a', [
pytest.lazy_fixture('one')
])
def test_model_a(self, a):
assert a == 1
@pytest.mark.parametrize('a', [
pytest.param(pytest.lazy_fixture('one'), marks=pytest.mark.skip(reason='skip this'))
])
def test_model_b(self, a):
assert a == 1
def test_after_skip(self, one):
assert one == 1
""")
reprec = testdir.runpytest('-s', '-v')
reprec.assert_outcomes(skipped=2, passed=1)
def test_issues12_lf_as_method_of_test_class(testdir):
testdir.makepyfile("""
import pytest
class TestModels:
@pytest.fixture
def one(self):
return 1
@pytest.mark.parametrize('a', [
pytest.lazy_fixture('one')
])
def test_lf(self, a):
assert a == 1
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1)
def test_issues13_unittest_testcase_class_should_not_fail(testdir):
testdir.makepyfile("""
import unittest
import pytest
class TestModels(unittest.TestCase):
def test_models(self):
assert True
def test_models_fail(self):
assert False
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1, failed=1)
def test_argnames_initialized_in_right_order(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return [1]
@pytest.fixture
def plus_two(a):
a[0] = a[0] + 2
@pytest.mark.parametrize('a,b', [
(pytest.lazy_fixture('one'), pytest.lazy_fixture('plus_two'))
])
def test_skip1(a, b):
assert a == [3]
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1)
# https://github.com/TvoroG/pytest-lazy-fixture/pull/19
def test_argnames_initialized_in_right_order2(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return [1]
@pytest.fixture
def plus_two(a):
a[0] = a[0] + 2
def test_skip1(a):
assert a == [3]
def pytest_generate_tests(metafunc):
metafunc.fixturenames = ['a', 'b']
metafunc.parametrize(argnames=['a', 'b'],
argvalues=[(pytest.lazy_fixture('one'), pytest.lazy_fixture('plus_two'))],
indirect=['b'])
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1)
def lf(fname):
return lazy_fixture(fname)
@pytest.mark.parametrize('params,expected_paths', [
(
{'some': lf('one'), 'one': lf('three')},
['one>some'],
),
(
{'grand1': lf('parent1_1'), 'parent1_1': lf('child1'),
'grand2': lf('parent1_2'), 'parent1_2': lf('child1'),
'child1': lf('none')},
['child1>parent1_1>grand1>parent1_2>grand2', 'child1>parent1_2>grand2>parent1_1>grand1']
),
(
{'param1': 'val1', 'param2': 'val2'},
['param1>param2', 'param2>param1']
),
({}, ['']),
({'param1': 'val1'}, ['param1']),
({'param1': lf('some')}, ['param1']),
(
{'one': 1, 'as_str': lf('one'), 'as_hex': lf('one')},
['one>as_str>as_hex', 'one>as_hex>as_str']
)
])
def test_sorted_by_dependency(params, expected_paths):
sp = sorted_by_dependency(params, [])
path = '>'.join(param for param, _ in sp)
assert path in expected_paths
@pytest.mark.parametrize('params,fixturenames,expect_keys', [
({'b': 1, 'a': 0}, ['c', 'a', 'd', 'b'], ['c', 'a', 'd', 'b']),
({'b': 1, 'a': 0}, ['c', 'b'], ['c', 'b', 'a'])
])
def test_sorted_argnames(params, fixturenames, expect_keys):
assert list(_sorted_argnames(params, fixturenames)) == expect_keys
def test_lazy_fixtures_with_subfixtures(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=["a", "A"])
def a(request):
return request.param
@pytest.fixture(params=["b", "B"])
def b(a, request):
return request.param + a
@pytest.fixture
def c(a):
return "c" + a
@pytest.fixture(params=[pytest.lazy_fixture('a'), pytest.lazy_fixture('b'), pytest.lazy_fixture('c')])
def d(request):
return "d" + request.param
@pytest.fixture(params=[pytest.lazy_fixture('a'), pytest.lazy_fixture('d'), ""])
def e(request):
return "e" + request.param
def test_one(d):
assert d in ("da", "dA", "dba", "dbA", "dBa", "dBA", "dca", "dcA")
def test_two(e):
assert e in ("ea", "eA", "eda", "edA", "edba", "edbA", "edBa", "edBA", "edca", "edcA", "e")
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=19)
def test_lazy_fixtures_in_subfixture(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def a():
return "a"
@pytest.fixture
def b():
return "b"
@pytest.fixture(params=[pytest.lazy_fixture('a'), pytest.lazy_fixture('b')])
def c(request):
return "c" + request.param
@pytest.fixture
def d(c):
return "d" + c
def test_one(d):
assert d in ("dca", "dcb")
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=2)
@pytest.mark.parametrize('autouse', [False, True])
def test_issues23(testdir, autouse):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[0, 1], autouse={})
def zero(request):
return request.param
@pytest.fixture(params=[1])
def one(request, zero):
return zero * request.param
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
])
def some(request):
return request.param
def test_func(some):
assert some in [0, 1]
""".format(autouse))
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=2)
def test_lazy_fixture_nested_fixtures(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one(request):
return "SOME_VALUE"
@pytest.fixture
def two(request):
return "SOME_VALUE2"
@pytest.fixture(params=[
pytest.lazy_fixture("one"),
pytest.lazy_fixture("two"),
])
def some_fixture1(request):
return request.param
@pytest.fixture
def some_fixture2(some_fixture1):
return "NEW_" + some_fixture1
def test_func(some_fixture2):
assert ((some_fixture2 == "NEW_SOME_VALUE") or (some_fixture2 == "NEW_SOME_VALUE2"))
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=2)
# https://github.com/TvoroG/pytest-lazy-fixture/issues/39
def test_usefixture_runs_before_function_fixtures(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append('using fixture1')
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append('using fixture2')
return 'fixture2'
@pytest.mark.usefixtures("module_fixture")
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""")
result = testdir.runpytest('-s')
stdout = result.stdout.str()
assert (
'using module fixture using fixture1 using module fixture using fixture2' in stdout
)
# https://github.com/TvoroG/pytest-lazy-fixture/issues/39
def test_autouse_and_usefixture_module_scope_runs_before_function_fixtures(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture(autouse=True)
def autouse_fixture():
invocation_order.append('using autouse_fixture')
@pytest.fixture(scope='module')
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append('using fixture1')
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append('using fixture2')
return 'fixture2'
@pytest.mark.usefixtures("module_fixture")
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""")
result = testdir.runpytest('-s')
stdout = result.stdout.str()
assert (
# pytest==3.2.5
'using autouse_fixture using module fixture using fixture1 using autouse_fixture using fixture2' in stdout
or
'using module fixture using autouse_fixture using fixture1 using autouse_fixture using fixture2' in stdout
)
@pytest.mark.parametrize('autouse_scope', [
'session',
'module',
pytest.param('function', marks=pytest.mark.xfail)
])
def test_session_autouse_and_usefixture_module_scope_runs_before_function_fixtures(testdir, autouse_scope):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture(autouse=True, scope='{autouse_scope}')
def autouse_fixture():
invocation_order.append('using autouse_fixture')
@pytest.fixture(scope='module')
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append("using fixture1")
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append("using fixture2")
return 'fixture2'
@pytest.mark.usefixtures("module_fixture")
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""".format(autouse_scope=autouse_scope))
result = testdir.runpytest('-s')
assert 'using autouse_fixture using module fixture using fixture1 using fixture2' in result.stdout.str()
# https://github.com/TvoroG/pytest-lazy-fixture/issues/39
def test_module_scope_runs_before_function_fixtures(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture(scope='module')
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append("using fixture1")
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append("using fixture2")
return 'fixture2'
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt, module_fixture):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""")
result = testdir.runpytest('-s')
stdout = result.stdout.str()
assert (
# pytest==3.2.5
'using fixture1 using module fixture using fixture2' in stdout
or
'using module fixture using fixture1 using fixture2' in stdout
)
# https://github.com/TvoroG/pytest-lazy-fixture/issues/42
@pytest.mark.skipif(numpy is None, reason='numpy is not installed')
def test_numpy_array_as_value(testdir):
testdir.makepyfile("""
import pytest
import numpy as np
@pytest.mark.parametrize(
'value',
[
np.arange(10, dtype=np.int64),
np.arange(10, dtype=np.int32),
]
)
def test_bug(value):
assert isinstance(value, np.ndarray)
""")
result = testdir.inline_run('-s')
result.assertoutcome(passed=2)
# https://github.com/TvoroG/pytest-lazy-fixture/issues/46
def test_lazy_fixture_ids(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
@pytest.fixture()
def foo():
return "foo"
@pytest.fixture(params=['spam', 'eggs'])
def bar(request):
return "bar-{}".format(request.param)
@pytest.mark.parametrize("data", [lazy_fixture("foo"),
lazy_fixture("bar")])
def test_the_thing(data):
assert False
""")
result = testdir.runpytest('--collect-only')
stdout = result.stdout.str()
assert 'test_the_thing[foo]' in stdout
assert 'test_the_thing[bar-spam]' in stdout
assert 'test_the_thing[bar-eggs]' in stdout
def test_eq():
assert lazy_fixture("Lol") == lazy_fixture("Lol")
assert lazy_fixture("Lol") != lazy_fixture("Wut")
assert lazy_fixture("Lol") != 123
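# Hedged usage sketch (added for illustration; the fixture and test names are
# hypothetical): the pattern exercised throughout this suite reduces to
# parametrizing with lazy_fixture so the fixture value is resolved at test
# setup time, e.g.:
#
#     import pytest
#     from pytest_lazyfixture import lazy_fixture
#
#     @pytest.fixture
#     def answer():
#         return 42
#
#     @pytest.mark.parametrize('value', [lazy_fixture('answer'), 0])
#     def test_value(value):
#         assert value in (42, 0)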
| 28.864184
| 114
| 0.570334
|
import pytest
from pytest_lazyfixture import sorted_by_dependency, lazy_fixture, _sorted_argnames
try:
import numpy
except ImportError:
numpy = None
def test_fixture_in_parametrize_with_params(testdir):
items = testdir.getitems("""
import pytest
@pytest.fixture(params=[1,2])
def one(request):
return request.param
@pytest.mark.parametrize('arg1,arg2', [
('val1', pytest.lazy_fixture('one')),
('val1', 'val2')
])
def test_func(arg1, arg2):
pass
""")
assert len(items) == 3
assert items[0].callspec.params['one'] == 1
assert items[1].callspec.params['one'] == 2
def test_several_fixtures_in_parametrize_with_params(testdir):
items = testdir.getitems("""
import pytest
@pytest.fixture(params=[1,2])
def one(request):
return request.param
@pytest.fixture(params=[3,4])
def two(request):
return request.param
@pytest.mark.parametrize('arg1,arg2,arg3', [
('val1', pytest.lazy_fixture('one'), pytest.lazy_fixture('two')),
])
def test_func(arg1, arg2, arg3):
pass
""")
assert len(items) == 4
expected_results = [
{'one': 1, 'two': 3},
{'one': 1, 'two': 4},
{'one': 2, 'two': 3},
{'one': 2, 'two': 4}
]
def is_subset(subset, superset):
return all(superset[k] == subset[k] for k in subset)
for item in items:
assert any(is_subset(result, item.callspec.params) for result in expected_results)
def test_fixtures_in_parametrize_with_indirect(testdir):
items = testdir.getitems("""
import pytest
@pytest.fixture
def one():
pass
@pytest.fixture
def two():
pass
@pytest.mark.parametrize('arg1,one', [
('val1', pytest.lazy_fixture('two')),
], indirect=['one'])
def test_func(arg1, one):
pass
""")
assert len(items) == 1
assert items[0].callspec.params['one'].name == 'two'
def test_fixtures_with_params_in_parametrize_with_indirect(testdir):
items = testdir.getitems("""
import pytest
@pytest.fixture
def one():
pass
@pytest.fixture(params=[1,2])
def two(request):
return request.param
@pytest.mark.parametrize('arg1,one', [
('val1', pytest.lazy_fixture('two')),
], indirect=['one'])
def test_func(arg1, one):
pass
""")
assert len(items) == 2
assert items[0].callspec.params['two'] == 1
assert items[1].callspec.params['two'] == 2
def test_lazy_fixture_is_value_in_parametrize(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return 1
@pytest.fixture
def two():
return 2
@pytest.mark.parametrize('arg1,arg2', [
pytest.lazy_fixture(('one', 'two'))
])
def test_func(arg1, arg2):
assert arg1 == 1
assert arg2 == 2
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=1)
def test_lazy_fixture_as_funcarg_in_parametrize_with_indirect(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return 1
@pytest.fixture
def two():
return 2
@pytest.fixture
def three(request):
return request.param
@pytest.mark.parametrize('arg1,arg2,three', [
(pytest.lazy_fixture('one'), pytest.lazy_fixture('two'), '3')
], indirect=['three'])
def test_func(arg1, arg2, three):
assert arg1 == 1
assert arg2 == 2
assert three == '3'
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=1)
def test_lazy_fixture_is_value_in_parametrize_with_indirect(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one(request):
return request.param
@pytest.fixture
def two():
return 2
@pytest.mark.parametrize('one', [
pytest.lazy_fixture('two')
], indirect=True)
def test_func(one):
assert one == 2
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_lazy_fixture_as_param_of_fixture(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
@pytest.fixture
def one():
return 1
@pytest.fixture
def two():
return 2
def test_func(some):
assert some in [1, 2]
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=2)
def test_lazy_fixture_in_params_which_has_params(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return str(request.param)
@pytest.fixture
def two():
return 4
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
def test_func(some):
assert some in {'1', '2', '3', 4}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=4)
def test_lazy_fixture_three_times_nested(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[
1, 2, pytest.lazy_fixture('three')])
def one(request):
return str(request.param)
@pytest.fixture
def two():
return 4
@pytest.fixture
def three():
return 3
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
def test_func(some):
assert some in {'1', '2', '3', 4}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=4)
def test_lazy_fixture_three_times_nested_with_one_failed(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[
1, 2, pytest.lazy_fixture('three')
])
def one(request):
return str(request.param)
@pytest.fixture
def two():
return 4
@pytest.fixture
def three():
return 5
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
def test_func(some):
assert some in {'1', '2', '3', 4}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=3, failed=1)
def test_lazy_fixture_common_dependency(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.param)
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_hex(request):
return hex(request.param)
def test_as_str(as_str):
assert as_str in {'1', '2', '3'}
def test_as_hex(as_hex):
assert as_hex in {'0x1', '0x2', '0x3'}
def test_as_hex_vs_as_str(as_str, as_hex):
assert int(as_hex, 16) == int(as_str)
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=9)
def test_lazy_fixture_common_dependency_with_getfixturevalue(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.getfixturevalue('one'))
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_hex(request):
return hex(request.getfixturevalue('one'))
def test_as_str(as_str):
assert as_str in {'1', '2', '3'}
def test_as_hex(as_hex):
assert as_hex in {'0x1', '0x2', '0x3'}
def test_as_hex_vs_as_str(as_str, as_hex):
assert int(as_hex, 16) == int(as_str)
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=9)
def test_issues2(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.getfixturevalue('one'))
@pytest.mark.parametrize('val', ('a', 'b', 'c'))
def test_as_str(val, as_str):
combined = ''.join((val, as_str))
assert combined in {'a1', 'a2', 'a3', 'b1', 'b2', 'b3', 'c1', 'c2', 'c3'}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=9)
def test_issues2_2(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.getfixturevalue('one'))
@pytest.mark.parametrize('val, one', (
('a', '1'), ('b', '2'), ('c', '3')
), indirect=['one'])
def test_as_str(val, one, as_str):
combined = ''.join((val, as_str))
assert combined in {'a1', 'b2', 'c3'}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=3)
def test_issues3_autouse_fixtures_should_run_first(testdir):
testdir.makepyfile("""
import pytest
gl = False
@pytest.fixture(autouse=True)
def auto_one():
global gl
gl = True
@pytest.fixture
def one():
return 1 if gl is True else -1
@pytest.mark.parametrize('arg1', [
pytest.lazy_fixture('one')
])
def test_some(arg1):
assert arg1 == 1
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=1)
def test_issues10_xfail(testdir):
testdir.makepyfile("""
import pytest
def division(a, b):
return a / b
@pytest.fixture(params=[0])
def zero(request):
return request.param
@pytest.mark.parametrize(('a', 'b'), [
pytest.param(1, pytest.lazy_fixture('zero'), marks=pytest.mark.xfail(reason=ZeroDivisionError))
])
def test_division(a, b):
division(a, b)
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(skipped=1)
def test_issues11_autouse_fixture_in_test_class(testdir):
testdir.makepyfile("""
import pytest
class TestModels(object):
@pytest.fixture(autouse=True)
def setup(self):
self.var = 15
def test_model_a(self):
assert self.var == 15
def test_model_b(self):
assert self.var == 15
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=2)
def test_issues12_skip_test_function(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return 1
@pytest.mark.parametrize('a', [
pytest.param(pytest.lazy_fixture('one'), marks=pytest.mark.skip(reason='skip'))
])
def test_skip1(a):
assert a == 1
@pytest.mark.skip(reason='skip')
@pytest.mark.parametrize('a', [
pytest.lazy_fixture('one')
])
def test_skip2(a):
assert a == 1
def test_after_skip(one):
assert one == 1
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(skipped=2, passed=1)
def test_issues12_skip_test_method(testdir):
testdir.makepyfile("""
import pytest
class TestModels:
@pytest.fixture
def one(self):
return 1
@pytest.mark.skip(reason='skip this')
@pytest.mark.parametrize('a', [
pytest.lazy_fixture('one')
])
def test_model_a(self, a):
assert a == 1
@pytest.mark.parametrize('a', [
pytest.param(pytest.lazy_fixture('one'), marks=pytest.mark.skip(reason='skip this'))
])
def test_model_b(self, a):
assert a == 1
def test_after_skip(self, one):
assert one == 1
""")
reprec = testdir.runpytest('-s', '-v')
reprec.assert_outcomes(skipped=2, passed=1)
def test_issues12_lf_as_method_of_test_class(testdir):
testdir.makepyfile("""
import pytest
class TestModels:
@pytest.fixture
def one(self):
return 1
@pytest.mark.parametrize('a', [
pytest.lazy_fixture('one')
])
def test_lf(self, a):
assert a == 1
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1)
def test_issues13_unittest_testcase_class_should_not_fail(testdir):
testdir.makepyfile("""
import unittest
import pytest
class TestModels(unittest.TestCase):
def test_models(self):
assert True
def test_models_fail(self):
assert False
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1, failed=1)
def test_argnames_initialized_in_right_order(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return [1]
@pytest.fixture
def plus_two(a):
a[0] = a[0] + 2
@pytest.mark.parametrize('a,b', [
(pytest.lazy_fixture('one'), pytest.lazy_fixture('plus_two'))
])
def test_skip1(a, b):
assert a == [3]
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1)
def test_argnames_initialized_in_right_order2(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return [1]
@pytest.fixture
def plus_two(a):
a[0] = a[0] + 2
def test_skip1(a):
assert a == [3]
def pytest_generate_tests(metafunc):
metafunc.fixturenames = ['a', 'b']
metafunc.parametrize(argnames=['a', 'b'],
argvalues=[(pytest.lazy_fixture('one'), pytest.lazy_fixture('plus_two'))],
indirect=['b'])
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1)
def lf(fname):
return lazy_fixture(fname)
@pytest.mark.parametrize('params,expected_paths', [
(
{'some': lf('one'), 'one': lf('three')},
['one>some'],
),
(
{'grand1': lf('parent1_1'), 'parent1_1': lf('child1'),
'grand2': lf('parent1_2'), 'parent1_2': lf('child1'),
'child1': lf('none')},
['child1>parent1_1>grand1>parent1_2>grand2', 'child1>parent1_2>grand2>parent1_1>grand1']
),
(
{'param1': 'val1', 'param2': 'val2'},
['param1>param2', 'param2>param1']
),
({}, ['']),
({'param1': 'val1'}, ['param1']),
({'param1': lf('some')}, ['param1']),
(
{'one': 1, 'as_str': lf('one'), 'as_hex': lf('one')},
['one>as_str>as_hex', 'one>as_hex>as_str']
)
])
def test_sorted_by_dependency(params, expected_paths):
sp = sorted_by_dependency(params, [])
path = '>'.join(param for param, _ in sp)
assert path in expected_paths
@pytest.mark.parametrize('params,fixturenames,expect_keys', [
({'b': 1, 'a': 0}, ['c', 'a', 'd', 'b'], ['c', 'a', 'd', 'b']),
({'b': 1, 'a': 0}, ['c', 'b'], ['c', 'b', 'a'])
])
def test_sorted_argnames(params, fixturenames, expect_keys):
assert list(_sorted_argnames(params, fixturenames)) == expect_keys
def test_lazy_fixtures_with_subfixtures(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=["a", "A"])
def a(request):
return request.param
@pytest.fixture(params=["b", "B"])
def b(a, request):
return request.param + a
@pytest.fixture
def c(a):
return "c" + a
@pytest.fixture(params=[pytest.lazy_fixture('a'), pytest.lazy_fixture('b'), pytest.lazy_fixture('c')])
def d(request):
return "d" + request.param
@pytest.fixture(params=[pytest.lazy_fixture('a'), pytest.lazy_fixture('d'), ""])
def e(request):
return "e" + request.param
def test_one(d):
assert d in ("da", "dA", "dba", "dbA", "dBa", "dBA", "dca", "dcA")
def test_two(e):
assert e in ("ea", "eA", "eda", "edA", "edba", "edbA", "edBa", "edBA", "edca", "edcA", "e")
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=19)
def test_lazy_fixtures_in_subfixture(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def a():
return "a"
@pytest.fixture
def b():
return "b"
@pytest.fixture(params=[pytest.lazy_fixture('a'), pytest.lazy_fixture('b')])
def c(request):
return "c" + request.param
@pytest.fixture
def d(c):
return "d" + c
def test_one(d):
assert d in ("dca", "dcb")
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=2)
@pytest.mark.parametrize('autouse', [False, True])
def test_issues23(testdir, autouse):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[0, 1], autouse={})
def zero(request):
return request.param
@pytest.fixture(params=[1])
def one(request, zero):
return zero * request.param
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
])
def some(request):
return request.param
def test_func(some):
assert some in [0, 1]
""".format(autouse))
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=2)
def test_lazy_fixture_nested_fixtures(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one(request):
return "SOME_VALUE"
@pytest.fixture
def two(request):
return "SOME_VALUE2"
@pytest.fixture(params=[
pytest.lazy_fixture("one"),
pytest.lazy_fixture("two"),
])
def some_fixture1(request):
return request.param
@pytest.fixture
def some_fixture2(some_fixture1):
return "NEW_" + some_fixture1
def test_func(some_fixture2):
assert ((some_fixture2 == "NEW_SOME_VALUE") or (some_fixture2 == "NEW_SOME_VALUE2"))
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=2)
def test_usefixture_runs_before_function_fixtures(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append('using fixture1')
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append('using fixture2')
return 'fixture2'
@pytest.mark.usefixtures("module_fixture")
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""")
result = testdir.runpytest('-s')
stdout = result.stdout.str()
assert (
'using module fixture using fixture1 using module fixture using fixture2' in stdout
)
def test_autouse_and_usefixture_module_scope_runs_before_function_fixtures(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture(autouse=True)
def autouse_fixture():
invocation_order.append('using autouse_fixture')
@pytest.fixture(scope='module')
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append('using fixture1')
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append('using fixture2')
return 'fixture2'
@pytest.mark.usefixtures("module_fixture")
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""")
result = testdir.runpytest('-s')
stdout = result.stdout.str()
assert (
'using autouse_fixture using module fixture using fixture1 using autouse_fixture using fixture2' in stdout
or
'using module fixture using autouse_fixture using fixture1 using autouse_fixture using fixture2' in stdout
)
@pytest.mark.parametrize('autouse_scope', [
'session',
'module',
pytest.param('function', marks=pytest.mark.xfail)
])
def test_session_autouse_and_usefixture_module_scope_runs_before_function_fixtures(testdir, autouse_scope):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture(autouse=True, scope='{autouse_scope}')
def autouse_fixture():
invocation_order.append('using autouse_fixture')
@pytest.fixture(scope='module')
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append("using fixture1")
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append("using fixture2")
return 'fixture2'
@pytest.mark.usefixtures("module_fixture")
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""".format(autouse_scope=autouse_scope))
result = testdir.runpytest('-s')
assert 'using autouse_fixture using module fixture using fixture1 using fixture2' in result.stdout.str()
def test_module_scope_runs_before_function_fixtures(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture(scope='module')
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append("using fixture1")
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append("using fixture2")
return 'fixture2'
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt, module_fixture):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""")
result = testdir.runpytest('-s')
stdout = result.stdout.str()
assert (
'using fixture1 using module fixture using fixture2' in stdout
or
'using module fixture using fixture1 using fixture2' in stdout
)
@pytest.mark.skipif(numpy is None, reason='numpy is not installed')
def test_numpy_array_as_value(testdir):
testdir.makepyfile("""
import pytest
import numpy as np
@pytest.mark.parametrize(
'value',
[
np.arange(10, dtype=np.int64),
np.arange(10, dtype=np.int32),
]
)
def test_bug(value):
assert isinstance(value, np.ndarray)
""")
result = testdir.inline_run('-s')
result.assertoutcome(passed=2)
def test_lazy_fixture_ids(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
@pytest.fixture()
def foo():
return "foo"
@pytest.fixture(params=['spam', 'eggs'])
def bar(request):
return "bar-{}".format(request.param)
@pytest.mark.parametrize("data", [lazy_fixture("foo"),
lazy_fixture("bar")])
def test_the_thing(data):
assert False
""")
result = testdir.runpytest('--collect-only')
stdout = result.stdout.str()
assert 'test_the_thing[foo]' in stdout
assert 'test_the_thing[bar-spam]' in stdout
assert 'test_the_thing[bar-eggs]' in stdout
def test_eq():
assert lazy_fixture("Lol") == lazy_fixture("Lol")
assert lazy_fixture("Lol") != lazy_fixture("Wut")
assert lazy_fixture("Lol") != 123
| true
| true
|
79023c061fd988d92d1f773fd227a04612682335
| 1,951
|
py
|
Python
|
xkcd_feed/src/utils.py
|
lwittchen/twitter-bots
|
74458d312aadedde192dc6289912764ee639d34d
|
[
"MIT"
] | null | null | null |
xkcd_feed/src/utils.py
|
lwittchen/twitter-bots
|
74458d312aadedde192dc6289912764ee639d34d
|
[
"MIT"
] | null | null | null |
xkcd_feed/src/utils.py
|
lwittchen/twitter-bots
|
74458d312aadedde192dc6289912764ee639d34d
|
[
"MIT"
] | null | null | null |
from configparser import ConfigParser
import feedparser
import re
import requests
import tweepy
def get_id(xkcd_link: str) -> int:
"""
    Extract the comic id from an xkcd link
"""
match = re.search(r"\d+", xkcd_link)
if match:
return int(match.group())
else:
return 0
def get_xkcd_rss_entries(url: str):
"""
    Load the XKCD RSS feed and return its entries
"""
# get latest rss feed
feed = feedparser.parse(url)
return feed.get("entries")
def get_latest_rss_entry(entries: list):
"""
Extract latest entry from XKCD RSS feed and
parse the ID
"""
entry = entries[0]
id_ = get_id(xkcd_link=entry.get("id"))
return id_, entry
def download_comic(entry: dict, filename: str) -> None:
"""
Download latest image and store it in
current working directory
"""
match = re.search(r'src="(.*png)"', entry["summary"])
if match:
img_url = match.groups()[0]
r = requests.get(img_url)
r.raise_for_status()
with open(filename, "wb") as f:
f.write(r.content)
return None
def initialize_twitter_api(config: ConfigParser):
"""
    Authenticate and return a ready-to-use Twitter API object
"""
twitter_config = config["twitter"]
auth = tweepy.OAuthHandler(
twitter_config.get("consumer_key"), twitter_config.get("consumer_secret")
)
auth.set_access_token(
twitter_config.get("access_token"), twitter_config.get("access_secret")
)
api = tweepy.API(auth)
return api
def send_twitter_post(entry: dict, api: tweepy.API, img_fname: str) -> None:
"""
Post tweet on twitter
"""
match = re.search("title=(.*)/>", entry["summary"])
if match:
msg = match.groups()[0]
msg += f"\n {entry['link']}"
else:
msg = "-- No Title --"
api.update_with_media(status=msg, filename=img_fname)
return None
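# Minimal end-to-end sketch (added for illustration; "config.ini" and the
# output filename are assumptions, and the __main__ guard avoids network
# calls on import):
if __name__ == "__main__":
    config = ConfigParser()
    config.read("config.ini")  # hypothetical config file with a [twitter] section
    entries = get_xkcd_rss_entries("https://xkcd.com/rss.xml")
    comic_id, entry = get_latest_rss_entry(entries)
    img_fname = f"xkcd_{comic_id}.png"
    download_comic(entry, filename=img_fname)
    api = initialize_twitter_api(config)
    send_twitter_post(entry, api, img_fname=img_fname)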
| 22.170455
| 81
| 0.621732
|
from configparser import ConfigParser
import feedparser
import re
import requests
import tweepy
def get_id(xkcd_link: str) -> int:
match = re.search(r"\d+", xkcd_link)
if match:
return int(match.group())
else:
return 0
def get_xkcd_rss_entries(url: str):
feed = feedparser.parse(url)
return feed.get("entries")
def get_latest_rss_entry(entries: list):
entry = entries[0]
id_ = get_id(xkcd_link=entry.get("id"))
return id_, entry
def download_comic(entry: dict, filename: str) -> None:
match = re.search(r'src="(.*png)"', entry["summary"])
if match:
img_url = match.groups()[0]
r = requests.get(img_url)
r.raise_for_status()
with open(filename, "wb") as f:
f.write(r.content)
return None
def initialize_twitter_api(config: ConfigParser):
twitter_config = config["twitter"]
auth = tweepy.OAuthHandler(
twitter_config.get("consumer_key"), twitter_config.get("consumer_secret")
)
auth.set_access_token(
twitter_config.get("access_token"), twitter_config.get("access_secret")
)
api = tweepy.API(auth)
return api
def send_twitter_post(entry: dict, api: tweepy.API, img_fname: str) -> None:
match = re.search("title=(.*)/>", entry["summary"])
if match:
msg = match.groups()[0]
msg += f"\n {entry['link']}"
else:
msg = "-- No Title --"
api.update_with_media(status=msg, filename=img_fname)
return None
| true
| true
|
79023c1d1df96a091ab5efcbfe1a4fb02b17ce19
| 3,783
|
py
|
Python
|
locations/spiders/aldi_uk.py
|
nbeecher/alltheplaces
|
f28b75ffbd7a6b09aaf80bf3a46cb563527632de
|
[
"MIT"
] | null | null | null |
locations/spiders/aldi_uk.py
|
nbeecher/alltheplaces
|
f28b75ffbd7a6b09aaf80bf3a46cb563527632de
|
[
"MIT"
] | null | null | null |
locations/spiders/aldi_uk.py
|
nbeecher/alltheplaces
|
f28b75ffbd7a6b09aaf80bf3a46cb563527632de
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import scrapy
import re
from locations.items import GeojsonPointItem
COOKIES = {
"bm_sz": "04B124C1C96D68082A9F61BAAAF0B6D5~YAAQdjsvF22E8Xl6AQAACr1VfAxPEt+enarZyrOZrBaNvyuX71lK5QPuDR/FgDEWBZVMRhjiIf000W7Z1PiAjxobrz2Y5LcYMH3CvUNvpdS3MjVLUMGwMEBCf9L5nD5Gs9ho2YL8T7Tz7lYvpolvaOlJnKrHyhCFxxk/uyBZ2G/0QrGKLwSaCQShDsz7ink=",
"_abck": "440E40C406E69413DCCC08ABAA3E9022~-1~YAAQdjsvF26E8Xl6AQAACr1VfAYznoJdJhX7TNIZW1Rfh6qRhzquXg+L1TWoaL7nZUjXlNls2iPIKFQrCdrWqY/CNXW+mHyXibInMflIXJi5VVB/Swq53kABYJDuXYSlCunYvJAzMSr1q12NOYswz134Y8HRNzVWhkb2jMS5whmHxS/v0vniIvS1TQtKjEQlMGzQYmN41CmLX0JobipQhDtUB4VyNwztb2DCAZiqDX8BLwWg7h/DtPd4158qU69hNhayFTgWmD76/MiR8/T536tMmcoRyWLl4fEtP/XUmKOcksuZO7dbfNxXBffTxIXPYwf1eO77LNuZTCQq5kfsGZLJX8ODju2KSjnIF1vdnyHAe98FDIm+hw==~-1~-1~-1"
}
HEADERS = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'accept-encoding': 'gzip, deflate, br',
'cache-control': 'max-age=0',
'referer': 'https://www.aldi.co.uk/store-finder',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'sec-ch-ua-mobile': '?0',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'same-origin',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
}
class AldiUKSpider(scrapy.Spider):
name = "aldi_uk"
item_attributes = {'brand': "Aldi"}
allowed_domains = ['aldi.co.uk']
download_delay = 0.5
custom_settings = {
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
}
def start_requests(self):
url = 'https://www.aldi.co.uk/sitemap/store-en_gb-gbp'
yield scrapy.http.FormRequest(
url=url,
method='GET',
dont_filter=True,
cookies=COOKIES,
headers=HEADERS,
callback=self.parse
)
def parse(self, response):
response.selector.remove_namespaces()
store_urls = response.xpath('//url/loc/text()').extract()
for store_url in store_urls:
yield scrapy.http.FormRequest(
url=store_url,
method='GET',
dont_filter=True,
cookies=COOKIES,
headers=HEADERS,
callback=self.parse_store
)
def parse_store(self, response):
store_js = response.xpath('//script[@type="text/javascript"]/text()').extract_first()
json_data = re.search('gtmData =(.+?);', store_js).group(1)
data = json.loads(json_data)
geojson_data = response.xpath('//script[@class="js-store-finder-initial-state"][@type="application/json"]/text()').extract_first()
geodata = json.loads(geojson_data)
properties = {
'name': data['seoData']['name'],
'ref': data['seoData']['name'],
'addr_full': data['seoData']['address']['streetAddress'],
'city': data['seoData']['address']['addressLocality'],
'postcode': data['seoData']['address']['postalCode'],
'country': data['seoData']['address']['addressCountry'],
'website': response.request.url,
'opening_hours': str(data['seoData']['openingHours']).replace('[','').replace(']','').replace("'",''),
'lat': geodata['store']['latlng']['lat'],
'lon': geodata['store']['latlng']['lng'],
}
yield GeojsonPointItem(**properties)
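# Usage sketch (added; assumes this spider lives in a standard Scrapy project,
# as in the alltheplaces repo -- the output filename is illustrative):
#
#     scrapy crawl aldi_uk -o aldi_uk_stores.json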
| 42.988636
| 436
| 0.636532
|
import json
import scrapy
import re
from locations.items import GeojsonPointItem
COOKIES = {
"bm_sz": "04B124C1C96D68082A9F61BAAAF0B6D5~YAAQdjsvF22E8Xl6AQAACr1VfAxPEt+enarZyrOZrBaNvyuX71lK5QPuDR/FgDEWBZVMRhjiIf000W7Z1PiAjxobrz2Y5LcYMH3CvUNvpdS3MjVLUMGwMEBCf9L5nD5Gs9ho2YL8T7Tz7lYvpolvaOlJnKrHyhCFxxk/uyBZ2G/0QrGKLwSaCQShDsz7ink=",
"_abck": "440E40C406E69413DCCC08ABAA3E9022~-1~YAAQdjsvF26E8Xl6AQAACr1VfAYznoJdJhX7TNIZW1Rfh6qRhzquXg+L1TWoaL7nZUjXlNls2iPIKFQrCdrWqY/CNXW+mHyXibInMflIXJi5VVB/Swq53kABYJDuXYSlCunYvJAzMSr1q12NOYswz134Y8HRNzVWhkb2jMS5whmHxS/v0vniIvS1TQtKjEQlMGzQYmN41CmLX0JobipQhDtUB4VyNwztb2DCAZiqDX8BLwWg7h/DtPd4158qU69hNhayFTgWmD76/MiR8/T536tMmcoRyWLl4fEtP/XUmKOcksuZO7dbfNxXBffTxIXPYwf1eO77LNuZTCQq5kfsGZLJX8ODju2KSjnIF1vdnyHAe98FDIm+hw==~-1~-1~-1"
}
HEADERS = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'accept-encoding': 'gzip, deflate, br',
'cache-control': 'max-age=0',
'referer': 'https://www.aldi.co.uk/store-finder',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'sec-ch-ua-mobile': '?0',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'same-origin',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
}
class AldiUKSpider(scrapy.Spider):
name = "aldi_uk"
item_attributes = {'brand': "Aldi"}
allowed_domains = ['aldi.co.uk']
download_delay = 0.5
custom_settings = {
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
}
def start_requests(self):
url = 'https://www.aldi.co.uk/sitemap/store-en_gb-gbp'
yield scrapy.http.FormRequest(
url=url,
method='GET',
dont_filter=True,
cookies=COOKIES,
headers=HEADERS,
callback=self.parse
)
def parse(self, response):
response.selector.remove_namespaces()
store_urls = response.xpath('//url/loc/text()').extract()
for store_url in store_urls:
yield scrapy.http.FormRequest(
url=store_url,
method='GET',
dont_filter=True,
cookies=COOKIES,
headers=HEADERS,
callback=self.parse_store
)
def parse_store(self, response):
store_js = response.xpath('//script[@type="text/javascript"]/text()').extract_first()
json_data = re.search('gtmData =(.+?);', store_js).group(1)
data = json.loads(json_data)
geojson_data = response.xpath('//script[@class="js-store-finder-initial-state"][@type="application/json"]/text()').extract_first()
geodata = json.loads(geojson_data)
properties = {
'name': data['seoData']['name'],
'ref': data['seoData']['name'],
'addr_full': data['seoData']['address']['streetAddress'],
'city': data['seoData']['address']['addressLocality'],
'postcode': data['seoData']['address']['postalCode'],
'country': data['seoData']['address']['addressCountry'],
'website': response.request.url,
'opening_hours': str(data['seoData']['openingHours']).replace('[','').replace(']','').replace("'",''),
'lat': geodata['store']['latlng']['lat'],
'lon': geodata['store']['latlng']['lng'],
}
yield GeojsonPointItem(**properties)
| true
| true
|
79023c79106c69eb94f687636965238bf695f36c
| 834
|
py
|
Python
|
setup.py
|
usckiki82/mask-query-aide
|
c1866d79a5f6533dd16aaef856f97abb0af0065e
|
[
"MIT"
] | null | null | null |
setup.py
|
usckiki82/mask-query-aide
|
c1866d79a5f6533dd16aaef856f97abb0af0065e
|
[
"MIT"
] | null | null | null |
setup.py
|
usckiki82/mask-query-aide
|
c1866d79a5f6533dd16aaef856f97abb0af0065e
|
[
"MIT"
] | null | null | null |
''' setup module
'''
from distutils.core import setup
# TEMPLATE
setup(
name='mask-query-aide',
version='0.0',
description='python code to train ML for detecting people with masks',
long_description=open('README.rst').read(),
author='Christine Madden',
license=open('LICENSE').read(),
author_email='christine.m.madden19@gmail.com',
packages=['mask_query_aide'],
# python_requires="<3.8",
install_requires=[
"numpy==1.16.1",
"pandas",
"matplotlib",
"opencv-python<=4.1.2.30",
"keras==2.2.4",
"tensorflow<2.0",
"tensorflow-gpu<2.0",
"imageai",
"jupyterlab",
"requests",
],
entry_points={
'console_scripts':
[
'mask_query_aide = mask_query_aide.__main__:main',
]
}
)
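# Install sketch (added for illustration; run from the project root):
#
#     python setup.py install   # distutils-style install
#     pip install .             # pip works here too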
| 23.166667
| 74
| 0.570743
|
from distutils.core import setup
setup(
name='mask-query-aide',
version='0.0',
description='python code to train ML for detecting people with masks',
long_description=open('README.rst').read(),
author='Christine Madden',
license=open('LICENSE').read(),
author_email='christine.m.madden19@gmail.com',
packages=['mask_query_aide'],
install_requires=[
"numpy==1.16.1",
"pandas",
"matplotlib",
"opencv-python<=4.1.2.30",
"keras==2.2.4",
"tensorflow<2.0",
"tensorflow-gpu<2.0",
"imageai",
"jupyterlab",
"requests",
],
entry_points={
'console_scripts':
[
'mask_query_aide = mask_query_aide.__main__:main',
]
}
)
| true
| true
|
79023c86315cd9a246e332e449ec82a9a91b6ce9
| 500
|
py
|
Python
|
features/environment.py
|
abhisheksr01/zero-2-hero-python-flask-microservice
|
c94d3c1cee0b6888efdc1ae084b8fe2fcbf7041d
|
[
"MIT"
] | 4
|
2021-04-28T19:44:52.000Z
|
2021-12-15T23:29:22.000Z
|
features/environment.py
|
abhisheksr01/zero-2-hero-python-flask-microservice
|
c94d3c1cee0b6888efdc1ae084b8fe2fcbf7041d
|
[
"MIT"
] | null | null | null |
features/environment.py
|
abhisheksr01/zero-2-hero-python-flask-microservice
|
c94d3c1cee0b6888efdc1ae084b8fe2fcbf7041d
|
[
"MIT"
] | 6
|
2021-04-28T19:43:48.000Z
|
2021-12-15T23:29:25.000Z
|
import subprocess
import time
import os
TEST_TYPE = os.getenv("TEST_TYPE", "bdd")
def before_scenario(context, scenario):
if f"{TEST_TYPE}" == "bdd":
proc = subprocess.Popen(["make", "start"])
time.sleep(4)
context.proc = proc
context.root_url = "http://localhost:5000"
else:
context.root_url = os.getenv("ROOT_ENDPOINT")
def after_scenario(context, scenario):
if f"{TEST_TYPE}" == "bdd":
proc = context.proc
proc.terminate()
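# Usage sketch (added; behave discovers this environment.py automatically when
# run from the directory that contains features/):
#
#     TEST_TYPE=bdd behave     # spawns the app locally via `make start`
#     TEST_TYPE=remote ROOT_ENDPOINT=http://example.com behave
#     # (any non-"bdd" TEST_TYPE falls through to ROOT_ENDPOINT; the value
#     # "remote" and the endpoint are hypothetical)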
| 21.73913
| 53
| 0.622
|
import subprocess
import time
import os
TEST_TYPE = os.getenv("TEST_TYPE", "bdd")
def before_scenario(context, scenario):
if f"{TEST_TYPE}" == "bdd":
proc = subprocess.Popen(["make", "start"])
time.sleep(4)
context.proc = proc
context.root_url = "http://localhost:5000"
else:
context.root_url = os.getenv("ROOT_ENDPOINT")
def after_scenario(context, scenario):
if f"{TEST_TYPE}" == "bdd":
proc = context.proc
proc.terminate()
| true
| true
|
79023cfc051829bd828ec4faa142cf800e9c09de
| 568
|
py
|
Python
|
testing/tests/001-main/001-empty/002-authenticated/008-repositories.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | 1
|
2020-12-04T18:43:10.000Z
|
2020-12-04T18:43:10.000Z
|
testing/tests/001-main/001-empty/002-authenticated/008-repositories.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | null | null | null |
testing/tests/001-main/001-empty/002-authenticated/008-repositories.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | null | null | null |
with frontend.signin():
frontend.page("repositories", expect={ "document_title": testing.expect.document_title(u"Repositories"),
"content_title": testing.expect.paleyellow_title(0, u"Repositories"),
"pageheader_links": testing.expect.pageheader_links("authenticated",
"administrator"),
"script_user": testing.expect.script_user("admin") })
| 81.142857
| 112
| 0.473592
|
with frontend.signin():
frontend.page("repositories", expect={ "document_title": testing.expect.document_title(u"Repositories"),
"content_title": testing.expect.paleyellow_title(0, u"Repositories"),
"pageheader_links": testing.expect.pageheader_links("authenticated",
"administrator"),
"script_user": testing.expect.script_user("admin") })
| true
| true
|
79023d27b60a99f51b193b37835bfb18ef0cfe99
| 13,440
|
py
|
Python
|
foundation_public/models/organization.py
|
smegurus/smegurus-django
|
053973b5ff0b997c52bfaca8daf8e07db64a877c
|
[
"BSD-4-Clause"
] | 1
|
2020-07-16T10:58:23.000Z
|
2020-07-16T10:58:23.000Z
|
foundation_public/models/organization.py
|
smegurus/smegurus-django
|
053973b5ff0b997c52bfaca8daf8e07db64a877c
|
[
"BSD-4-Clause"
] | 13
|
2018-11-30T02:29:39.000Z
|
2022-03-11T23:35:49.000Z
|
foundation_public/models/organization.py
|
smegurus/smegurus-django
|
053973b5ff0b997c52bfaca8daf8e07db64a877c
|
[
"BSD-4-Clause"
] | null | null | null |
from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django_tenants.models import TenantMixin, DomainMixin
from smegurus import constants
from foundation_public.models.abstract_thing import AbstractPublicThing
from foundation_public.models.imageupload import PublicImageUpload
from foundation_public.models.brand import PublicBrand
from foundation_public.models.contactpoint import PublicContactPoint
from foundation_public.models.geocoordinate import PublicGeoCoordinate
from foundation_public.models.language import PublicLanguage
from foundation_public.models.openinghoursspecification import PublicOpeningHoursSpecification
from foundation_public.models.postaladdress import PublicPostalAddress
from foundation_public.models.place import PublicPlace
from foundation_tenant.utils import generate_hash
from foundation_tenant.utils import int_or_none
HOW_DISCOVERED_OPTIONS = (
("Google search", _("Google search")),
("SMEgurus.com", _("SMEgurus.com")),
("Social media", _("Social media")),
("Other", _("Other")),
)
HOW_MANY_SERVED_OPTIONS = (
(1, _('Up to 50 Entrepreneurs and 10 Advisors')),
(2, _('Up to 200 Entrepreneurs and 25 advisors')),
(3, _('Up to 400 Entrepreneurs and 50 advisors')),
)
TRADITIONAL_LEARNING_PREFERENCE = 1
BLENDED_LEARNING_PREFERENCE = 2
LEARNING_PREFERENCE_OPTIONS = (
(TRADITIONAL_LEARNING_PREFERENCE, _('Traditional Learning Preference')),
(BLENDED_LEARNING_PREFERENCE, _('Blended Learning Preference')),
)
TRADITIONAL_CHALLENGE = 1
REAL_WORLD_CHALLENGE = 2
CHALLENGE_OPTIONS = (
(TRADITIONAL_CHALLENGE, _('Traditional Challenge')),
(REAL_WORLD_CHALLENGE, _('Real World Challenge')),
)
class PublicOrganizationManager(models.Manager):
def delete_all(self):
"""
        Helper function which will delete all the PublicOrganization records in the DB.
"""
items = PublicOrganization.objects.all()
for item in items.all():
item.delete()
def get_by_pk_or_none(self, pk):
"""
        Helper function which gets the PublicOrganization object by its PK or
        returns None.
"""
try:
return PublicOrganization.objects.get(pk=int_or_none(pk))
except PublicOrganization.DoesNotExist:
return None
class PublicOrganization(TenantMixin, AbstractPublicThing):
"""
An organization such as a school, NGO, corporation, club, etc.
https://schema.org/Organization
"""
class Meta:
app_label = 'foundation_public'
db_table = 'smeg_organizations'
verbose_name = _('Organization')
verbose_name_plural = _('Organizations')
objects = PublicOrganizationManager()
# Payment Information.
on_trial = models.BooleanField(
default=False,
blank=True
)
paid_until = models.DateField(
auto_now_add=True,
blank=True,
null=True,
)
is_suspended = models.BooleanField(
_("Is Suspended"),
help_text=_('Variable controls if the entire tenant is suspended or not.'),
default=False,
blank=True
)
# Django-Tenant Information.
auto_create_schema = True
auto_drop_schema = True
# ------------------
# Schema Fields
# ------------------
# General Information.
address = models.ForeignKey(
PublicPostalAddress,
help_text=_('Physical address of the item.'),
null=True,
blank=True,
related_name="organization_address_%(app_label)s_%(class)s_related",
on_delete=models.SET_NULL
)
brands = models.ManyToManyField(
PublicBrand,
help_text=_('The brand(s) associated with a product or service, or the brand(s) maintained by an organization or business person.'),
blank=True,
related_name="organization_brands_%(app_label)s_%(class)s_related"
)
contact_point = models.ForeignKey(
PublicContactPoint,
help_text=_('A contact point for a person or organization'),
null=True,
blank=True,
related_name="organization_contact_point_%(app_label)s_%(class)s_related",
on_delete=models.SET_NULL
)
# department = models.ForeignKey(
# 'self',
# help_text=_('A relationship between an organization and a department of that organization, also described as an organization (allowing different urls, logos, opening hours). For example: a store with a pharmacy, or a bakery with a cafe.'),
# null=True,
# blank=True,
# related_name="organization_department_%(app_label)s_%(class)s_related"
# )
dissolution_date = models.DateField(
_("Dissolution Date"),
help_text=_('The date that this organization was dissolved.'),
blank=True,
null=True
)
duns = models.CharField(
_("Additional Name"),
max_length=127,
help_text=_('The Dun & Bradstreet DUNS number for identifying an organization or business person.'),
blank=True,
null=True,
)
email = models.EmailField(
_("Email"),
help_text=_('Email address.'),
null=True,
blank=True
)
fax_number = models.CharField(
_("Fax Number"),
max_length=31,
help_text=_('The fax number.'),
blank=True,
null=True,
)
founding_date = models.DateField(
_("Founding Date"),
help_text=_('The date that this organization was founded.'),
blank=True,
null=True
)
founding_location = models.ForeignKey(
PublicPlace,
help_text=_('The place where the Organization was founded.'),
null=True,
blank=True,
related_name="organization_founding_location_%(app_label)s_%(class)s_related",
on_delete=models.SET_NULL
)
global_location_number = models.CharField(
_("Global Location Number"),
max_length=255,
help_text=_('The <a href="http://www.gs1.org/gln">Global Location Number</a> (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.'),
blank=True,
null=True,
)
isic_v4 = models.CharField(
_("ISIC V4"),
max_length=255,
help_text=_('The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.'),
blank=True,
null=True,
)
legal_name = models.CharField(
_("Legal Name"),
max_length=255,
help_text=_('The official name of the organization, e.g. the registered company name.'),
blank=True,
null=True,
)
logo = models.ForeignKey(
PublicImageUpload,
help_text=_('An associated logo.'),
null=True,
blank=True,
related_name="organization_logo_%(app_label)s_%(class)s_related",
on_delete=models.SET_NULL
)
naics = models.CharField(
_("NAICS"),
max_length=127,
help_text=_('The North American Industry Classification System (NAICS) code for a particular organization or business person.'),
blank=True,
null=True,
)
# parent_organization = models.ForeignKey(
# 'self',
# help_text=_('The larger organization that this organization is a branch of, if any. Supersedes branchOf.'),
# null=True,
# blank=True,
# related_name="organization_parent_%(app_label)s_%(class)s_related"
# )
tax_id = models.CharField(
_("Tax ID"),
max_length=255,
help_text=_('The Tax / Fiscal ID of the organization or person, e.g. the TIN in the US or the CIF/NIF in Spain.'),
blank=True,
null=True,
)
telephone = models.CharField(
_("Telephone"),
max_length=31,
help_text=_('The telephone number.'),
blank=True,
null=True,
)
vat_id = models.CharField(
_("Tax ID"),
max_length=255,
help_text=_('The Value-added Tax ID of the organization or person.'),
blank=True,
null=True,
)
users = models.ManyToManyField(
User,
help_text=_('The users that belong to this Organization.'),
blank=True,
related_name='organization_users_%(app_label)s_%(class)s_related',
)
# ------------------
# Non-Schema Fields
# ------------------
# Metric
how_discovered = models.CharField(
_("How did you hear about SME Gurus?"),
choices=HOW_DISCOVERED_OPTIONS,
max_length=127,
help_text=_('The details of how the User discovered our website.'),
null=True,
blank=True
)
how_many_served = models.PositiveSmallIntegerField(
_("Which SME Gurus package would you like?"),
help_text=_('Pick the choice which best describes how many entrepreneurs are served.'),
choices=HOW_MANY_SERVED_OPTIONS,
null=True,
blank=True
)
is_tos_signed = models.BooleanField(
_("Is terms of service signed"),
default=False
)
# Social Media
twitter_url = models.URLField(
_("Twitter"),
null=True,
blank=True
)
facebook_url = models.URLField(
_("Facebook"),
null=True,
blank=True
)
instagram_url = models.URLField(
_("Instagram"),
null=True,
blank=True
)
linkedin_url = models.URLField(
_("Linkedin"),
null=True,
blank=True
)
github_url = models.URLField(
_("GitHub"),
null=True,
blank=True
)
google_plus_url = models.URLField(
_("Google Plus"),
null=True,
blank=True
)
youtube_url = models.URLField(
_("Instagram"),
null=True,
blank=True
)
flickr_url = models.URLField(
_("Flickr"),
null=True,
blank=True
)
pintrest_url = models.URLField(
_("Pintrest"),
null=True,
blank=True
)
reddit_url = models.URLField(
_("Reddit"),
null=True,
blank=True
)
soundcloud_url = models.URLField(
_("Soundcloud"),
null=True,
blank=True
)
# Application
is_setup = models.BooleanField(
_("Is this account setup and ready"),
default=False,
help_text=_('Variable controls whether the user profile has been setup.'),
)
learning_preference = models.PositiveSmallIntegerField(
_("Learning Preference"),
help_text=_('Indicates what learning preference to use.'),
default=BLENDED_LEARNING_PREFERENCE,
choices=LEARNING_PREFERENCE_OPTIONS,
)
challenge = models.PositiveSmallIntegerField(
_("Challenge"),
help_text=_('Indicates what world challenge to use.'),
default=REAL_WORLD_CHALLENGE,
choices=CHALLENGE_OPTIONS,
)
has_mentors = models.BooleanField(
_("Has mentors."),
default=True,
help_text=_('Variable controls whether external mentors are allowed in our system.'),
)
has_perks = models.BooleanField(
_("Has perks."),
default=True,
help_text=_('Variable controls whether perks are allowed in our system.'),
)
# NOTE: A complete list of time zones can be found here: http://stackoverflow.com/q/13866926
time_zone = models.CharField(
_("Timezone"),
max_length=255,
help_text=_('The timezone this Organization belongs to.'),
blank=True,
null=True,
default='America/Toronto',
)
salt = models.CharField(
_("Salt"),
max_length=127,
help_text=_('The unique salt value for this Organization which is used in cryptographic signing.'),
default=generate_hash,
unique=True,
blank=True
)
has_staff_checkin_required = models.BooleanField(
_("Has staff check-ins required"),
blank=True,
default=True,
help_text=_('Variable controls whether advisor checks are required for moving forward with bizumla creation.')
)
amazon_affiliate_link_url = models.URLField(
_("Amazon Affiliate Link URL"),
blank=True,
default=''
)
def __str__(self):
return str(self.legal_name)
def reverse(self, view_name):
"""
Reverse the URL of the request + view name for this Organization.
"""
if self.schema_name:
return settings.SMEGURUS_APP_HTTP_PROTOCOL + self.schema_name + '.%s' % settings.SMEGURUS_APP_HTTP_DOMAIN + reverse(view_name)
else:
return settings.SMEGURUS_APP_HTTP_PROTOCOL + '%s' % settings.SMEGURUS_APP_HTTP_DOMAIN + reverse(view_name)
def load_schema(self):
from django.db import connection
# Connection will set it back to our tenant.
connection.set_schema(self.schema_name, True) # Switch to Tenant.
class PublicDomain(DomainMixin):
class Meta:
app_label = 'foundation_public'
db_table = 'smeg_domains'
verbose_name = 'Domain'
verbose_name_plural = 'Domains'
pass
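# Hedged usage sketch (added; the pk and the URL pattern name 'home' are
# hypothetical):
#
#     org = PublicOrganization.objects.get_by_pk_or_none(1)
#     if org is not None:
#         url = org.reverse('home')   # tenant-aware absolute URL
#         org.load_schema()           # switch the DB connection to this tenant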
| 32.230216
| 289
| 0.641741
|
from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django_tenants.models import TenantMixin, DomainMixin
from smegurus import constants
from foundation_public.models.abstract_thing import AbstractPublicThing
from foundation_public.models.imageupload import PublicImageUpload
from foundation_public.models.brand import PublicBrand
from foundation_public.models.contactpoint import PublicContactPoint
from foundation_public.models.geocoordinate import PublicGeoCoordinate
from foundation_public.models.language import PublicLanguage
from foundation_public.models.openinghoursspecification import PublicOpeningHoursSpecification
from foundation_public.models.postaladdress import PublicPostalAddress
from foundation_public.models.place import PublicPlace
from foundation_tenant.utils import generate_hash
from foundation_tenant.utils import int_or_none
HOW_DISCOVERED_OPTIONS = (
("Google search", _("Google search")),
("SMEgurus.com", _("SMEgurus.com")),
("Social media", _("Social media")),
("Other", _("Other")),
)
HOW_MANY_SERVED_OPTIONS = (
(1, _('Up to 50 Entrepreneurs and 10 Advisors')),
(2, _('Up to 200 Entrepreneurs and 25 advisors')),
(3, _('Up to 400 Entrepreneurs and 50 advisors')),
)
TRADITIONAL_LEARNING_PREFERENCE = 1
BLENDED_LEARNING_PREFERENCE = 2
LEARNING_PREFERENCE_OPTIONS = (
(TRADITIONAL_LEARNING_PREFERENCE, _('Traditional Learning Preference')),
(BLENDED_LEARNING_PREFERENCE, _('Blended Learning Preference')),
)
TRADITIONAL_CHALLENGE = 1
REAL_WORLD_CHALLENGE = 2
CHALLENGE_OPTIONS = (
(TRADITIONAL_CHALLENGE, _('Traditional Challenge')),
(REAL_WORLD_CHALLENGE, _('Real World Challenge')),
)
class PublicOrganizationManager(models.Manager):
def delete_all(self):
items = PublicOrganization.objects.all()
for item in items.all():
item.delete()
def get_by_pk_or_none(self, pk):
try:
return PublicOrganization.objects.get(pk=int_or_none(pk))
except PublicOrganization.DoesNotExist:
return None
class PublicOrganization(TenantMixin, AbstractPublicThing):
class Meta:
app_label = 'foundation_public'
db_table = 'smeg_organizations'
verbose_name = _('Organization')
verbose_name_plural = _('Organizations')
objects = PublicOrganizationManager()
on_trial = models.BooleanField(
default=False,
blank=True
)
paid_until = models.DateField(
auto_now_add=True,
blank=True,
null=True,
)
is_suspended = models.BooleanField(
_("Is Suspended"),
help_text=_('Variable controls if the entire tenant is suspended or not.'),
default=False,
blank=True
)
auto_create_schema = True
auto_drop_schema = True
address = models.ForeignKey(
PublicPostalAddress,
help_text=_('Physical address of the item.'),
null=True,
blank=True,
related_name="organization_address_%(app_label)s_%(class)s_related",
on_delete=models.SET_NULL
)
brands = models.ManyToManyField(
PublicBrand,
help_text=_('The brand(s) associated with a product or service, or the brand(s) maintained by an organization or business person.'),
blank=True,
related_name="organization_brands_%(app_label)s_%(class)s_related"
)
contact_point = models.ForeignKey(
PublicContactPoint,
help_text=_('A contact point for a person or organization'),
null=True,
blank=True,
related_name="organization_contact_point_%(app_label)s_%(class)s_related",
on_delete=models.SET_NULL
)
dissolution_date = models.DateField(
_("Dissolution Date"),
help_text=_('The date that this organization was dissolved.'),
blank=True,
null=True
)
duns = models.CharField(
_("Additional Name"),
max_length=127,
help_text=_('The Dun & Bradstreet DUNS number for identifying an organization or business person.'),
blank=True,
null=True,
)
email = models.EmailField(
_("Email"),
help_text=_('Email address.'),
null=True,
blank=True
)
fax_number = models.CharField(
_("Fax Number"),
max_length=31,
help_text=_('The fax number.'),
blank=True,
null=True,
)
founding_date = models.DateField(
_("Founding Date"),
help_text=_('The date that this organization was founded.'),
blank=True,
null=True
)
founding_location = models.ForeignKey(
PublicPlace,
help_text=_('The place where the Organization was founded.'),
null=True,
blank=True,
related_name="organization_founding_location_%(app_label)s_%(class)s_related",
on_delete=models.SET_NULL
)
global_location_number = models.CharField(
_("Global Location Number"),
max_length=255,
help_text=_('The <a href="http://www.gs1.org/gln">Global Location Number</a> (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.'),
blank=True,
null=True,
)
isic_v4 = models.CharField(
_("ISIC V4"),
max_length=255,
help_text=_('The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.'),
blank=True,
null=True,
)
legal_name = models.CharField(
_("Legal Name"),
max_length=255,
help_text=_('The official name of the organization, e.g. the registered company name.'),
blank=True,
null=True,
)
logo = models.ForeignKey(
PublicImageUpload,
help_text=_('An associated logo.'),
null=True,
blank=True,
related_name="organization_logo_%(app_label)s_%(class)s_related",
on_delete=models.SET_NULL
)
naics = models.CharField(
_("NAICS"),
max_length=127,
help_text=_('The North American Industry Classification System (NAICS) code for a particular organization or business person.'),
blank=True,
null=True,
)
tax_id = models.CharField(
_("Tax ID"),
max_length=255,
help_text=_('The Tax / Fiscal ID of the organization or person, e.g. the TIN in the US or the CIF/NIF in Spain.'),
blank=True,
null=True,
)
telephone = models.CharField(
_("Telephone"),
max_length=31,
help_text=_('The telephone number.'),
blank=True,
null=True,
)
vat_id = models.CharField(
_("Tax ID"),
max_length=255,
help_text=_('The Value-added Tax ID of the organization or person.'),
blank=True,
null=True,
)
users = models.ManyToManyField(
User,
help_text=_('The users that belong to this Organization.'),
blank=True,
related_name='organization_users_%(app_label)s_%(class)s_related',
)
how_discovered = models.CharField(
_("How did you hear about SME Gurus?"),
choices=HOW_DISCOVERED_OPTIONS,
max_length=127,
help_text=_('The details of how the User discovered our website.'),
null=True,
blank=True
)
how_many_served = models.PositiveSmallIntegerField(
_("Which SME Gurus package would you like?"),
help_text=_('Pick the choice which best describes how many entrepreneurs are served.'),
choices=HOW_MANY_SERVED_OPTIONS,
null=True,
blank=True
)
is_tos_signed = models.BooleanField(
_("Is terms of service signed"),
default=False
)
twitter_url = models.URLField(
_("Twitter"),
null=True,
blank=True
)
facebook_url = models.URLField(
_("Facebook"),
null=True,
blank=True
)
instagram_url = models.URLField(
_("Instagram"),
null=True,
blank=True
)
linkedin_url = models.URLField(
_("Linkedin"),
null=True,
blank=True
)
github_url = models.URLField(
_("GitHub"),
null=True,
blank=True
)
google_plus_url = models.URLField(
_("Google Plus"),
null=True,
blank=True
)
youtube_url = models.URLField(
_("Instagram"),
null=True,
blank=True
)
flickr_url = models.URLField(
_("Flickr"),
null=True,
blank=True
)
pintrest_url = models.URLField(
_("Pintrest"),
null=True,
blank=True
)
reddit_url = models.URLField(
_("Reddit"),
null=True,
blank=True
)
soundcloud_url = models.URLField(
_("Soundcloud"),
null=True,
blank=True
)
is_setup = models.BooleanField(
_("Is this account setup and ready"),
default=False,
help_text=_('Variable controls whether the user profile has been setup.'),
)
learning_preference = models.PositiveSmallIntegerField(
_("Learning Preference"),
help_text=_('Indicates what learning preference to use.'),
default=BLENDED_LEARNING_PREFERENCE,
choices=LEARNING_PREFERENCE_OPTIONS,
)
challenge = models.PositiveSmallIntegerField(
_("Challenge"),
help_text=_('Indicates what world challenge to use.'),
default=REAL_WORLD_CHALLENGE,
choices=CHALLENGE_OPTIONS,
)
has_mentors = models.BooleanField(
_("Has mentors."),
default=True,
help_text=_('Variable controls whether external mentors are allowed in our system.'),
)
has_perks = models.BooleanField(
_("Has perks."),
default=True,
help_text=_('Variable controls whether perks are allowed in our system.'),
)
time_zone = models.CharField(
_("Timezone"),
max_length=255,
help_text=_('The timezone this Organization belongs to.'),
blank=True,
null=True,
default='America/Toronto',
)
salt = models.CharField(
_("Salt"),
max_length=127,
help_text=_('The unique salt value for this Organization which is used in cryptographic signing.'),
default=generate_hash,
unique=True,
blank=True
)
has_staff_checkin_required = models.BooleanField(
_("Has staff check-ins required"),
blank=True,
default=True,
help_text=_('Variable controls whether advisor checks are required for moving forward with bizumla creation.')
)
amazon_affiliate_link_url = models.URLField(
_("Amazon Affiliate Link URL"),
blank=True,
default=''
)
def __str__(self):
return str(self.legal_name)
def reverse(self, view_name):
if self.schema_name:
return settings.SMEGURUS_APP_HTTP_PROTOCOL + self.schema_name + '.%s' % settings.SMEGURUS_APP_HTTP_DOMAIN + reverse(view_name)
else:
return settings.SMEGURUS_APP_HTTP_PROTOCOL + '%s' % settings.SMEGURUS_APP_HTTP_DOMAIN + reverse(view_name)
def load_schema(self):
from django.db import connection
connection.set_schema(self.schema_name, True)
class PublicDomain(DomainMixin):
class Meta:
app_label = 'foundation_public'
db_table = 'smeg_domains'
verbose_name = 'Domain'
verbose_name_plural = 'Domains'
pass
| true
| true
|
79023d7f878ceb8595925f0375ba749ae244eb01
| 14,403
|
py
|
Python
|
MasterScripts/systemprep-linuxmaster.py
|
plus3it/SystemPrep
|
8398093ce6a3c599eca463f9e0245cf5a7d9b896
|
[
"Apache-2.0"
] | 9
|
2016-08-30T19:07:31.000Z
|
2019-11-13T23:32:28.000Z
|
MasterScripts/systemprep-linuxmaster.py
|
plus3it/SystemPrep
|
8398093ce6a3c599eca463f9e0245cf5a7d9b896
|
[
"Apache-2.0"
] | 30
|
2015-12-23T17:41:11.000Z
|
2017-05-25T11:08:25.000Z
|
MasterScripts/systemprep-linuxmaster.py
|
plus3it/SystemPrep
|
8398093ce6a3c599eca463f9e0245cf5a7d9b896
|
[
"Apache-2.0"
] | 12
|
2015-11-16T14:33:49.000Z
|
2019-11-13T23:32:38.000Z
|
#!/usr/bin/env python
import os
import sys
import platform
import tempfile
import urllib2
import shutil
import boto
from boto.exception import BotoClientError
def merge_dicts(a, b):
"""
Merge two dictionaries. If there is a key collision, `b` overrides `a`.
:param a: Dictionary of default settings
:param b: Dictionary of override settings
:rtype : dict
"""
try:
a.update(b)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Failed to merge dictionaries. Dictionary A:\n\n'
'{0}\n\n'
'Dictionary B:\n\n'
'{1}\n\n'
'Exception: {2}'
.format(a, b, exc))
return a
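# Illustrative usage: merge_dicts({'x': 1, 'y': 2}, {'y': 9}) returns {'x': 1, 'y': 9};
# note that the first dictionary is mutated in place by dict.update().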
def get_scripts_to_execute(system, workingdir, **scriptparams):
"""
    Returns an array of hashtables. Each hashtable has two keys: 'ScriptSource' and 'Parameters'.
    'ScriptSource' is the URL of the script to be executed. Only supports http/s sources currently.
'Parameters' is a hashtable of parameters to pass to the script.
Use `merge_dicts({yourdict}, scriptparams)` to merge command line parameters with a set of default parameters.
:param system: str, the system type as returned from `platform.system`
:param workingdir: str, the working directory where content should be saved
:param scriptparams: dict, parameters passed to the master script which should be relayed to the content scripts
:rtype : dict
"""
if 'Linux' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/systemprep-linuxyumrepoinstall.py",
'Parameters': merge_dicts({
'yumrepomap': [
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-amzn.repo',
'dist': 'amazon',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'redhat',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'centos',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'redhat',
'epel_version': '7',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'centos',
'epel_version': '7',
},
],
}, scriptparams)
},
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/SystemPrep-LinuxSaltInstall.py",
'Parameters': merge_dicts({
'saltinstallmethod': 'yum',
'saltcontentsource': "https://systemprep-content.s3.amazonaws.com/linux/salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-linux-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/join-domain-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/scc-formula-master.zip",
"https://s3.amazonaws.com/salt-formulas/name-computer-formula-master.zip",
],
'formulaterminationstrings': [
"-master",
"-latest",
],
'saltstates': 'Highstate',
'entenv': 'False',
'salt_results_log': '/var/log/saltcall.results.log',
'salt_debug_log': '/var/log/saltcall.debug.log',
'sourceiss3bucket': 'True',
}, scriptparams)
},
)
elif 'Windows' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/SystemPrep-WindowsSaltInstall.ps1",
'Parameters': merge_dicts({
'saltworkingdir': '{0}\\SystemContent\\Windows\\Salt'.format(workingdir),
'saltcontentsource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-windows-formula-master.zip",
],
'formulaterminationstrings': [
"-latest",
],
'ashrole': "MemberServer",
'entenv': 'False',
'saltstates': "Highstate",
}, scriptparams)
},
)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized?'.format(system))
return scriptstoexecute
def create_working_dir(basedir, dirprefix):
"""
Creates a directory in `basedir` with a prefix of `dirprefix`.
The directory will have a random 5 character string appended to `dirprefix`.
Returns the path to the working directory.
:rtype : str
:param basedir: str, the directory in which to create the working directory
:param dirprefix: str, prefix to prepend to the working directory
"""
workingdir = None
try:
workingdir = tempfile.mkdtemp(prefix=dirprefix, dir=basedir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Could not create workingdir in {0}.\n'
'Exception: {1}'.format(basedir, exc))
return workingdir
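# Illustrative usage: create_working_dir('/usr/tmp/', 'systemprep-') returns something like
# '/usr/tmp/systemprep-XXXXXXXX', where the suffix is randomized by tempfile.mkdtemp.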
def get_system_params(system):
"""
Returns a dictionary of OS platform-specific parameters.
:param system: str, the system type as returned by `platform.system`
:rtype : dict
"""
a = {}
workingdirprefix = 'systemprep-'
if 'Linux' in system:
tempdir = '/usr/tmp/'
a['pathseparator'] = '/'
a['readyfile'] = '/var/run/system-is-ready'
a['restart'] = 'shutdown -r +1 &'
elif 'Windows' in system:
#TODO: Add and test the Windows parameters/functionality
systemroot = os.environ['SYSTEMROOT']
systemdrive = os.environ['SYSTEMDRIVE']
tempdir = os.environ['TEMP']
a['pathseparator'] = '\\'
        a['readyfile'] = '{0}\\system-is-ready'.format(systemdrive)
        a['restart'] = '{0}\\system32\\shutdown.exe /r /t 30 /d p:2:4 /c "SystemPrep complete. Rebooting computer."'.format(systemroot)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized?'.format(system))
a['workingdir'] = create_working_dir(tempdir, workingdirprefix)
return a
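# Illustrative Linux result: {'pathseparator': '/', 'readyfile': '/var/run/system-is-ready',
#                             'restart': 'shutdown -r +1 &', 'workingdir': '/usr/tmp/systemprep-XXXXXXXX'}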
def download_file(url, filename, sourceiss3bucket=None):
"""
Download the file from `url` and save it locally under `filename`.
:rtype : bool
:param url:
:param filename:
:param sourceiss3bucket:
"""
conn = None
if sourceiss3bucket:
bucket_name = url.split('/')[3]
key_name = '/'.join(url.split('/')[4:])
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except (NameError, BotoClientError):
try:
bucket_name = url.split('/')[2].split('.')[0]
key_name = '/'.join(url.split('/')[3:])
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
print('Downloaded file from S3 bucket -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
else:
try:
response = urllib2.urlopen(url)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Unable to download file from web server.\n'
'url = {0}\n'
'filename = {1}\n'
'Exception: {2}'
.format(url, filename, exc))
print('Downloaded file from web server -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
return True
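# Illustrative usage: download_file('https://example.com/script.py', '/usr/tmp/script.py')
# fetches over plain HTTP(S); pass sourceiss3bucket=True to fetch the same URL through boto/S3 instead.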
def cleanup(workingdir):
"""
Removes temporary files loaded to the system.
:param workingdir: str, Path to the working directory
:return: bool
"""
print('+-' * 40)
print('Cleanup Time...')
try:
shutil.rmtree(workingdir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Cleanup Failed!\n'
'Exception: {0}'.format(exc))
print('Removed temporary data in working directory -- ' + workingdir)
print('Exiting cleanup routine...')
print('-+' * 40)
return True
def main(noreboot = 'false', **kwargs):
"""
Master script that calls content scripts to be deployed when provisioning systems
"""
# NOTE: Using __file__ may freeze if trying to build an executable, e.g. via py2exe.
# NOTE: Using __file__ does not work if running from IDLE/interpreter.
# NOTE: __file__ may return relative path as opposed to an absolute path, so include os.path.abspath.
scriptname = ''
if '__file__' in dir():
scriptname = os.path.abspath(__file__)
else:
scriptname = os.path.abspath(sys.argv[0])
# Check special parameter types
noreboot = 'true' == noreboot.lower()
sourceiss3bucket = 'true' == kwargs.get('sourceiss3bucket', 'false').lower()
print('+' * 80)
print('Entering script -- {0}'.format(scriptname))
print('Printing parameters --')
print(' noreboot = {0}'.format(noreboot))
for key, value in kwargs.items():
print(' {0} = {1}'.format(key, value))
system = platform.system()
systemparams = get_system_params(system)
scriptstoexecute = get_scripts_to_execute(system, systemparams['workingdir'], **kwargs)
#Loop through each 'script' in scriptstoexecute
for script in scriptstoexecute:
url = script['ScriptSource']
filename = url.split('/')[-1]
fullfilepath = systemparams['workingdir'] + systemparams['pathseparator'] + filename
#Download each script, script['ScriptSource']
download_file(url, fullfilepath, sourceiss3bucket)
#Execute each script, passing it the parameters in script['Parameters']
#TODO: figure out if there's a better way to call and execute the script
print('Running script -- ' + script['ScriptSource'])
print('Sending parameters --')
for key, value in script['Parameters'].items():
print(' {0} = {1}'.format(key, value))
paramstring = ' '.join("%s='%s'" % (key, val) for (key, val) in script['Parameters'].iteritems())
fullcommand = 'python {0} {1}'.format(fullfilepath, paramstring)
result = os.system(fullcommand)
        if result != 0:
message = 'Encountered an unrecoverable error executing a ' \
'content script. Exiting with failure.\n' \
'Command executed: {0}' \
.format(fullcommand)
raise SystemError(message)
cleanup(systemparams['workingdir'])
if noreboot:
print('Detected `noreboot` switch. System will not be rebooted.')
else:
print('Reboot scheduled. System will reboot after the script exits.')
os.system(systemparams['restart'])
print('{0} complete!'.format(scriptname))
print('-' * 80)
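# Illustrative invocation (parameters are passed as key=value pairs, parsed below):
#   python systemprep-linuxmaster.py noreboot=true sourceiss3bucket=true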
if "__main__" == __name__:
# Convert command line parameters of the form `param=value` to a dictionary.
# NOTE: Keys are stored in lowercase format.
kwargs = {}
for x in sys.argv[1:]:
if '=' in x:
[key, value] = x.split('=', 1)
kwargs[key.lower()] = value
else:
message = 'Encountered a parameter that does not have = in it.'
raise SystemError(message)
# NOTE: We are unpacking kwargs to obtain the noreboot parameter for the main
# definition. The rest are packed back into kwargs.
# TODO: This is not necessary and consumes a minor overhead. I would just pass along the dictionary.
# However, since we will be moving to using argparse, this will become obsolete.
main(**kwargs)
| 41.507205
| 133
| 0.545789
|
import os
import sys
import platform
import tempfile
import urllib2
import shutil
import boto
from boto.exception import BotoClientError
def merge_dicts(a, b):
try:
a.update(b)
except Exception as exc:
raise SystemError('Failed to merge dictionaries. Dictionary A:\n\n'
'{0}\n\n'
'Dictionary B:\n\n'
'{1}\n\n'
'Exception: {2}'
.format(a, b, exc))
return a
def get_scripts_to_execute(system, workingdir, **scriptparams):
if 'Linux' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/systemprep-linuxyumrepoinstall.py",
'Parameters': merge_dicts({
'yumrepomap': [
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-amzn.repo',
'dist': 'amazon',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'redhat',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'centos',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'redhat',
'epel_version': '7',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'centos',
'epel_version': '7',
},
],
}, scriptparams)
},
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/SystemPrep-LinuxSaltInstall.py",
'Parameters': merge_dicts({
'saltinstallmethod': 'yum',
'saltcontentsource': "https://systemprep-content.s3.amazonaws.com/linux/salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-linux-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/join-domain-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/scc-formula-master.zip",
"https://s3.amazonaws.com/salt-formulas/name-computer-formula-master.zip",
],
'formulaterminationstrings': [
"-master",
"-latest",
],
'saltstates': 'Highstate',
'entenv': 'False',
'salt_results_log': '/var/log/saltcall.results.log',
'salt_debug_log': '/var/log/saltcall.debug.log',
'sourceiss3bucket': 'True',
}, scriptparams)
},
)
elif 'Windows' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/SystemPrep-WindowsSaltInstall.ps1",
'Parameters': merge_dicts({
'saltworkingdir': '{0}\\SystemContent\\Windows\\Salt'.format(workingdir),
'saltcontentsource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-windows-formula-master.zip",
],
'formulaterminationstrings': [
"-latest",
],
'ashrole': "MemberServer",
'entenv': 'False',
'saltstates': "Highstate",
}, scriptparams)
},
)
else:
raise SystemError('System, {0}, is not recognized?'.format(system))
return scriptstoexecute
def create_working_dir(basedir, dirprefix):
workingdir = None
try:
workingdir = tempfile.mkdtemp(prefix=dirprefix, dir=basedir)
except Exception as exc:
raise SystemError('Could not create workingdir in {0}.\n'
'Exception: {1}'.format(basedir, exc))
return workingdir
def get_system_params(system):
a = {}
workingdirprefix = 'systemprep-'
if 'Linux' in system:
tempdir = '/usr/tmp/'
a['pathseparator'] = '/'
a['readyfile'] = '/var/run/system-is-ready'
a['restart'] = 'shutdown -r +1 &'
elif 'Windows' in system:
systemroot = os.environ['SYSTEMROOT']
systemdrive = os.environ['SYSTEMDRIVE']
tempdir = os.environ['TEMP']
a['pathseparator'] = '\\'
        a['readyfile'] = '{0}\\system-is-ready'.format(systemdrive)
        a['restart'] = '{0}\\system32\\shutdown.exe /r /t 30 /d p:2:4 /c "SystemPrep complete. Rebooting computer."'.format(systemroot)
else:
raise SystemError('System, {0}, is not recognized?'.format(system))
a['workingdir'] = create_working_dir(tempdir, workingdirprefix)
return a
def download_file(url, filename, sourceiss3bucket=None):
conn = None
if sourceiss3bucket:
bucket_name = url.split('/')[3]
key_name = '/'.join(url.split('/')[4:])
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except (NameError, BotoClientError):
try:
bucket_name = url.split('/')[2].split('.')[0]
key_name = '/'.join(url.split('/')[3:])
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
print('Downloaded file from S3 bucket -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
else:
try:
response = urllib2.urlopen(url)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except Exception as exc:
raise SystemError('Unable to download file from web server.\n'
'url = {0}\n'
'filename = {1}\n'
'Exception: {2}'
.format(url, filename, exc))
print('Downloaded file from web server -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
return True
def cleanup(workingdir):
print('+-' * 40)
print('Cleanup Time...')
try:
shutil.rmtree(workingdir)
except Exception as exc:
raise SystemError('Cleanup Failed!\n'
'Exception: {0}'.format(exc))
print('Removed temporary data in working directory -- ' + workingdir)
print('Exiting cleanup routine...')
print('-+' * 40)
return True
def main(noreboot = 'false', **kwargs):
scriptname = ''
if '__file__' in dir():
scriptname = os.path.abspath(__file__)
else:
scriptname = os.path.abspath(sys.argv[0])
noreboot = 'true' == noreboot.lower()
sourceiss3bucket = 'true' == kwargs.get('sourceiss3bucket', 'false').lower()
print('+' * 80)
print('Entering script -- {0}'.format(scriptname))
print('Printing parameters --')
print(' noreboot = {0}'.format(noreboot))
for key, value in kwargs.items():
print(' {0} = {1}'.format(key, value))
system = platform.system()
systemparams = get_system_params(system)
scriptstoexecute = get_scripts_to_execute(system, systemparams['workingdir'], **kwargs)
for script in scriptstoexecute:
url = script['ScriptSource']
filename = url.split('/')[-1]
fullfilepath = systemparams['workingdir'] + systemparams['pathseparator'] + filename
download_file(url, fullfilepath, sourceiss3bucket)
print('Running script -- ' + script['ScriptSource'])
print('Sending parameters --')
for key, value in script['Parameters'].items():
print(' {0} = {1}'.format(key, value))
paramstring = ' '.join("%s='%s'" % (key, val) for (key, val) in script['Parameters'].iteritems())
fullcommand = 'python {0} {1}'.format(fullfilepath, paramstring)
result = os.system(fullcommand)
        if result != 0:
message = 'Encountered an unrecoverable error executing a ' \
'content script. Exiting with failure.\n' \
'Command executed: {0}' \
.format(fullcommand)
raise SystemError(message)
cleanup(systemparams['workingdir'])
if noreboot:
print('Detected `noreboot` switch. System will not be rebooted.')
else:
print('Reboot scheduled. System will reboot after the script exits.')
os.system(systemparams['restart'])
print('{0} complete!'.format(scriptname))
print('-' * 80)
if "__main__" == __name__:
# Convert command line parameters of the form `param=value` to a dictionary.
# NOTE: Keys are stored in lowercase format.
kwargs = {}
for x in sys.argv[1:]:
if '=' in x:
[key, value] = x.split('=', 1)
kwargs[key.lower()] = value
else:
message = 'Encountered a parameter that does not have = in it.'
raise SystemError(message)
# NOTE: We are unpacking kwargs to obtain the noreboot parameter for the main
# definition. The rest are packed back into kwargs.
# TODO: This is not necessary and consumes a minor overhead. I would just pass along the dictionary.
# However, since we will be moving to using argparse, this will become obsolete.
main(**kwargs)
| true
| true
|
79023de4c9ef33e9f01a0cc8011ec1c6f256f074
| 1,026
|
py
|
Python
|
tensorflow_zero_out/python/ops/convert_to_tflite.py
|
yuko29/tflite_custom_op
|
66df2c5ade62b04b920034e7721c4b6afc60e942
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_zero_out/python/ops/convert_to_tflite.py
|
yuko29/tflite_custom_op
|
66df2c5ade62b04b920034e7721c4b6afc60e942
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_zero_out/python/ops/convert_to_tflite.py
|
yuko29/tflite_custom_op
|
66df2c5ade62b04b920034e7721c4b6afc60e942
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import tensorflow_zero_out
import numpy as np
import os
# Create a model using low-level tf.* APIs
class ZeroOut(tf.Module):
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.int32)])
def __call__(self, x):
return tensorflow_zero_out.zero_out(x)
model = ZeroOut()
# (to run your model) result = model(tf.constant([1, 2, 3], dtype=tf.int32))
# (to generate a SavedModel) tf.saved_model.save(model, "saved_model_tf_dir")
concrete_func = model.__call__.get_concrete_function()
# Convert the model.
# Notes that for the versions earlier than TensorFlow 2.7, the
# from_concrete_functions API is able to work when there is only the first
# argument given:
# > converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func], model)
tflite_model = converter.convert()
# Save the model.
with open('model.tflite', 'wb') as f:
f.write(tflite_model)
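# Note: to run model.tflite with tf.lite.Interpreter, the ZeroOut custom op kernel must be
# registered with (linked into) the interpreter; the stock TFLite runtime cannot resolve it.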
| 38
| 80
| 0.722222
|
import tensorflow as tf
import tensorflow_zero_out
import numpy as np
import os
class ZeroOut(tf.Module):
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.int32)])
def __call__(self, x):
return tensorflow_zero_out.zero_out(x)
model = ZeroOut()
concrete_func = model.__call__.get_concrete_function()
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func], model)
tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
f.write(tflite_model)
| true
| true
|
79023ef3bb81610a3394ea5c6833a18d237823c9
| 4,344
|
py
|
Python
|
test/test_attention.py
|
jeongwhanchoi/graph-neural-pde
|
4323db3bb3badbcfc3c569635b7f8f072946528d
|
[
"Apache-2.0"
] | 125
|
2021-06-16T09:36:18.000Z
|
2022-03-26T00:16:22.000Z
|
test/test_attention.py
|
jeongwhanchoi/graph-neural-pde
|
4323db3bb3badbcfc3c569635b7f8f072946528d
|
[
"Apache-2.0"
] | 8
|
2021-06-23T04:49:12.000Z
|
2022-03-28T20:25:47.000Z
|
test/test_attention.py
|
jeongwhanchoi/graph-neural-pde
|
4323db3bb3badbcfc3c569635b7f8f072946528d
|
[
"Apache-2.0"
] | 20
|
2021-06-23T06:55:35.000Z
|
2022-03-21T17:04:17.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test attention
"""
import unittest
import torch
from torch import tensor
from torch import nn
from function_GAT_attention import SpGraphAttentionLayer, ODEFuncAtt
from torch_geometric.utils import softmax, to_dense_adj
from data import get_dataset
class AttentionTests(unittest.TestCase):
def setUp(self):
self.edge = tensor([[0, 2, 2, 1], [1, 0, 1, 2]])
self.x = tensor([[1., 2.], [3., 2.], [4., 5.]], dtype=torch.float)
self.W = tensor([[2, 1], [3, 2]], dtype=torch.float)
self.alpha = tensor([[1, 2, 3, 4]], dtype=torch.float)
self.edge1 = tensor([[0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1]])
self.x1 = torch.ones((3, 2), dtype=torch.float)
self.leakyrelu = nn.LeakyReLU(0.2)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.opt = {'dataset': 'Cora', 'self_loop_weight': 1, 'leaky_relu_slope': 0.2, 'beta_dim': 'vc', 'heads': 2,
'K': 10,
'attention_norm_idx': 0, 'add_source': False, 'max_nfe': 1000, 'mix_features': False,
'attention_dim': 32,
'mixed_block': False, 'rewiring': None, 'no_alpha_sigmoid': False, 'reweight_attention': False,
'kinetic_energy': None, 'jacobian_norm2': None, 'total_deriv': None, 'directional_penalty': None}
def tearDown(self) -> None:
pass
def test(self):
h = torch.mm(self.x, self.W)
edge_h = torch.cat((h[self.edge[0, :], :], h[self.edge[1, :], :]), dim=1)
self.assertTrue(edge_h.shape == torch.Size([self.edge.shape[1], 2 * 2]))
ah = self.alpha.mm(edge_h.t()).t()
self.assertTrue(ah.shape == torch.Size([self.edge.shape[1], 1]))
edge_e = self.leakyrelu(ah)
attention = softmax(edge_e, self.edge[1])
print(attention)
def test_function(self):
in_features = self.x.shape[1]
out_features = self.x.shape[1]
def get_round_sum(tens, n_digits=3):
val = torch.sum(tens, dim=int(not self.opt['attention_norm_idx']))
return (val * 10 ** n_digits).round() / (10 ** n_digits)
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(self.x, self.edge) # should be n_edges x n_heads
self.assertTrue(attention.shape == (self.edge.shape[1], self.opt['heads']))
dense_attention1 = to_dense_adj(self.edge, edge_attr=attention[:, 0]).squeeze()
dense_attention2 = to_dense_adj(self.edge, edge_attr=attention[:, 1]).squeeze()
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention1), 1.)))
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention2), 1.)))
self.assertTrue(torch.all(attention > 0.))
self.assertTrue(torch.all(attention <= 1.))
dataset = get_dataset(self.opt, '../data', False)
data = dataset.data
in_features = data.x.shape[1]
out_features = data.x.shape[1]
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(data.x, data.edge_index) # should be n_edges x n_heads
self.assertTrue(attention.shape == (data.edge_index.shape[1], self.opt['heads']))
dense_attention1 = to_dense_adj(data.edge_index, edge_attr=attention[:, 0]).squeeze()
dense_attention2 = to_dense_adj(data.edge_index, edge_attr=attention[:, 1]).squeeze()
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention1), 1.)))
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention2), 1.)))
self.assertTrue(torch.all(attention > 0.))
self.assertTrue(torch.all(attention <= 1.))
def test_symetric_attention(self):
in_features = self.x1.shape[1]
out_features = self.x1.shape[1]
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(self.x1, self.edge1) # should be n_edges x n_heads
self.assertTrue(torch.all(torch.eq(attention, 0.5 * torch.ones((self.edge1.shape[1], self.x1.shape[1])))))
def test_module(self):
dataset = get_dataset(self.opt, '../data', False)
t = 1
out_dim = 6
func = ODEFuncAtt(dataset.data.num_features, out_dim, self.opt, dataset.data, self.device)
out = func(t, dataset.data.x)
print(out.shape)
self.assertTrue(out.shape == (dataset.data.num_nodes, dataset.num_features))
| 44.326531
| 113
| 0.67058
|
import unittest
import torch
from torch import tensor
from torch import nn
from function_GAT_attention import SpGraphAttentionLayer, ODEFuncAtt
from torch_geometric.utils import softmax, to_dense_adj
from data import get_dataset
class AttentionTests(unittest.TestCase):
def setUp(self):
self.edge = tensor([[0, 2, 2, 1], [1, 0, 1, 2]])
self.x = tensor([[1., 2.], [3., 2.], [4., 5.]], dtype=torch.float)
self.W = tensor([[2, 1], [3, 2]], dtype=torch.float)
self.alpha = tensor([[1, 2, 3, 4]], dtype=torch.float)
self.edge1 = tensor([[0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1]])
self.x1 = torch.ones((3, 2), dtype=torch.float)
self.leakyrelu = nn.LeakyReLU(0.2)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.opt = {'dataset': 'Cora', 'self_loop_weight': 1, 'leaky_relu_slope': 0.2, 'beta_dim': 'vc', 'heads': 2,
'K': 10,
'attention_norm_idx': 0, 'add_source': False, 'max_nfe': 1000, 'mix_features': False,
'attention_dim': 32,
'mixed_block': False, 'rewiring': None, 'no_alpha_sigmoid': False, 'reweight_attention': False,
'kinetic_energy': None, 'jacobian_norm2': None, 'total_deriv': None, 'directional_penalty': None}
def tearDown(self) -> None:
pass
def test(self):
h = torch.mm(self.x, self.W)
edge_h = torch.cat((h[self.edge[0, :], :], h[self.edge[1, :], :]), dim=1)
self.assertTrue(edge_h.shape == torch.Size([self.edge.shape[1], 2 * 2]))
ah = self.alpha.mm(edge_h.t()).t()
self.assertTrue(ah.shape == torch.Size([self.edge.shape[1], 1]))
edge_e = self.leakyrelu(ah)
attention = softmax(edge_e, self.edge[1])
print(attention)
def test_function(self):
in_features = self.x.shape[1]
out_features = self.x.shape[1]
def get_round_sum(tens, n_digits=3):
val = torch.sum(tens, dim=int(not self.opt['attention_norm_idx']))
return (val * 10 ** n_digits).round() / (10 ** n_digits)
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(self.x, self.edge)
self.assertTrue(attention.shape == (self.edge.shape[1], self.opt['heads']))
dense_attention1 = to_dense_adj(self.edge, edge_attr=attention[:, 0]).squeeze()
dense_attention2 = to_dense_adj(self.edge, edge_attr=attention[:, 1]).squeeze()
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention1), 1.)))
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention2), 1.)))
self.assertTrue(torch.all(attention > 0.))
self.assertTrue(torch.all(attention <= 1.))
dataset = get_dataset(self.opt, '../data', False)
data = dataset.data
in_features = data.x.shape[1]
out_features = data.x.shape[1]
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(data.x, data.edge_index)
self.assertTrue(attention.shape == (data.edge_index.shape[1], self.opt['heads']))
dense_attention1 = to_dense_adj(data.edge_index, edge_attr=attention[:, 0]).squeeze()
dense_attention2 = to_dense_adj(data.edge_index, edge_attr=attention[:, 1]).squeeze()
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention1), 1.)))
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention2), 1.)))
self.assertTrue(torch.all(attention > 0.))
self.assertTrue(torch.all(attention <= 1.))
def test_symetric_attention(self):
in_features = self.x1.shape[1]
out_features = self.x1.shape[1]
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(self.x1, self.edge1)
self.assertTrue(torch.all(torch.eq(attention, 0.5 * torch.ones((self.edge1.shape[1], self.x1.shape[1])))))
def test_module(self):
dataset = get_dataset(self.opt, '../data', False)
t = 1
out_dim = 6
func = ODEFuncAtt(dataset.data.num_features, out_dim, self.opt, dataset.data, self.device)
out = func(t, dataset.data.x)
print(out.shape)
self.assertTrue(out.shape == (dataset.data.num_nodes, dataset.num_features))
| true
| true
|