# (extraction residue from dataset tooling — not part of the original source)
# input stringlengths 2.65k 237k | output stringclasses 1
+ '_sign_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(minmax_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Minmax score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_minmax_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(sign_minmax_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i,j].set_ylim(0,800)
ax[i, j].set_xlabel('Sign minmax score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_sign_minmax_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(logistic_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Logistic score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_logistic_expl_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(tanh_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Tanh score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_tanh_expl_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(raw_mean_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Raw mean score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_raw_mean_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(mean_norm_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Mean score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_mean_norm_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(softmax_sum_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Soft-max', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_softmax_sum_expl' + str(
sim_counter) + '.' + args.format)
plt.close('all')
for b in range(0, np.size(args.beta)):
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(softmax_mean_expl[b][:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Raw score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[
cl] + '_softmax_mean_expl_beta_' + str(args.beta[b]) + '_' + str(
sim_counter) + '.' + args.format)
print('Done')
def plot_sensory(args):
"""
Plots of the results obtained from the leanring model (VLM function).
"""
# Colors
color = ['r', 'b', 'k', 'orange', 'magenta', 'purple']
# Repertoire
classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
p95_mean = np.zeros((len(args.learning_rate), args.n_points + 1, len(classes)))
for lr in range(0, len(args.learning_rate)):
print(args.learning_rate[lr])
for cl in range(0, len(args.classifier_name)):
print(args.classifier_name[cl])
p95_all_sim = []
for sim_counter in range(0, args.N_sim):
p95 = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_p95_sim_' + str(sim_counter) + '.npy')
p95_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_p95_expl_' + str(sim_counter) + '.npy')
# Focus on 200 time steps
p95_focus = p95[0:200, :]
# Remove focus (every N points up to 200 points) - CHECK PLOT
p95_begin = p95[0:200, :]
p95_jump = np.zeros((args.n_points + 1, np.size(args.T_names)))
p95_jump[0:14, :] = p95_begin[0::15, :]
p95_jump[14::, :] = p95[200::, :]
# All sim vector
p95_all_sim.append(p95_jump)
# Time vector
x_time = np.linspace(0, args.MAX_trial, np.shape(p95_jump)[0])
x_time_expl = np.linspace(0, np.shape(p95_expl)[0], np.shape(p95_expl)[0])
x_time_focus = np.linspace(0, np.shape(p95_focus)[0], np.shape(p95_focus)[0])
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time_focus, p95_focus[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, np.shape(p95_focus)[0])
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_FOCUS_sim' + str(
sim_counter) + '.' + args.format)
W_p95 = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_W_p95_sim_' + str(sim_counter) + '.npy')[0:args.MAX_trial, :, :]
# Plot the evolution of the synaptic weights over trials
if np.size(args.T_names) == len(classes):
fig, ax = plt.subplots(4, 4, sharex='col', sharey='row', figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
for k in range(0, args.wavegan_latent_dim):
ax[i, j].plot(x_time_expl, W_p95[:, k, 4 * i + j], color[k])
ax[i, j].set_ylabel('Weights', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i,j].set_ylim(-1,1)
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + 'Synaptic_weights_evolution_p95' + str(sim_counter) + '.' + args.format)
# Plot activation of the exploration
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time_expl, p95_expl[:, 4 * i + j], 'b')
#ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_expl' + str(
sim_counter) + '.' + args.format)
# Plot activation during learning
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time, p95_all_sim[sim_counter][:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial-1)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_sim' + str(
sim_counter) + '.' + args.format)
# [TODO] add comment here when I try this option
if args.example == True:
if sim_counter == 1:
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5), sharey=True, sharex=True)
for lr in range(0, len(args.learning_rate)):
ax.plot(x_time, p95_all_sim[sim_counter][:, 14], 'b')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.set_xlim(0, args.MAX_trial)
ax.set_xlabel('Time (in number of time steps)', fontsize=15)
ax.set_ylabel('Activation', fontsize=15)
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_R' + '.' + args.format)
plt.close('all')
# Average over multiple simulations
p95_mean_sim = np.mean(p95_all_sim, axis=0)
p95_mean[lr, :, :] = p95_mean_sim
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for sim_counter in range(0, args.N_sim):
for i in range(0, 4):
for j in range(0, 4):
#ax[i, j].plot(x_time, np.ones((np.shape(p95)[0], 1)), 'k')
ax[i, j].plot(x_time, p95_all_sim[sim_counter][:, 4 * i + j], c=color[sim_counter], alpha=.7)
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + | |
# File: code/ARAX/test/test_ARAX_resultify.py
#!/usr/bin/env python3
# Usage: python3 ARAX_resultify_testcases.py
# python3 ARAX_resultify_testcases.py test_issue692
import os
import sys
import pytest
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../ARAXQuery")
from response import Response
from typing import List, Union, Dict, Tuple
import ARAX_resultify
from ARAX_resultify import ARAXResultify
from ARAX_query import ARAXQuery
# is there a better way to import swagger_server? Following SO posting 16981921
PACKAGE_PARENT = '../../UI/OpenAPI/python-flask-server'
sys.path.append(os.path.normpath(os.path.join(os.getcwd(), PACKAGE_PARENT)))
from swagger_server.models.edge import Edge
from swagger_server.models.node import Node
from swagger_server.models.q_edge import QEdge
from swagger_server.models.q_node import QNode
from swagger_server.models.query_graph import QueryGraph
from swagger_server.models.knowledge_graph import KnowledgeGraph
from swagger_server.models.result import Result
from swagger_server.models.message import Message
def _slim_kg(kg: KnowledgeGraph) -> KnowledgeGraph:
    """Return a copy of the knowledge graph keeping only test-relevant fields."""
    def _slim_node(node):
        # Keep only the identifying attributes plus the qnode binding.
        return Node(id=node.id, type=node.type, name=node.name,
                    qnode_ids=node.qnode_ids)

    def _slim_edge(edge):
        return Edge(id=edge.id, source_id=edge.source_id,
                    target_id=edge.target_id, type=edge.type,
                    qedge_ids=edge.qedge_ids)

    return KnowledgeGraph(nodes=[_slim_node(node) for node in kg.nodes],
                          edges=[_slim_edge(edge) for edge in kg.edges])
def _create_node(node_id: str, node_type: List[str], qnode_ids: List[str], node_name: str = None) -> Node:
    """Build a KG Node and attach its qnode_ids binding."""
    new_node = Node(id=node_id, type=node_type, name=node_name)
    # qnode_ids is not (yet?) a real swagger class attribute, so it must be
    # assigned after construction rather than passed to the initializer.
    new_node.qnode_ids = qnode_ids
    return new_node
def _create_edge(edge_id: str, source_id: str, target_id: str, qedge_ids: List[str], edge_type: str = None) -> Edge:
    """Build a KG Edge and attach its qedge_ids binding."""
    new_edge = Edge(id=edge_id, source_id=source_id, target_id=target_id,
                    type=edge_type)
    # qedge_ids is not (yet?) a real swagger class attribute, so it must be
    # assigned after construction rather than passed to the initializer.
    new_edge.qedge_ids = qedge_ids
    return new_edge
def _print_results_for_debug(results: List[Result]):
    """Dump each result's essence and its node/edge bindings to stdout."""
    print()
    for res in results:
        print(res.essence)
        for binding in res.node_bindings:
            print(f"  {binding.qg_id}: {binding.kg_id}")
        for binding in res.edge_bindings:
            print(f"  {binding.qg_id}: {binding.kg_id}")
def _get_result_nodes_by_qg_id(result: Result, kg_nodes_map: Dict[str, Node], qg: QueryGraph) -> Dict[str, Dict[str, Node]]:
    """Group the result's bound KG nodes by the QG node they fulfill."""
    nodes_by_qnode = {}
    for qnode in qg.nodes:
        matched = {}
        for binding in result.node_bindings:
            if binding.qg_id == qnode.id:
                matched[binding.kg_id] = kg_nodes_map[binding.kg_id]
        nodes_by_qnode[qnode.id] = matched
    return nodes_by_qnode
def _get_result_edges_by_qg_id(result: Result, kg_edges_map: Dict[str, Edge], qg: QueryGraph) -> Dict[str, Dict[str, Edge]]:
    """Group the result's bound KG edges by the QG edge they fulfill."""
    edges_by_qedge = {}
    for qedge in qg.edges:
        matched = {}
        for binding in result.edge_bindings:
            if binding.qg_id == qedge.id:
                matched[binding.kg_id] = kg_edges_map[binding.kg_id]
        edges_by_qedge[qedge.id] = matched
    return edges_by_qedge
def _do_arax_query(actions_list: List[str], debug=False) -> Tuple[Response, Message]:
    """Run a processing-action plan through ARAXQuery and return (response, message)."""
    plan = {"previous_message_processing_plan": {"processing_actions": actions_list}}
    araxq = ARAXQuery()
    response = araxq.query(plan)
    message = araxq.message
    # On failure (or on explicit request) dump the results for easier debugging.
    if debug or response.status != 'OK':
        _print_results_for_debug(message.results)
        print(response.show(level=response.DEBUG))
    return response, message
def _run_resultify_directly(query_graph: QueryGraph,
                            knowledge_graph: KnowledgeGraph,
                            ignore_edge_direction=True,
                            debug=False) -> Tuple[Response, Message]:
    """Invoke ARAXResultify.apply() directly on a QG/KG pair, bypassing ARAXQuery."""
    from actions_parser import ActionsParser

    response = Response()
    # Parse a single resultify action so we get a realistic parameters dict.
    parse_result = ActionsParser().parse(
        [f"resultify(ignore_edge_direction={ignore_edge_direction})"])
    response.merge(parse_result)
    assert parse_result.status == 'OK'
    actions = parse_result.data['actions']

    message = Message(query_graph=query_graph,
                      knowledge_graph=knowledge_graph,
                      results=[])
    parameters = actions[0]['parameters']
    parameters['debug'] = 'true'
    apply_result = ARAXResultify().apply(message, parameters)
    response.merge(apply_result)

    if debug or response.status != 'OK':
        _print_results_for_debug(message.results)
        print(response.show(level=response.DEBUG))
    return response, message
def _convert_shorthand_to_qg(shorthand_qnodes: Dict[str, str], shorthand_qedges: Dict[str, str]) -> QueryGraph:
    """Build a QueryGraph from {qnode_id: is_set_flag} and {qedge_id: "source--target"} shorthand."""
    qnodes = []
    for qnode_id, is_set_flag in shorthand_qnodes.items():
        qnodes.append(QNode(id=qnode_id, is_set=bool(is_set_flag)))
    qedges = []
    for qedge_id, qnode_pair in shorthand_qedges.items():
        endpoints = qnode_pair.split("--")
        qedges.append(QEdge(id=qedge_id, source_id=endpoints[0], target_id=endpoints[1]))
    return QueryGraph(nodes=qnodes, edges=qedges)
def _convert_shorthand_to_kg(shorthand_nodes: Dict[str, List[str]], shorthand_edges: Dict[str, List[str]]) -> KnowledgeGraph:
    """Build a KnowledgeGraph from {qnode_id: [node ids]} and {qedge_id: ["src--dst", ...]} shorthand.

    A node listed under several qnode ids is created once and accumulates all
    of its qnode_ids. Edges are stored keyed per (qedge_id, edge_key) pair, so
    the same "src--dst" string under two qedge ids yields two Edge objects.
    """
    nodes_dict = dict()
    for qnode_id, nodes_list in shorthand_nodes.items():
        for node_id in nodes_list:
            # Reuse the node if an earlier qnode id already created it.
            node = nodes_dict.get(node_id, Node(id=node_id, qnode_ids=[]))
            node.qnode_ids.append(qnode_id)
            nodes_dict[node_id] = node
    edges_dict = dict()
    for qedge_id, edges_list in shorthand_edges.items():
        for edge_key in edges_list:
            source_node_id = edge_key.split("--")[0]
            target_node_id = edge_key.split("--")[1]
            # NOTE(review): this lookup uses plain edge_key, but entries are
            # stored under f"{qedge_id}:{edge_key}" below, so the .get() default
            # always fires and every (qedge_id, edge_key) pair gets a fresh Edge
            # with a single qedge_id. Confirm whether accumulating qedge_ids on
            # a shared Edge was intended before "fixing" the key mismatch.
            edge = edges_dict.get(edge_key, Edge(id=edge_key, source_id=source_node_id, target_id=target_node_id, qedge_ids=[]))
            edge.qedge_ids.append(qedge_id)
            edges_dict[f"{qedge_id}:{edge_key}"] = edge
    return KnowledgeGraph(nodes=list(nodes_dict.values()), edges=list(edges_dict.values()))
def test01():
    """Two proteins and three phenotypes around one fixed disease node; with
    n02 marked is_set, resultify should collapse to one result per protein."""
    kg_nodes = [_create_node(node_id=node_id, node_type=[node_type], qnode_ids=[qnode_id])
                for node_id, node_type, qnode_id in [
                    ('UniProtKB:12345', 'protein', 'n01'),
                    ('UniProtKB:23456', 'protein', 'n01'),
                    ('DOID:12345', 'disease', 'DOID:12345'),
                    ('HP:56789', 'phenotypic_feature', 'n02'),
                    ('HP:67890', 'phenotypic_feature', 'n02'),
                    ('HP:34567', 'phenotypic_feature', 'n02')]]
    kg_edges = [_create_edge(edge_id=edge_id, source_id=source_id, target_id=target_id, qedge_ids=[qedge_id])
                for edge_id, source_id, target_id, qedge_id in [
                    ('ke01', 'UniProtKB:12345', 'DOID:12345', 'qe01'),
                    ('ke02', 'UniProtKB:23456', 'DOID:12345', 'qe01'),
                    ('ke03', 'DOID:12345', 'HP:56789', 'qe02'),
                    ('ke04', 'DOID:12345', 'HP:67890', 'qe02'),
                    ('ke05', 'DOID:12345', 'HP:34567', 'qe02')]]
    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)
    qg_nodes = [QNode(id=qnode_id,
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_type],
                      is_set=is_set)
                for qnode_id, node_type, is_set in [
                    ('n01', 'protein', False),
                    ('DOID:12345', 'disease', False),
                    ('n02', 'phenotypic_feature', True)]]
    qg_edges = [QEdge(id='qe01', source_id='n01', target_id='DOID:12345'),
                QEdge(id='qe02', source_id='DOID:12345', target_id='n02')]
    query_graph = QueryGraph(qg_nodes, qg_edges)
    results_list = ARAX_resultify._get_results_for_kg_by_qg(knowledge_graph,
                                                            query_graph)
    assert len(results_list) == 2
def test02():
    """Same topology as test01, but n01 has is_set=None (unset) — still
    expects exactly two results."""
    kg_nodes = [_create_node(node_id=node_id, node_type=[node_type], qnode_ids=[qnode_id])
                for node_id, node_type, qnode_id in [
                    ('UniProtKB:12345', 'protein', 'n01'),
                    ('UniProtKB:23456', 'protein', 'n01'),
                    ('DOID:12345', 'disease', 'DOID:12345'),
                    ('HP:56789', 'phenotypic_feature', 'n02'),
                    ('HP:67890', 'phenotypic_feature', 'n02'),
                    ('HP:34567', 'phenotypic_feature', 'n02')]]
    kg_edges = [_create_edge(edge_id=edge_id, source_id=source_id, target_id=target_id, qedge_ids=[qedge_id])
                for edge_id, source_id, target_id, qedge_id in [
                    ('ke01', 'UniProtKB:12345', 'DOID:12345', 'qe01'),
                    ('ke02', 'UniProtKB:23456', 'DOID:12345', 'qe01'),
                    ('ke03', 'DOID:12345', 'HP:56789', 'qe02'),
                    ('ke04', 'DOID:12345', 'HP:67890', 'qe02'),
                    ('ke05', 'DOID:12345', 'HP:34567', 'qe02')]]
    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)
    qg_nodes = [QNode(id=qnode_id,
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_type],
                      is_set=is_set)
                for qnode_id, node_type, is_set in [
                    ('n01', 'protein', None),
                    ('DOID:12345', 'disease', False),
                    ('n02', 'phenotypic_feature', True)]]
    qg_edges = [QEdge(id='qe01', source_id='n01', target_id='DOID:12345'),
                QEdge(id='qe02', source_id='DOID:12345', target_id='n02')]
    query_graph = QueryGraph(qg_nodes, qg_edges)
    results_list = ARAX_resultify._get_results_for_kg_by_qg(knowledge_graph,
                                                            query_graph)
    assert len(results_list) == 2
def test03():
    """Same as test02 except edge ke01 points disease->protein; with
    ignore_edge_direction=True the reversed edge must still match qe01."""
    kg_nodes = [_create_node(node_id=node_id, node_type=[node_type], qnode_ids=[qnode_id])
                for node_id, node_type, qnode_id in [
                    ('UniProtKB:12345', 'protein', 'n01'),
                    ('UniProtKB:23456', 'protein', 'n01'),
                    ('DOID:12345', 'disease', 'DOID:12345'),
                    ('HP:56789', 'phenotypic_feature', 'n02'),
                    ('HP:67890', 'phenotypic_feature', 'n02'),
                    ('HP:34567', 'phenotypic_feature', 'n02')]]
    kg_edges = [_create_edge(edge_id=edge_id, source_id=source_id, target_id=target_id, qedge_ids=[qedge_id])
                for edge_id, source_id, target_id, qedge_id in [
                    ('ke01', 'DOID:12345', 'UniProtKB:12345', 'qe01'),
                    ('ke02', 'UniProtKB:23456', 'DOID:12345', 'qe01'),
                    ('ke03', 'DOID:12345', 'HP:56789', 'qe02'),
                    ('ke04', 'DOID:12345', 'HP:67890', 'qe02'),
                    ('ke05', 'DOID:12345', 'HP:34567', 'qe02')]]
    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)
    qg_nodes = [QNode(id=qnode_id,
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_type],
                      is_set=is_set)
                for qnode_id, node_type, is_set in [
                    ('n01', 'protein', None),
                    ('DOID:12345', 'disease', False),
                    ('n02', 'phenotypic_feature', True)]]
    qg_edges = [QEdge(id='qe01', source_id='n01', target_id='DOID:12345'),
                QEdge(id='qe02', source_id='DOID:12345', target_id='n02')]
    query_graph = QueryGraph(qg_nodes, qg_edges)
    results_list = ARAX_resultify._get_results_for_kg_by_qg(knowledge_graph,
                                                            query_graph,
                                                            ignore_edge_direction=True)
    assert len(results_list) == 2
def test04():
    """Chemical-substance / protein-set / disease triangle: n01 is a set, so
    resultify should yield one result per chemical substance (two)."""
    kg_nodes = [_create_node(node_id=node_id, node_type=[node_type], qnode_ids=[qnode_id])
                for node_id, node_type, qnode_id in [
                    ('UniProtKB:12345', 'protein', 'n01'),
                    ('UniProtKB:23456', 'protein', 'n01'),
                    ('DOID:12345', 'disease', 'DOID:12345'),
                    ('UniProtKB:56789', 'protein', 'n01'),
                    ('ChEMBL.COMPOUND:12345', 'chemical_substance', 'n02'),
                    ('ChEMBL.COMPOUND:23456', 'chemical_substance', 'n02')]]
    kg_edges = [_create_edge(edge_id=edge_id, source_id=source_id, target_id=target_id, qedge_ids=[qedge_id])
                for edge_id, source_id, target_id, qedge_id in [
                    ('ke01', 'ChEMBL.COMPOUND:12345', 'UniProtKB:12345', 'qe01'),
                    ('ke02', 'ChEMBL.COMPOUND:12345', 'UniProtKB:23456', 'qe01'),
                    ('ke03', 'ChEMBL.COMPOUND:23456', 'UniProtKB:12345', 'qe01'),
                    ('ke04', 'ChEMBL.COMPOUND:23456', 'UniProtKB:23456', 'qe01'),
                    ('ke05', 'DOID:12345', 'UniProtKB:12345', 'qe02'),
                    ('ke06', 'DOID:12345', 'UniProtKB:23456', 'qe02')]]
    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)
    qg_nodes = [QNode(id=qnode_id,
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_type],
                      is_set=is_set)
                for qnode_id, node_type, is_set in [
                    ('n01', 'protein', True),
                    ('DOID:12345', 'disease', False),
                    ('n02', 'chemical_substance', False)]]
    qg_edges = [QEdge(id='qe01', source_id='n02', target_id='n01'),
                QEdge(id='qe02', source_id='DOID:12345', target_id='n01')]
    query_graph = QueryGraph(qg_nodes, qg_edges)
    results_list = ARAX_resultify._get_results_for_kg_by_qg(knowledge_graph,
                                                            query_graph,
                                                            ignore_edge_direction=True)
    assert len(results_list) == 2
def test05():
kg_node_info = ({'id': 'UniProtKB:12345',
'type': 'protein',
'qnode_ids': ['n01']},
{'id': 'UniProtKB:23456',
'type': 'protein',
'qnode_ids': ['n01']},
{'id': 'DOID:12345',
'type': 'disease',
'qnode_ids': ['DOID:12345']},
{'id': 'UniProtKB:56789',
'type': 'protein',
'qnode_ids': ['n01']},
{'id': 'ChEMBL.COMPOUND:12345',
'type': 'chemical_substance',
'qnode_ids': ['n02']},
{'id': 'ChEMBL.COMPOUND:23456',
'type': 'chemical_substance',
'qnode_ids': ['n02']})
kg_edge_info = ({'edge_id': 'ke01',
'source_id': 'ChEMBL.COMPOUND:12345',
'target_id': 'UniProtKB:12345',
'qedge_ids': ['qe01']},
{'edge_id': 'ke02',
'source_id': 'ChEMBL.COMPOUND:12345',
'target_id': 'UniProtKB:23456',
'qedge_ids': ['qe01']},
{'edge_id': 'ke03',
'source_id': 'ChEMBL.COMPOUND:23456',
'target_id': 'UniProtKB:12345',
'qedge_ids': ['qe01']},
{'edge_id': 'ke04',
'source_id': 'ChEMBL.COMPOUND:23456',
'target_id': 'UniProtKB:23456',
'qedge_ids': ['qe01']},
{'edge_id': 'ke05',
'source_id': 'DOID:12345',
'target_id': 'UniProtKB:12345',
'qedge_ids': ['qe02']},
{'edge_id': 'ke06',
'source_id': 'DOID:12345',
'target_id': 'UniProtKB:23456',
'qedge_ids': ['qe02']})
kg_nodes = [_create_node(node_id=node_info['id'],
node_type=[node_info['type']],
qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]
kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
source_id=edge_info['source_id'],
target_id=edge_info['target_id'],
qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]
knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)
qg_node_info = ({'id': 'n01',
'type': 'protein',
'is_set': True},
{'id': 'DOID:12345',
'type': 'disease',
'is_set': False},
{'id': 'n02',
'type': 'chemical_substance',
'is_set': False},
)
qg_edge_info = ({'edge_id': 'qe01',
'source_id': 'n02',
'target_id': 'n01'},
{'edge_id': 'qe02',
'source_id': 'DOID:12345',
'target_id': 'n01'})
qg_nodes = [QNode(id=node_info['id'],
type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
is_set=node_info['is_set']) for node_info in qg_node_info]
qg_edges = [QEdge(id=edge_info['edge_id'],
source_id=edge_info['source_id'],
target_id=edge_info['target_id']) for edge_info in qg_edge_info]
query_graph = QueryGraph(qg_nodes, qg_edges)
message = Message(query_graph=query_graph,
knowledge_graph=knowledge_graph,
results=[])
# (extraction residue — boundary between concatenated files)
# Repository: SvobodaJakub/WoolnoteAndroid
# University of Illinois/NCSA Open Source License
# Copyright (c) 2018, <NAME>.
# TODO: docstring for the file
import os
import copy
import zipfile
from woolnote import config
from woolnote import util
from woolnote.task_store import Task, TaskStore, MARKUP, PLAIN
# UI backend
############
class UIBackend():
def __init__(self, task_store, task_store_trash):
"""
Class holding references to the opened default and trash task stores and allowing UI-centric operations to be
performed. The operations are not tied to any particular type of UI.
Args:
task_store (woolnote.task_store.TaskStore):
task_store_trash (woolnote.task_store.TaskStore):
"""
super().__init__()
self.task_store = task_store
self.task_store_trash = task_store_trash
def helper_sanitize_task_body_before_save(self, task_to_be_updated, tainted_task_body):
"""
Sanitizes new body of a task and saves it into the task.
Args:
task_to_be_updated (woolnote.task_store.Task):
tainted_task_body (str):
Returns:
None:
"""
# TODO: if the new body contains the delimiter used by the saved file in a vulnerable way, escape/remove it (don't do it here, do it in task_store.py)
if task_to_be_updated.body_format == MARKUP:
task_to_be_updated.body = util.task_body_save_fix_multiline_markup_bullet_lists(tainted_task_body)
else:
task_to_be_updated.body = util.task_body_save_fix_newlines(tainted_task_body)
    def helper_sanitize_task_before_save(self, task_to_be_updated,
                                         tainted_task_name,
                                         tainted_task_folder,
                                         tainted_task_pubauthid,
                                         tainted_task_tags,
                                         tainted_task_body,
                                         tainted_due_date,
                                         tainted_formatting):
        """
        Reads data for a new/saved note from POST data, performs sanitization, and correctly saves the data to a note
        (that also entails resetting the reminder flag if due date changes, correctly processing body text based on
        formatting used, setting the correct values for the formatting property). The data are saved into the provided
        task_to_be_updated but that task is not saved into a task store (you have to do that using a different function
        afterwards).

        Args:
            task_to_be_updated (woolnote.task_store.Task):
            tainted_task_name (str):
            tainted_task_folder (str):
            tainted_task_pubauthid (str):
            tainted_task_tags (str): comma-separated tag list, possibly with a trailing ", "
            tainted_task_body (str):
            tainted_due_date (str):
            tainted_formatting (str): expected "markup" or "plaintext"

        Returns:
            None:
        """
        # TODO: can this be broken by other unicode newline characters?
        # Drop the trailing ", " that the tag-entry UI tends to leave behind.
        if tainted_task_tags.endswith(", "):
            tainted_task_tags = tainted_task_tags[:-2]
        task_to_be_updated.name = util.sanitize_singleline_string_for_tasksave(tainted_task_name)
        task_to_be_updated.folder = util.sanitize_singleline_string_for_tasksave(tainted_task_folder)
        # Tags become a set, so duplicates collapse silently.
        task_to_be_updated.tags = {util.sanitize_singleline_string_for_tasksave(x) for x in tainted_task_tags.split(",")}
        # Remember the old due date so we can detect a change below.
        old_due_date = task_to_be_updated.due_date
        task_to_be_updated.due_date = util.sanitize_singleline_string_for_tasksave(tainted_due_date)
        if old_due_date != task_to_be_updated.due_date:
            # when due date changes, the note is again ready to display a red reminder
            task_to_be_updated.due_date_reminder_dismissed = False
        task_to_be_updated.public_share_auth = util.sanitize_singleline_string_for_tasksave(tainted_task_pubauthid)
        # too short strings are inherently insecure -> replace with a fresh id
        if len(task_to_be_updated.public_share_auth) < 5:
            task_to_be_updated.public_share_auth = util.create_id_task()
        if tainted_formatting == "markup":
            task_to_be_updated.body_format = MARKUP
        elif tainted_formatting == "plaintext":
            task_to_be_updated.body_format = PLAIN
        else:
            # keeping unchanged, shouldn't happen
            util.dbgprint("tainted_formatting had a nonstandard value {}".format(tainted_formatting))
            pass
        # Body sanitization depends on body_format, so it must run last.
        self.helper_sanitize_task_body_before_save(task_to_be_updated=task_to_be_updated,
                                                   tainted_task_body=tainted_task_body)
def save_new_note(self, task):
"""
Saves a new task into the task store. That is, a task whose taskid is not already in the task store.
Args:
task (woolnote.task_store.Task):
Returns:
None:
"""
self.task_store.add(task)
self.task_store.task_store_save()
def save_edited_note(self, task):
"""
Saves a new version of an existing task into a task store. That is, a task whose taskid is already in the task store.
Args:
task (woolnote.task_store.Task):
Returns:
None:
"""
task.changed_date = util.current_timestamp()
self.task_store.touch(task.taskid)
self.task_store.task_store_save()
def import_notes(self, replace_local_request):
    """
    Imports notes from the configured path into the task store. Does either differential sync or overwrite all import
    depending on the argument.

    Args:
        replace_local_request (bool): If replace_local_request == True, then the remote database simply replaces the local database.

    Returns:
        Union[str, None]: error message or None if no error
    """
    # Flush both stores to disk and take a safety backup before anything is modified.
    self.task_store.task_store_save()
    self.task_store_trash.task_store_save()
    util.tasks_backup(self.task_store, self.task_store_trash, s="imp0")
    # import the zip into the local directory so that it can be loaded
    with zipfile.ZipFile(os.path.join(config.PATH_LOAD_DROPBOX_IMPORT, config.FILE_WOOLNOTE_ZIP), "r") as importzip:
        importzip.extract(config.FILE_WOOLNOTE_DAT, config.PATH_SAVE_DB)  # overwrites
    use_task_store = self.task_store
    use_task_store_trash = self.task_store_trash
    # Load the just-extracted remote database into a separate, throwaway store.
    use_task_remote_store = TaskStore(os.path.join(config.PATH_SAVE_DB, config.FILE_WOOLNOTE_DAT))
    use_task_remote_store.task_store_load()
    if replace_local_request:
        # Overwrite-all import: drop local tasks and reload from the extracted file.
        use_task_store.store_dict_id = {}
        use_task_store.task_store_load(alt_path=os.path.join(config.PATH_SAVE_DB, config.FILE_WOOLNOTE_DAT))
        use_task_store.update_lamport_clock(use_task_remote_store.export_lamport_clock)
        use_task_store.last_import_lamport_clock = use_task_store.lamport_clock
        return None
    if use_task_remote_store.last_import_lamport_clock < use_task_store.export_lamport_clock:
        # if the remote store is based on an older export than the last export of the local store, abort the operation
        # (bad stuff might happen when importing such files)
        error_message = "Cannot import - internal database export lamport clock = {}, external database last import lamport clock = {}. ".format(
            str(int(use_task_store.export_lamport_clock)),
            str(int(use_task_remote_store.last_import_lamport_clock)))
        return error_message
    # The import is safe; advance the local clock past the remote export point.
    use_task_store.update_lamport_clock(use_task_remote_store.export_lamport_clock)
    use_task_store.last_import_lamport_clock = use_task_store.lamport_clock

    # --- Predicates classifying a task's sync state via lamport timestamps. ---

    def local_change(task_local):
        # True when the local task was modified after the last export.
        # util.dbgprint("def local_change(task_local):")
        # util.dbgprint(task_local.lamport_timestamp > task_local.export_lamport_timestamp)
        return task_local.lamport_timestamp > task_local.export_lamport_timestamp

    def remote_change(task_local, task_remote):
        # True when the remote copy is newer than what was last exported locally.
        # util.dbgprint("def remote_change(task_local, task_remote):")
        # util.dbgprint(task_local.export_lamport_timestamp < task_remote.lamport_timestamp)
        return task_local.export_lamport_timestamp < task_remote.lamport_timestamp

    def no_change(task_local, task_remote):
        # Neither side has changed since the last export.
        # util.dbgprint("def no_change(task_local, task_remote):")
        # util.dbgprint(((local_change(task_local) == False) and (remote_change(task_local, task_remote) == False)))
        return ((local_change(task_local) == False) and (remote_change(task_local, task_remote) == False))

    def both_change(task_local, task_remote):
        # Conflict: both sides changed independently.
        # util.dbgprint("def both_change(task_local, task_remote):")
        # util.dbgprint((local_change(task_local) and remote_change(task_local, task_remote)))
        return (local_change(task_local) and remote_change(task_local, task_remote))
        # Conflict resolution (performed by the caller below):
        # -> current local task
        #    -> create new copy
        #    -> changed taskid
        #    -> changed name
        # -> current remote task overwrites the current local task

    def local_change_only(task_local, task_remote):
        # util.dbgprint("def local_change_only(task_local, task_remote):")
        # util.dbgprint((local_change(task_local) and not remote_change(task_local, task_remote)))
        return (local_change(task_local) and not remote_change(task_local, task_remote))
        # -> do nothing (will be exported to remote on next export)

    def remote_change_only(task_local, task_remote):
        # util.dbgprint("def remote_change_only(task_local, task_remote):")
        # util.dbgprint((remote_change(task_local, task_remote) and not local_change(task_local)))
        return (remote_change(task_local, task_remote) and not local_change(task_local))
        # -> import (overwrite local)

    def locally_trashed(task_remote):
        # Remote task exists in the local trash (was deleted locally).
        # util.dbgprint("def locally_trashed(task_remote):")
        # Handling by the caller:
        # -> create temp copy
        # -> change taskid
        # -> change name
        # -> save into local trash
        # util.dbgprint (task_remote.taskid in use_task_store_trash.store_dict_id)
        return task_remote.taskid in use_task_store_trash.store_dict_id

    def remotely_trashed(task_local):
        # Local task is missing remotely but the remote did know it at export time.
        # util.dbgprint("def remotely_trashed(task_local):")
        in_local_not_remote = task_local.taskid not in use_task_remote_store.store_dict_id
        in_remote_known_then_trashed = task_local.export_lamport_timestamp == use_task_store.export_lamport_clock
        # util.dbgprint((in_local_not_remote and in_remote_known_then_trashed))
        return (in_local_not_remote and in_remote_known_then_trashed)
        # -> trash the local copy

    def new_in_local(task_local):
        # Local task missing remotely because it was created after the last export.
        # util.dbgprint("def new_in_local(task_local):")
        in_local_not_remote = task_local.taskid not in use_task_remote_store.store_dict_id
        in_remote_known_then_trashed = task_local.export_lamport_timestamp == use_task_store.export_lamport_clock
        # util.dbgprint((in_local_not_remote and not in_remote_known_then_trashed))
        return (in_local_not_remote and not in_remote_known_then_trashed)
        # -> do nothing (will be exported to remote on next export)

    def new_in_remote(task_remote):
        # Remote task unknown locally (neither active nor trashed).
        # util.dbgprint("def new_in_remote(task_remote):")
        # util.dbgprint((task_remote.taskid not in use_task_store_trash.store_dict_id) and (task_remote.taskid not in use_task_store.store_dict_id))
        return ((task_remote.taskid not in use_task_store_trash.store_dict_id) and (
            task_remote.taskid not in use_task_store.store_dict_id))
        # -> import

    # util.dbgprint("set_tasks_local")
    set_tasks_local = set(use_task_store.store_dict_id.keys())
    # util.dbgprint(str(repr(set_tasks_local)))
    set_tasks_local_processed = set()
    # util.dbgprint("set_tasks_remote")
    set_tasks_remote = set(use_task_remote_store.store_dict_id.keys())
    # util.dbgprint(str(repr(set_tasks_remote)))
    set_tasks_remote_processed = set()

    # go through remote tasks, sync them, mark both sides as processed
    # (only tasks present on BOTH sides are handled here; one-sided tasks are
    # left unmarked for the two follow-up loops below)
    for taskid in set_tasks_remote:
        task_remote = use_task_remote_store.store_dict_id[taskid]
        # util.dbgprint("task_remote.taskid=" + task_remote.taskid + ", name=" + task_remote.name)
        if taskid in set_tasks_local:
            task_local = use_task_store.store_dict_id[taskid]
            # util.dbgprint("task_local.taskid=" + task_local.taskid + ", name=" + task_local.name)
            if remote_change_only(task_local, task_remote):
                use_task_store.add_deserialized(task_remote)  # import (overwrite local)
            if local_change_only(task_local, task_remote):
                pass
            if both_change(task_local, task_remote):
                # -> current local task
                #    -> create new copy
                #    -> changed taskid
                #    -> changed name
                # -> current remote task overwrites the current local task
                tmp_task = copy.copy(task_local)
                tmp_task.name += " (conflicted local copy, conflict date " + util.current_timestamp() + ", orig ID " + tmp_task.taskid + ")"
                tmp_task.taskid = util.create_id_task()
                use_task_store.add(tmp_task)
                use_task_store.add_deserialized(task_remote)
            set_tasks_local_processed.add(task_local.taskid)
            set_tasks_remote_processed.add(task_remote.taskid)

    # go through unprocessed remote tasks, sync them, mark as processed
    for taskid in set_tasks_remote:
        if taskid not in set_tasks_remote_processed:
            task_remote = use_task_remote_store.store_dict_id[taskid]
            # util.dbgprint("task_remote.taskid=" + task_remote.taskid + ", name=" + task_remote.name)
            if locally_trashed(task_remote):
                # -> create temp copy
                # -> change taskid
                # -> change name
                # -> save into local trash
                # NOTE(review): "mote" below looks like a typo for "note"; left unchanged
                # because this string ends up in user-visible task names.
                tmp_task = copy.copy(task_remote)
                tmp_task.name += " (remote backup of locally trashed mote, backup date " + util.current_timestamp() + ", orig ID " + tmp_task.taskid + ")"
                tmp_task.taskid = util.create_id_task()
                use_task_store_trash.add(tmp_task)
            if new_in_remote(task_remote):
                # -> import
                use_task_store.add_deserialized(task_remote)  # import
            set_tasks_remote_processed.add(task_remote.taskid)

    # go through unprocessed local tasks, sync them, mark as processed
    for taskid in set_tasks_local:
        if taskid not in set_tasks_local_processed:
            task_local = use_task_store.store_dict_id[taskid]
            # util.dbgprint("task_local.taskid=" + task_local.taskid + ", name=" + task_local.name)
            if remotely_trashed(task_local):
                # -> trash the local copy
                use_task_store_trash.add_deserialized(task_local)
                use_task_store.remove(task_local.taskid)
                pass
            if new_in_local(task_local):
                # -> do nothing (will be exported to remote on next export)
                pass
            set_tasks_local_processed.add(task_local.taskid)

    util.tasks_backup(self.task_store, self.task_store_trash, s="imp1")
    return None
def export_notes(self):
    """
    Exports the task store to a file in the configured path.

    Returns:
        None:
    """
    util.tasks_backup(self.task_store, self.task_store_trash)
    # Stamp the store and every task with the current lamport clock so a later
    # import can tell what has changed since this export.
    store = self.task_store
    store.export_lamport_clock = store.lamport_clock
    for task in store.store_dict_id.values():
        task.export_lamport_timestamp = store.export_lamport_clock
    # Persist the local databases first.
    store.task_store_save()
    self.task_store_trash.task_store_save()
    # Export a plain .dat next to the main database, then pack it into the zip.
    dat_path = os.path.join(config.PATH_SAVE_DB, config.FILE_WOOLNOTE_DAT)
    store.task_store_save(alt_path=dat_path)
    zip_path = os.path.join(config.PATH_SAVE_DROPBOX_EXPORT, config.FILE_WOOLNOTE_ZIP)
    with zipfile.ZipFile(zip_path, "w", compression=zipfile.ZIP_DEFLATED) as exportzip:
        exportzip.write(dat_path, arcname=config.FILE_WOOLNOTE_DAT,
                        compress_type=zipfile.ZIP_DEFLATED)
def delete_taskid(self, task_id_list):
"""
Moves a specified tasks from task store into task trash store.
Args:
task_id_list (List[str]): Task ids to be deleted.
Returns:
None:
"""
for taskid in task_id_list:
task = self.task_store.store_dict_id[taskid]
| |
"CC_OXYGEN_SENSOR": {
"EPC_MEASURE_OXYGEN_CONCENTRATION_VALUE": "0xE0"
},
"CC_ILLUMINANCE_SENSOR": {
"EPC_MEASURE_ILLUMINANCE_LUX_VALUE": "0xE0",
"EPC_MEASURE_ILLUMINANCE_KILOLUX_VALUE": "0xE1"
},
"CC_SOUND_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_SOUND_DETECTION_STATUS": "0xB1",
"EPC_SOUND_DETECTION_HOLDING_TIME": "0xBE"
},
"CC_MAILING_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_MAILING_DETECTION_STATUS": "0xB1"
},
"CC_WEIGHT_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_WEIGHT_DETECTION_STATUS": "0xB1"
},
"CC_TEMPERTURE_SENSOR": {
"EPC_MEASURE_TEMPERATURE_VALUE": "0xE0"
},
"CC_HUMIDITY_SENSOR": {
"EPC_MEASURE_HUMIDITY_VALUE": "0xE0"
},
"CC_RAIN_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_RAIN_DETECTION_STATUS": "0xB1"
},
"CC_WATER_LEVEL_SENSOR": {
"EPC_WATER_LEVEL_OVER_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_WATER_LEVEL_OVER_DETECTION_STATUS": "0xB1",
"EPC_MEASURE_WATER_LEVEL_VALUE": "0xE0"
},
"CC_BATH_WATER_LEVEL_SENSOR": {
"EPC_BATH_WATER_LEVEL_OVER_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_BATH_WATER_LEVEL_OVER_DETECTION_STATUS": "0xB1",
"EPC_MEASURE_BATH_WATER_LEVEL_VALUE": "0xE0"
},
"CC_BATH_HEATING_STATUS_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_BATH_HEATING_DETECTION_STATUS": "0xB1"
},
"CC_WATER_LEAK_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_WATER_LEAK_DETECTION_STATUS": "0xB1"
},
"CC_WATER_OVERFLOW_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_WATER_OVERFLOW_DETECTION_STATUS": "0xB1"
},
"CC_FIRE_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_FIRE_OCCURRENCE_DETECTION_STATUS": "0xB1",
"EPC_FIRE_OCCURRENCE_DETECTION_STATUS_RESET": "0xBF"
},
"CC_CIGARETTE_SMOKE_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_SMOKE_DETECTION_STATUS": "0xB1"
},
"CC_CO2_SENSOR": {
"EPC_MEASURE_CO2_CONCENTRATION_VALUE": "0xE0"
},
"CC_GAS_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_GAS_DETECTION_STATUS": "0xB1",
"EPC_MEASURE_GAS_CONCENTRATION_VALUE": "0xE0"
},
"CC_VOC_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_VOC_DETECTION_STATUS": "0xB1",
"EPC_MEASURE_VOC_CONCENTRATION_VALUE": "0xE0"
},
"CC_DIFFERENTIAL_PRESSURE_SENSOR": {
"EPC_MEASURE_DIFFERENTIAL_PRESSURE_VALUE": "0xE0"
},
"CC_AIR_SPEED_SENSOR": {
"EPC_MEASURE_AIR_SPEED_VALUE": "0xE0",
"EPC_AIR_FLOW_DIRECTION": "0xE1"
},
"CC_ODOR_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_ODOR_DETECTION_STATUS": "0xB1",
"EPC_MEASURE_ODOR_VALUE": "0xE0"
},
"CC_FLAME_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_FLAME_DETECTION_STATUS": "0xB1",
"EPC_FLAME_DETECTION_STATUS_RESET": "0xBF"
},
"CC_ELECTRIC_ENERGY_SENSOR": {
"EPC_CUMULATIVE_AMOUNT_ELECTRIC_ENERGY": "0xE0",
"EPC_MEDIUM_CAPACITY_SENSOR_INSTANTANEOUS_ELECTRIC_ENERGY": "0xE1",
"EPC_SMALL_CAPACITY_SENSOR_INSTATANEOUS_ELECTRIC_ENERGY": "0xE2",
"EPC_LARGE_CAPACITY_SENSOR_INSTATANEOUS_ELECTRIC_ENERGY": "0xE3",
"EPC_CUMULATIVE_AMOUNT_ELECTRIC_ENERGY_MEASUREMENT_LOG": "0xE4",
"EPC_EFFECTIVE_VOLTAGE_VALUE": "0xE5"
},
"CC_CURRENT_VALUE_SENSOR": {
"EPC_MEASURE_CURRENT_VALUE_1": "0xE0",
"EPC_RATED_VOLTAGE_TO_BE_MEASURED": "0xE1",
"EPC_MEASURE_CURRENT_VALUE_2": "0xE2"
},
"CC_DAYLIGHT_SENSOR": {
},
"CC_WATER_FLOW_RATE_SENSOR": {
"EPC_CUMULATIVE_FLOW_RATE": "0xE0",
"EPC_FLOW_RATE": "0xE2"
},
"CC_MICROMOTION_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_MICROMOTION_DETECTION_STATUS": "0xB1",
"EPC_DETECTION_COUNTER": "0xB2",
"EPC_SAMPLING_COUNT": "0xBC",
"EPC_SAMPLING_CYCLE": "0xBD"
},
"CC_PASSAGE_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_PASSAGE_DETECTION_HOLD_TIME": "0xBE",
"EPC_PASSAGE_DETECTION_DIRECTION": "0xE0"
},
"CC_BED_PRESENCE_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_BED_PRESENCE_DETECTION_STATUS": "0xB1"
},
"CC_OPEN_CLOSE_SENSOR": {
"EPC_DEGREE_OF_OPENING_DETECTION_STATUS_1": "0xE0",
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_DEGREE_OF_OPENING_DETECTION_STATIS_2": "0xB1"
},
"CC_ACTIVITY_AMOUNT_SENSOR": {
"EPC_ACTIVITY_AMOUNT_LEVEL_1": "0xE0",
"EPC_MAXIMUM_NUMBER_OF_HUMAN_BODY_ID": "0xE1",
"EPC_ACTIVITY_AMOUNT_LEVEL_2": "0xE2",
"EPC_HUMAN_BODY_EXISTENCE_INFORMATION": "0xE3"
},
"CC_HUMAN_BODY_LOCATION_SENSOR": {
"EPC_HUMAN_BODY_DETECTION_LOCATION_1": "0xE0",
"EPC_MAXIMUM_NUMBER_OF_HUMAN_BODY_ID": "0xE1",
"EPC_HUMAN_BODY_DETECTION_LOCATION_2": "0xE2",
"EPC_HUMAN_BODY_EXISTENCE_INFORMATION": "0xE3"
},
"CC_SNOW_SENSOR": {
"EPC_DETECTION_THRESHOLD_LEVEL": "0xB0",
"EPC_SNOW_DETECTION_STATUS": "0xB1"
},
"CC_AIR_PRESSURE_SENSOR": {
"EPC_AIR_PRESSURE_MEASUREMENT": "0xE0"
}
},
"CGC_AIR_CONDITIONER_RELATED": {
"CC_HOME_AIR_CONDITIONER": {
"EPC_OPERATION_POWER_SAVING": "0x8F",
"EPC_OPERATION_MODE_SETTING": "0xB0",
"EPC_AUTOMATIC_TEMPERATURE_CONTROL_SETTING": "0xB1",
"EPC_NORMAL_HIGHSPEED_SILENT_OPERATION_SETTING": "0xB2",
"EPC_SET_TEMPERATURE_VALUE": "0xB3",
"EPC_SET_RELATIVE_HUMIDITY_IN_DEHUMIDIFYING_MODE": "0xB4",
"EPC_SET_TEMPERATURE_IN_COOLING_MODE": "0xB5",
"EPC_SET_TEMPERATURE_IN_HEATING_MODE": "0xB6",
"EPC_SET_TEMPERATURE_IN_DEHUMIDIFYING_MODE": "0xB7",
"EPC_RATED_POWER_CONSUMPTION": "0xB8",
"EPC_MEASURED_VALUE_OF_CURRENT_CONSUMPTION": "0xB9",
"EPC_MEASURED_VALUE_OF_ROOM_RELATIVE_HUMIDITY": "0xBA",
"EPC_MEASURED_VALUE_OF_ROOM_TEMPERATURE": "0xBB",
"EPC_SET_TEMPERATURE_OF_USER_REMOTE_CONTROL": "0xBC",
"EPC_MEASURED_COOLED_AIR_TEMPERATURE": "0xBD",
"EPC_MEASURED_OUTDOOR_AIR_TEMPERATURE": "0xBE",
"EPC_RELATIVE_TEMPERATURE_SETTING": "0xBF",
"EPC_AIRFLOW_RATE_SETTING": "0xA0",
"EPC_AUTOMATIC_CONTROL_AIRFLOW_DIRECTION_SETTING": "0xA1",
"EPC_AUTOMATIC_SWING_AIRFLOW_SETTING": "0xA3",
"EPC_AIRFLOW_DIRECTION_VERTICAL_SETTING": "0xA4",
"EPC_AIRFLOW_DIRECTION_HORIZONTAL_SETTING": "0xA5",
"EPC_SPECIAL_STATE": "0xAA",
"EPC_NON_PRIORITY_STATE": "0xAB",
"EPC_VENTILATION_FUNCTION_SETTING": "0xC0",
"EPC_HUMIDIFIER_FUNCTION_SETTING": "0xC1",
"EPC_VENTILATION_AIR_FLOW_RATE_SETTING": "0xC2",
"EPC_DEGREE_OF_HUMIDIFICATION_SETTING": "0xC4",
"EPC_MOUNTED_AIR_CLEANING_METHOD": "0xC6",
"EPC_AIR_PURIFIER_FUNCTION_SETTING": "0xC7",
"EPC_MOUNTED_AIR_REFRESH_METHOD": "0xC8",
"EPC_AIR_REFRESHER_FUNCTION_SETTING": "0xC9",
"EPC_MOUNTED_SELF_CLEANING_METHOD": "0xCA",
"EPC_SELF_CLEANING_FUNCTION_SETTING": "0xCB",
"EPC_SPECIAL_FUNCTION_SETTING": "0xCC",
"EPC_OPERATION_STATUS_OF_COMPONENTS": "0xCD",
"EPC_THERMOSTAT_SETTING_OVERRIDE_FUNCTION": "0xCE",
"EPC_AIR_PURIFICATION_MODE_SETTING": "0xCF",
"EPC_ON_TIMER_BASED_RESERVATION_SETTING": "0x90",
"EPC_ON_TIMER_SETTING_TIME": "0x91",
"EPC_ON_TIMER_SETTING_RELATIVE_TIME": "0x92",
"EPC_OFF_TIMER_BASED_RESERVATION_SETTING": "0x94",
"EPC_OFF_TIMER_SETTING_TIME": "0x95",
"EPC_OFF_TIMER_SETTING_RELATIVE_TIME": "0x96"
},
"CC_COLD_BLASTER": {
},
"CC_ELECTRIC_FAN": {
},
"CC_VENTILATION_FAN": {
"EPC_SET_ROOM_RELATIVE_HUMIDITY_VALUE": "0xB4"
},
"CC_AIR_CONDITIONER_VENTILATION_FAN": {
},
"CC_AIR_CLEANER": {
},
"CC_COLD_BLAST_FAN": {
},
"CC_CIRCULATOR": {
},
"CC_DEHUMIDIFIER": {
},
"CC_HUMIDIFIER": {
},
"CC_CEILING_FAN": {
},
"CC_ELECTRIC_KOTATSU": {
},
"CC_ELECTRIC_HEATING_PAD": {
},
"CC_ELECTRIC_BLANKET": {
},
"CC_SPACE_HEATER": {
},
"CC_PANEL_HEATER": {
},
"CC_ELECTRIC_CARPET": {
},
"CC_FLOOR_HEATER_0x01": {
},
"CC_ELECTRIC_HEATER": {
},
"CC_FAN_HEATER": {
},
"CC_BATTERY_CHARGER": {
},
"CC_PACKAGE_TYPE_COMMERCIAL_AIRCOND_INDOOR": {
},
"CC_PACKAGE_TYPE_COMMERCIAL_AIRCOND_OUTDOOR": {
},
"CC_PACKAGE_TYPE_COMMERCIAL_AIRCOND_THERMAL": {
},
"CC_COMMERCIAL_FAN_COIL_UNIT": {
},
"CC_COMMERCIAL_AIRCOND_COLD_SOURCE_CHILLER": {
},
"CC_COMMERCIAL_AIRCOND_HOT_SOURCE_BOILER": {
},
"CC_AIRCOND_VAV_FOR_COMMERCIAL_APPLICATIONS": {
},
"CC_AIRCOND_FOR_COMMERCIAL_APPLICATIONS": {
},
"CC_UNIT_COOLER": {
},
"CC_CONDENSING_UNIT_FOR_COMMERCIAL_APP": {
},
"CC_ELECTRIC_STORAGE_HEATER": {
}
},
"CGC_HOUSING_RELATED": {
"CC_ELECTRICALLY_OPERATED_BLIND": {
},
"CC_ELECTRICALLY_OPERATED_SHUTTER": {
},
"CC_ELECTRICALLY_OPERATED_CURTAIN": {
},
"CC_ELECTRICALLY_OPERATED_RAIN_SLIDING_DOOR": {
},
"CC_ELECTRICALLY_OPERATED_GATE": {
},
"CC_ELECTRICALLY_OPERATED_WINDOW": {
},
"CC_AUTOMATICALLY_OPERATED_ENTRANCE_DOOR": {
},
"CC_GARDEN_SPRINKLER": {
},
"CC_FIRE_SPRINKLER": {
},
"CC_FOUNTAIN": {
},
"CC_INSTANTANEOUS_WATER_HEATER": {
},
"CC_ELECTRIC_WATER_HEATER": {
},
"CC_SOLAR_WATER_HEATER": {
},
"CC_CIRCULATION_PUMP": {
},
"CC_BIDET_EQUIPPED_TOILET": {
},
"CC_ELECTRIC_LOCK": {
},
"CC_GAS_LINE_VALVE": {
},
"CC_HOME_SAUNA": {
},
"CC_HOT_WATER_GENERATOR": {
},
"CC_BATHROOM_DRYER": {
},
"CC_HOME_ELEVATOR": {
},
"CC_ELECTRICALLY_OPERATED_ROOM_DIVIDER": {
},
"CC_HORIZONTAL_TRANSFER": {
},
"CC_ELECTRICALLY_OPERATED_CLOTH_DRYING_POLE": {
},
"CC_SEPTIC_TANK": {
},
"CC_HOME_SOLAR_POWER_GENERATION": {
},
"CC_COLD_HOT_WATER_HEAT_SOURCE_EQUIPMENT": {
},
"CC_FLOOR_HEATER_0x02": {
},
"CC_FUEL_CELL": {
},
"CC_STORAGE_BATTERY": {
},
"CC_ELECTRIC_VEHICLE_CHARGER_DISCHARGER": {
},
"CC_ENGINE_COGENERATION": {
},
"CC_ELECTRIC_ENERGY_METER": {
},
"CC_WATER_FLOW_METER": {
},
"CC_GAS_METER": {
},
"CC_LP_GAS_METER": {
},
"CC_CLOCK": {
},
"CC_AUTOMATIC_DOOR": {
},
"CC_COMMERCIAL_ELEVATOR": {
},
"CC_DISTRIBUTION_PANEL_METERING": {
},
"CC_LOW_VOLTAGE_SMART_ELECTRIC_ENERGY_METER": {
},
"CC_SMART_GAS_METER": {
},
"CC_HIGH_VOLTAGE_SMART_ELECTRIC_ENERGY_METER": {
},
"CC_GENERAL_LIGHTING_CLASS": {
},
"CC_SINGLE_FUNCTION_LIGHTING": {
},
"CC_EMERGENCY_LIGHTING": {
},
"CC_EQUIPMENT_LIGHT": {
},
"CC_BUZZER": {
}
},
"CGC_COOKING_RELATED": {
"CC_COFFEE_MACHINE": {
},
"CC_COFFEE_MILL": {
},
"CC_ELECTRIC_HOT_WATER_POT": {
},
"CC_ELECTRIC_STOVE": {
},
"CC_TOASTER": {
},
"CC_JUICER_FOOD_MIXER": {
},
"CC_FOOD_PROCESSOR": {
},
"CC_REFRIGERATOR": {
},
"CC_COMBINATION_MICROWAVE_OVEN": {
},
"CC_COOKING_HEATER": {
},
"CC_OVEN": {
},
"CC_RICE_COOKER": {
},
"CC_ELECTRONIC_JAR": {
},
"CC_DISH_WASHER": {
},
"CC_DISH_DRYER": {
},
"CC_ELECTRIC_RICE_CARD_COOKER": {
},
"CC_KEEP_WARM_MACHINE": {
},
"CC_RICE_MILL": {
},
"CC_AUTOMATIC_BREAD_COOKER": {
},
"CC_SLOW_COOKER": {
},
"CC_ELECTRIC_PICKLES_COOKER": {
},
"CC_WASHING_MACHINE": {
},
"CC_CLOTHES_DRYER": {
},
"CC_ELECTRIC_IRON": {
},
"CC_TROUSER_PRESS": {
},
"CC_FUTON_DRYER": {
},
"CC_SMALL_ARTICLE_SHOES_DRYER": {
},
"CC_ELECTRIC_VACUUM_CLEANER": {
},
"CC_DISPOSER": {
},
"CC_ELECTRIC_MOSQUITO_CATCHER": {
},
"CC_COMMERCIAL_SHOW_CASE": {
},
"CC_COMMERCIAL_REFRIGERATOR": {
},
"CC_COMMERCIAL_HOT_CASE": {
},
"CC_COMMERCIAL_FRYER": {
},
"CC_COMMERCIAL_MICROWAVE_OVEN": {
},
"CC_WASHER_AND_DRYER": {
},
"CC_COMMERCIAL_SHOW_CASE_OUTDOOR_UNIT": {
}
},
"CGC_HEALTH_RELATED": {
"CC_WEIGHTING_MACHINE": {
},
"CC_CLINICAL_THERMOMETER": {
},
"CC_BLOOD_PRESSURE_METER": {
},
"CC_BLOOD_SUGAR_METER": {
},
"CC_BODY_FAT_METER": {
}
},
"CGC_MANAGEMENT_RELATED": {
"CC_SECURE_COMM_SHARED_KEY_SETUP_NODE": {
},
"CC_SWITCH": {
},
"CC_PORTABLE_TERMINAL": {
},
"CC_CONTROLLER": {
}
},
"CGC_AV_RELATED": {
"CC_DISPLAY": {
},
"CC_TELEVISION": {
},
"CC_AUDIO": {
},
"CC_NETWORK_CAMERA": {
}
},
"CGC_PROFILE_CLASS": {
"CC_NODE_PROFILE": {
"EPC_UNIQUE_IDENTIFIER_DATA": "0xBF",
"EPC_NUMBER_OF_SELF_NODE_INSTANCES": "0xD3",
"EPC_NUMBER_OF_SELF_NODE_CLASSES": "0xD4",
"EPC_INSTANCE_LIST_NOTIFICATION": "0xD5",
"EPC_SELF_NODE_INSTANCE_LIST_S": "0xD6",
"EPC_SELF_NODE_CLASS_LIST_S": "0xD7"
}
},
"CGC_USER_DEFINITION_CLASS": {
"IC_GENERAL_NODE": {
},
"IC_TRANSMISSION_ONLY_NODE": {
}
}
}
# Echonet Lite Property (EPC) codes.
# NOTE(review): EPC_DETECTION_THRESHOLD_LEVEL (0xB0) is re-assigned in many of the
# per-sensor sections below; every assignment uses the same value, so the repeated
# definitions are harmless and serve only as per-sensor documentation grouping.
# Note: Super class
EPC_OPERATIONAL_STATUS = 0x80
EPC_INSTALLATION_LOCATION = 0x81
EPC_STANDARD_VERSION_INFORMATION = 0x82
EPC_IDENTIFICATION_NUMBER = 0x83
EPC_MEASURE_INSTANTANEOUS_POWER_CONSUMPTION = 0x84
EPC_MEASURE_CUMULATIVE_POWER_CONSUMPTION = 0x85
EPC_MANUFACTURER_FAULT_CODE = 0x86
EPC_CURRENT_LIMIT_SETTING = 0x87
EPC_FAULT_STATUS = 0x88
EPC_FAULT_DESCRIPTION = 0x89
EPC_MANUFACTURER_CODE = 0x8A
EPC_BUSINESS_FACILITY_CODE = 0x8B
EPC_PRODUCT_CODE = 0x8C
EPC_PRODUCTION_NUMBER = 0x8D
EPC_PRODUCTION_DATE = 0x8E
EPC_POWER_SAVING_OPERATIONAL_SETTING = 0x8F
EPC_REMOTE_CONTROL_SETTING = 0x93
EPC_CURRENT_TIME_SETTING = 0x97
EPC_CURRENT_DATE_SETTING = 0x98
EPC_POWER_LIMIT_SETTING = 0x99
EPC_CUMULATIVE_OPERATING_TIME = 0x9A
EPC_SET_M_PROPERTY_MAP = 0x9B
EPC_GET_M_PROPERTY_MAP = 0x9C
# NOTE(review): "ANNOUCEMENT" is a typo for "ANNOUNCEMENT"; kept as-is because
# renaming the constant would break external references to this name.
EPC_STATUS_CHANGE_ANNOUCEMENT_PROPERTY_MAP = 0x9D
EPC_SET_PROPERTY_MAP = 0x9E
EPC_GET_PROPERTY_MAP = 0x9F
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Gas Leak Sensor (0x01)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_GAS_LEAK_OCCURRENCE_STATUS = 0xB1
EPC_GAS_LEAK_OCCURRENCE_STATUS_RESET = 0xBF
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Crime Prevention Sensor (0x02)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_INVASION_OCCURRENCE_STATUS = 0xB1
EPC_INVASION_OCCURRENCE_STATUS_RESET = 0xBF
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Emergency Button (0x03)
EPC_EMERGENCY_OCCURRENCE_STATUS = 0xB1
EPC_EMERGENCY_OCCURRENCE_STATUS_RESET = 0xBF
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), First-aid Sensor (0x04)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_FIRST_AID_OCCURRENCE_STATUS = 0xB1
EPC_FIRST_AID_OCCURRENCE_STATUS_RESET = 0xBF
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Earthquake Sensor (0x05)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_EARTHQUAKE_OCCURRENCE_STATUS = 0xB1
EPC_EARTHQUAKE_OCCURRENCE_STATUS_RESET = 0xBF
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Electric Leak Sensor (0x06)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_ELECTRIC_LEAK_OCCURRENCE_STATUS = 0xB1
EPC_ELECTRIC_LEAK_OCCURRENCE_STATUS_RESET = 0xBF
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Human Detection Sensor (0x07)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_HUMAN_DETECTION_STATUS = 0xB1
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Visitor Sensor (0x08)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_VISITOR_DETECTION_STATUS = 0xB1
EPC_VISITOR_DETECTION_HOLDING_TIME = 0xBE
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Call Sensor (0x09)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_CALL_STATUS = 0xB1
EPC_CALL_HOLDING_TIME = 0xBE
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Condensation Sensor (0x0A)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_CONDENSATION_DETECTION_STATUS = 0xB1
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Air Pollution Sensor (0x0B)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_AIR_POLLUTION_DETECTION_STATUS = 0xB1
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Oxygen Sensor (0x0C)
EPC_MEASURE_OXYGEN_CONCENTRATION_VALUE = 0xE0
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Illuminance Sensor (0x0D)
EPC_MEASURE_ILLUMINANCE_LUX_VALUE = 0xE0
EPC_MEASURE_ILLUMINANCE_KILOLUX_VALUE = 0xE1
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Sound Sensor (0x0E)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_SOUND_DETECTION_STATUS = 0xB1
EPC_SOUND_DETECTION_HOLDING_TIME = 0xBE
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Mailing Sensor (0x0F)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_MAILING_DETECTION_STATUS = 0xB1
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Weight Sensor (0x10)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_WEIGHT_DETECTION_STATUS = 0xB1
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Temperature Sensor (0x11)
EPC_MEASURE_TEMPERATURE_VALUE = 0xE0
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Humidity Sensor (0x12)
# (comment previously said "Temperature Sensor (0x12)", which contradicted the constant)
EPC_MEASURE_HUMIDITY_VALUE = 0xE0
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Rain Sensor (0x13)
EPC_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_RAIN_DETECTION_STATUS = 0xB1
# Echonet Lite Property (EPC)
# Note: Sensor Class (0x00), Water Level Sensor (0x14)
EPC_WATER_LEVEL_OVER_DETECTION_THRESHOLD_LEVEL = 0xB0
EPC_WATER_LEVEL_OVER_DETECTION_STATUS = 0xB1
EPC_MEASURE_WATER_LEVEL_VALUE = 0xE0
# Echonet Lite Property | |
# <gh_stars>1-10
#Standard python libraries
import itertools
import os
import warnings

import numpy as np
from scipy.sparse import csr_matrix, kron, identity

from .eigen_generator import EigenGenerator
class CalculateCartesianDipoleOperatorLowMemory(EigenGenerator):
    """Calculates the dipole operator in the eigenbasis of the system
    hamiltonian directly in the cartesian basis, one cartesian component
    at a time to keep peak memory low.

    BUG FIX (review): calculate_mu_x/y/z previously computed their results
    into local variables and discarded them, and combine_mu referenced those
    (undefined) locals, raising NameError; __init__ also called
    self.calculate_mu(), which this class never defined.  The per-axis
    results are now stored on the instance and calculate_mu() orchestrates
    the three axis computations plus the final combination.
    """

    def __init__(self, parameter_file_path):
        """Load parameters, build Hamiltonians and dipoles, then compute
        and save the dipole operator mu."""
        super().__init__(parameter_file_path, mask_by_occupation_num=True)
        self.base_path = parameter_file_path
        self.load_params()
        self.set_vibrations()
        self.set_H()
        self.set_molecular_dipoles()
        self.calculate_mu()
        self.save_mu()

    def set_molecular_dipoles(self, *, dipoles=None):
        """Load molecular dipoles from params file, or override with input.

        dipoles - must be a numpy ndarray, with shape (n,3) where n is the
        number of sites, or None to read from the parameter file."""
        if type(dipoles) is np.ndarray:
            self.molecular_dipoles = dipoles
        else:
            self.molecular_dipoles = np.array(self.params['dipoles'], dtype='float')
        self.set_single_to_double_dipole_matrix()

    def set_single_to_double_dipole_matrix(self):
        """Given a set of dipoles for transitions from the ground to the
        singly excited states, constructs the dipole transitions that take the
        system from the singly excited states to the various doubly excited states.
        """
        singly_excited = np.arange(self.molecular_dipoles.shape[0])
        doubly_excited = list(itertools.combinations(singly_excited, 2))
        mat = np.zeros((len(singly_excited), len(doubly_excited), 3))
        for i in range(len(singly_excited)):
            for j in range(len(doubly_excited)):
                tup = doubly_excited[j]
                # |i> -> |ab> carries the dipole of the site that becomes
                # newly excited.
                if i == tup[0]:
                    mat[i, j, :] = self.molecular_dipoles[singly_excited[tup[1]]]
                elif i == tup[1]:
                    mat[i, j, :] = self.molecular_dipoles[singly_excited[tup[0]]]
        self.molecular_dipoles_SEM_to_DEM = mat

    def set_H(self, *, truncation_size=None):
        """Build the manifold Hamiltonians; optionally re-derive the
        vibrational basis with a new truncation size first."""
        if truncation_size:
            self.truncation_size = truncation_size
            self.set_vibrations()
        self.H0 = self.manifold_hamiltonian(0)
        self.H1 = self.manifold_hamiltonian(1)
        if 'DEM' in self.manifolds:
            self.H2 = self.manifold_hamiltonian(2)

    def dipole_matrix(self, starting_manifold_num, next_manifold_num, pol):
        """Calculates the dipole matrix that connects from one manifold to
        the next, using the known dipole moments and the efield polarization.

        Returns a scipy CSR matrix, or None (with a warning) if the two
        manifolds are not adjacent."""
        upper_manifold_num = max(starting_manifold_num, next_manifold_num)
        if abs(starting_manifold_num - next_manifold_num) != 1:
            warnings.warn('Can only move from manifolds 0 to 1 or 1 to 2')
            return None
        # Condon approximation: electronic dipole independent of vibrations.
        vib_identity = identity(self.H0.shape[0])
        if upper_manifold_num == 1:
            d_vec = self.molecular_dipoles.dot(pol)
            d_mat = d_vec[:, np.newaxis]
            overlap_matrix = kron(d_mat, vib_identity)
        elif upper_manifold_num == 2:
            d_mat = self.molecular_dipoles_SEM_to_DEM.dot(pol)
            overlap_matrix = kron(d_mat.T, vib_identity)
        if starting_manifold_num > next_manifold_num:
            # Take transpose if transition is down rather than up
            overlap_matrix = np.conjugate(overlap_matrix.T)
        return overlap_matrix.tocsr()

    def _calculate_mu_component(self, pol, attr_suffix):
        """Compute one cartesian component of mu in the eigenbasis and store
        it as self.mu10_<suffix> (and self.mu21_<suffix> when a DEM exists)."""
        e0 = self.eigenvectors[0]
        e1 = self.eigenvectors[1]
        mu10 = e1.T.dot(self.dipole_matrix(0, 1, pol).dot(e0))
        setattr(self, 'mu10_' + attr_suffix, mu10)
        if 'DEM' in self.manifolds:
            e2 = self.eigenvectors[2]
            mu21 = e2.T.dot(self.dipole_matrix(1, 2, pol).dot(e1))
            setattr(self, 'mu21_' + attr_suffix, mu21)

    def calculate_mu_x(self):
        """x component; stored as self.mu10_x (and self.mu21_x)."""
        self._calculate_mu_component(np.array([1, 0, 0]), 'x')

    def calculate_mu_y(self):
        """y component; stored as self.mu10_y (and self.mu21_y)."""
        self._calculate_mu_component(np.array([0, 1, 0]), 'y')

    def calculate_mu_z(self):
        """z component; stored as self.mu10_z (and self.mu21_z)."""
        self._calculate_mu_component(np.array([0, 0, 1]), 'z')

    def combine_mu(self):
        """Stack the stored x, y, z components into self.mu arrays of shape
        (upper_dim, lower_dim, 3)."""
        mu10 = np.zeros((self.mu10_x.shape[0], self.mu10_x.shape[1], 3))
        mu10[:, :, 0] = self.mu10_x
        mu10[:, :, 1] = self.mu10_y
        mu10[:, :, 2] = self.mu10_z
        self.mu = {'GSM_to_SEM': mu10}
        if 'DEM' in self.manifolds:
            mu21 = np.zeros((self.mu21_x.shape[0], self.mu21_x.shape[1], 3))
            mu21[:, :, 0] = self.mu21_x
            mu21[:, :, 1] = self.mu21_y
            mu21[:, :, 2] = self.mu21_z
            self.mu['SEM_to_DEM'] = mu21

    def calculate_mu(self):
        """Compute all three cartesian components, then assemble self.mu."""
        self.calculate_mu_x()
        self.calculate_mu_y()
        self.calculate_mu_z()
        self.combine_mu()

    def save_mu(self):
        """Write self.mu to mu.npz in the parameter directory."""
        np.savez(os.path.join(self.base_path, 'mu.npz'), **self.mu)
class CalculateCartesianDipoleOperator(EigenGenerator):
    """Constructs the dipole operator in the eigenbasis of the system
    hamiltonian, working directly in the cartesian (x, y, z) basis."""

    def __init__(self, parameter_file_path):
        """Load parameters, build the Hamiltonians and molecular dipoles,
        then compute the dipole operator and write it to disk."""
        super().__init__(parameter_file_path, mask_by_occupation_num=True)
        self.base_path = parameter_file_path
        self.load_params()
        self.set_vibrations()
        self.set_H()
        self.set_molecular_dipoles()
        self.calculate_mu()
        self.save_mu()

    def set_molecular_dipoles(self, *, dipoles=None):
        """Set self.molecular_dipoles from the supplied (n, 3) ndarray, or,
        when dipoles is None, from the parameter file; then derive the
        SEM -> DEM transition dipoles."""
        if type(dipoles) is np.ndarray:
            self.molecular_dipoles = dipoles
        else:
            self.molecular_dipoles = np.array(self.params['dipoles'], dtype='float')
        self.set_single_to_double_dipole_matrix()

    def set_single_to_double_dipole_matrix(self):
        """From the ground -> singly-excited dipoles, build the dipoles that
        connect each singly excited state to the doubly excited states."""
        num_sites = self.molecular_dipoles.shape[0]
        pair_states = list(itertools.combinations(range(num_sites), 2))
        transition = np.zeros((num_sites, len(pair_states), 3))
        for col, (site_a, site_b) in enumerate(pair_states):
            # |a> -> |ab> carries site b's dipole, and vice versa.
            transition[site_a, col, :] = self.molecular_dipoles[site_b]
            transition[site_b, col, :] = self.molecular_dipoles[site_a]
        self.molecular_dipoles_SEM_to_DEM = transition

    def set_H(self, *, truncation_size=None):
        """Build the manifold Hamiltonians; optionally re-derive the
        vibrational basis with a new truncation size first."""
        if truncation_size:
            self.truncation_size = truncation_size
            self.set_vibrations()
        self.H0 = self.manifold_hamiltonian(0)
        self.H1 = self.manifold_hamiltonian(1)
        if 'DEM' in self.manifolds:
            self.H2 = self.manifold_hamiltonian(2)

    def dipole_matrix(self, starting_manifold_num, next_manifold_num, pol):
        """Return the sparse dipole matrix connecting two adjacent manifolds,
        projected onto the polarization vector pol; None (with a warning)
        when the manifolds are not adjacent."""
        if abs(starting_manifold_num - next_manifold_num) != 1:
            warnings.warn('Can only move from manifolds 0 to 1 or 1 to 2')
            return None
        upper = max(starting_manifold_num, next_manifold_num)
        # Condon approximation
        vib_eye = identity(self.H0.shape[0])
        if upper == 1:
            site_block = self.molecular_dipoles.dot(pol)[:, np.newaxis]
            overlap = kron(site_block, vib_eye)
        elif upper == 2:
            site_block = self.molecular_dipoles_SEM_to_DEM.dot(pol)
            overlap = kron(site_block.T, vib_eye)
        if starting_manifold_num > next_manifold_num:
            # Downward transitions use the conjugate transpose.
            overlap = np.conjugate(overlap.T)
        return overlap.tocsr()

    def calculate_mu(self):
        """Assemble self.mu: each entry maps a manifold pair to an array of
        shape (upper_dim, lower_dim, 3) holding the x, y, z components."""
        unit_vectors = (np.array([1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1]))
        e0 = self.eigenvectors[0]
        e1 = self.eigenvectors[1]
        components10 = [e1.T.dot(self.dipole_matrix(0, 1, pol).dot(e0))
                        for pol in unit_vectors]
        mu10 = np.zeros((components10[0].shape[0], components10[0].shape[1], 3))
        for axis, component in enumerate(components10):
            mu10[:, :, axis] = component
        self.mu = {'GSM_to_SEM': mu10}
        if 'DEM' in self.manifolds:
            e2 = self.eigenvectors[2]
            components21 = [e2.T.dot(self.dipole_matrix(1, 2, pol).dot(e1))
                            for pol in unit_vectors]
            mu21 = np.zeros((components21[0].shape[0], components21[0].shape[1], 3))
            for axis, component in enumerate(components21):
                mu21[:, :, axis] = component
            self.mu['SEM_to_DEM'] = mu21

    def save_mu(self):
        """Persist the dipole operator dictionary to mu.npz."""
        np.savez(os.path.join(self.base_path, 'mu.npz'), **self.mu)
class CalculateDipoleOperator(EigenGenerator):
    """This class calculates the dipole operator in the eigenbasis of
    the system Hamiltonian using the eigenvectors"""
    def __init__(self,parameter_file_path):
        # Load the system parameters, then restore a previously cached
        # site-basis dipole operator or compute and save a fresh one.
        super().__init__(parameter_file_path)
        self.base_path = parameter_file_path
        self.load_params()
        self.set_mu()
    def x0(self,size):
        """Defines the identity operator in the vibrational space"""
        ham = np.diag(np.ones(size))
        return csr_matrix(ham)
    def x1(self,size):
        """Defines the position operator in the vibrational space"""
        # Harmonic-oscillator position operator in the number basis:
        # nonzero only on the first off-diagonals, <n|x|n+1> = sqrt((n+1)/2)
        def offdiag1(n):
            return np.sqrt((n+1)/2)
        n = np.arange(0,size)
        off1 = offdiag1(n[0:-1])
        ham = np.zeros((size,size))
        ham += np.diag(off1,k=1) + np.diag(off1,k=-1)
        return csr_matrix(ham)
    def new_vibration_identity_kron(self,position,item):
        """Takes in an operator on a single vibrational and krons it with the
        correct number of vibrational identities, inserting it into its
        position as indexed by its mode position as specified in the
        input file
        """
        # One identity per remaining vibrational mode; 'item' replaces
        # the identity at the requested mode position.
        identities = [np.identity(self.truncation_size) for n in
                      range(self.num_vibrations-1)]
        identities.insert(position,item)
        mat = identities.pop(0)
        for next_item in identities:
            mat = kron(mat,next_item)
        return mat
    def mu_vibrational_space(self):
        """Untested for condon violations """
        # Start from the identity on the full vibrational space
        ident = self.x0(self.truncation_size)
        mu = self.new_vibration_identity_kron(0,ident) # Condon Approximation
        try:
            kappas = np.array(self.params['kappa'])
        except KeyError:
            # If parameters file does not specify a condon violation,
            # Assume condon approximation holds
            kappas = np.zeros(self.num_vibrations)
        if np.all(kappas == 0):
            # Assumes condon approximation
            pass
        else:
            # Linear condon violation supported so far
            # (adds kappa_i * x_i for each vibrational mode i)
            x = self.x1(self.truncation_size)
            for i in range(kappas.size):
                mu += kappas[i] * self.new_vibration_identity_kron(i,x)
        return mu
    def mu_inner_product(self,eigmats1,eigmats2):
        """Example of how to write a potentially more complicated
        dipole operator on the vibrational space.

        Args:
            eigmats1 (np.ndarray): 3d numpy array with indices [n,m,o]
                where n is the site index, m is the vibrational-space index
                and o is the eigen index
            eigmast2 (np.ndarray): 3d numpy array with indices [n,m,o] (same
                as eigmats1)
        """
        sites1, vib, num_eig1 = eigmats1.shape
        sites2, vib, num_eig2 = eigmats2.shape
        in_prod = np.zeros((num_eig1,num_eig2,sites1,sites2))
        vib_mu = self.mu_vibrational_space()
        # iterate over all sites
        for i in range(sites1):
            eigvecs1 = eigmats1[i,...]
            # Take matrix product with vibrational space mu
            eigvecs1 = vib_mu.dot(eigvecs1)
            for j in range(sites2):
                eigvecs2 = eigmats2[j,...]
                in_prod[...,i,j] = np.dot(eigvecs1.T,eigvecs2)
        return in_prod
    def simple_inner_product(self,eigmats1,eigmats2):
        # Condon-approximation overlap: contract the shared vibrational
        # index, leaving indices (eig1, eig2, site1, site2)
        return np.einsum('mji,njk',eigmats1,eigmats2)
    def make_overlap_matrix(self,manifold1,manifold2):
        """Reshape the eigenvector matrices of two manifolds into
        (site, vibration, eigenstate) form and return their overlap."""
        eigvecs1 = self.eigenvectors[manifold1]
        eigvecs2 = self.eigenvectors[manifold2]
        num_eigvals1 = self.eigenvalues[manifold1].size
        num_eigvals2 = self.eigenvalues[manifold2].size
        num_sites1 = len(self.energies[manifold1])
        num_sites2 = len(self.energies[manifold2])
        # NOTE(review): assumes all manifolds share the vibrational-space
        # size of manifold 0 -- confirm against EigenGenerator
        vibration_space_size = len(self.eigenvectors[0][:,0])
        eigmats1 = eigvecs1.reshape((num_sites1,vibration_space_size,num_eigvals1))
        eigmats2 = eigvecs2.reshape((num_sites2,vibration_space_size,num_eigvals2))
        overlap_matrix = self.simple_inner_product(eigmats1,eigmats2)
        return overlap_matrix
    def calculate_mu(self):
        """Compute the site-basis dipole overlaps for each manifold pair."""
        self.mu_GSM_to_SEM_site = self.make_overlap_matrix(1,0)
        if 'DEM' in self.manifolds:
            self.mu_SEM_to_DEM_site = self.make_overlap_matrix(2,1)
    def set_mu(self):
        """Load cached site-basis dipole overlaps, or compute and save them."""
        file_name = os.path.join(self.base_path,'mu_site_basis.npz')
        try:
            mu_archive = np.load(file_name)
            self.mu_GSM_to_SEM_site = mu_archive['GSM_to_SEM']
            if 'DEM' in self.manifolds:
                self.mu_SEM_to_DEM_site = mu_archive['SEM_to_DEM']
        except (FileNotFoundError, KeyError):
            # Missing file or missing manifold entry: recompute and cache
            self.calculate_mu()
            self.save_mu()
    def save_mu(self):
        """Write the site-basis dipole overlaps to ``mu_site_basis.npz``."""
        file_name = os.path.join(self.base_path,'mu_site_basis.npz')
        mu_site_dict = {'GSM_to_SEM':self.mu_GSM_to_SEM_site}
        if 'DEM' in self.manifolds:
            mu_site_dict['SEM_to_DEM'] = self.mu_SEM_to_DEM_site
        np.savez(file_name,**mu_site_dict)
class DipoleConverter(CalculateDipoleOperator):
"""Converts mu represented in the site basis into mu represented
in cartesian coordinates
"""
    def __init__(self,parameter_file_path):
        # Build the site-basis dipole operator via the parent class,
        # then attach the cartesian molecular dipoles and write out the
        # cartesian-basis operator.
        super().__init__(parameter_file_path)
        self.set_molecular_dipoles()
        self.save_cartesian_mu()
### Setting the molecular dipole
def set_molecular_dipoles(self,*,dipoles = None):
"""Load molecular dipoles from params file, or override with input
dipoles - must be a numpy ndarray, with shape (n,3) where n is the number of sites"""
if type(dipoles) is np.ndarray:
self.molecular_dipoles = dipoles
else:
self.molecular_dipoles | |
- 'prev', 'next' as count
# - Sequence-Negation option
all = self.listmessages()
# Observed behavior: test for empty folder is done first
if not all:
raise Error, "no messages in %s" % self.name
# Common case first: all is frequently the default
if seq == 'all':
return all
# Test for X:Y before X-Y because 'seq:-n' matches both
i = seq.find(':')
if i >= 0:
head, dir, tail = seq[:i], '', seq[i+1:]
if tail[:1] in '-+':
dir, tail = tail[:1], tail[1:]
if not isnumeric(tail):
raise Error, "bad message list %s" % seq
try:
count = int(tail)
except (ValueError, OverflowError):
# Can't use sys.maxint because of i+count below
count = len(all)
try:
anchor = self._parseindex(head, all)
except Error, msg:
seqs = self.getsequences()
if not head in seqs:
if not msg:
msg = "bad message list %s" % seq
raise Error, msg, sys.exc_info()[2]
msgs = seqs[head]
if not msgs:
raise Error, "sequence %s empty" % head
if dir == '-':
return msgs[-count:]
else:
return msgs[:count]
else:
if not dir:
if head in ('prev', 'last'):
dir = '-'
if dir == '-':
i = bisect(all, anchor)
return all[max(0, i-count):i]
else:
i = bisect(all, anchor-1)
return all[i:i+count]
# Test for X-Y next
i = seq.find('-')
if i >= 0:
begin = self._parseindex(seq[:i], all)
end = self._parseindex(seq[i+1:], all)
i = bisect(all, begin-1)
j = bisect(all, end)
r = all[i:j]
if not r:
raise Error, "bad message list %s" % seq
return r
# Neither X:Y nor X-Y; must be a number or a (pseudo-)sequence
try:
n = self._parseindex(seq, all)
except Error, msg:
seqs = self.getsequences()
if not seq in seqs:
if not msg:
msg = "bad message list %s" % seq
raise Error, msg
return seqs[seq]
else:
if n not in all:
if isnumeric(seq):
raise Error, "message %d doesn't exist" % n
else:
raise Error, "no %s message" % seq
else:
return [n]
    def _parseindex(self, seq, all):
        """Internal: parse a message number (or cur, first, etc.)."""
        # Numeric strings parse directly; values too large for int are
        # clamped to sys.maxint so later bisection still behaves.
        if isnumeric(seq):
            try:
                return int(seq)
            except (OverflowError, ValueError):
                return sys.maxint
        # Pseudo-names resolve against the current message or the
        # sorted message list 'all'.
        if seq in ('cur', '.'):
            return self.getcurrent()
        if seq == 'first':
            return all[0]
        if seq == 'last':
            return all[-1]
        if seq == 'next':
            n = self.getcurrent()
            i = bisect(all, n)    # first message strictly after cur
            try:
                return all[i]
            except IndexError:
                raise Error, "no next message"
        if seq == 'prev':
            n = self.getcurrent()
            i = bisect(all, n-1)    # first message >= cur
            if i == 0:
                raise Error, "no prev message"
            try:
                return all[i-1]
            except IndexError:
                raise Error, "no prev message"
        # Not a number or a known pseudo-name; the caller may still
        # interpret 'seq' as a sequence name (a None message lets the
        # caller substitute its own error text).
        raise Error, None
    def openmessage(self, n):
        """Open a message -- returns a Message object."""
        # The Message constructor opens the on-disk file for message n.
        return Message(self, n)
    def removemessages(self, list):
        """Remove one or more messages -- may raise os.error."""
        errors = []
        deleted = []
        for n in list:
            path = self.getmessagefilename(n)
            # MH convention: a leading comma marks a backup copy; the
            # "removed" message is really renamed to ',<n>'.
            commapath = self.getmessagefilename(',' + str(n))
            try:
                os.unlink(commapath)    # drop any stale backup first
            except os.error:
                pass
            try:
                os.rename(path, commapath)
            except os.error, msg:
                errors.append(msg)    # keep going; report at the end
            else:
                deleted.append(n)
        if deleted:
            self.removefromallsequences(deleted)
        if errors:
            if len(errors) == 1:
                raise os.error, errors[0]
            else:
                raise os.error, ('multiple errors:', errors)
    def refilemessages(self, list, tofolder, keepsequences=0):
        """Refile one or more messages -- may raise os.error.
        'tofolder' is an open folder object."""
        errors = []
        refiled = {}
        for n in list:
            # Destination number: one past the target folder's last message
            ton = tofolder.getlast() + 1
            path = self.getmessagefilename(n)
            topath = tofolder.getmessagefilename(ton)
            try:
                os.rename(path, topath)
            except os.error:
                # Try copying
                # (rename can fail, e.g. across filesystems)
                try:
                    shutil.copy2(path, topath)
                    os.unlink(path)
                except (IOError, os.error), msg:
                    errors.append(msg)
                    try:
                        os.unlink(topath)    # don't leave a partial copy
                    except os.error:
                        pass
                    continue
            tofolder.setlast(ton)
            refiled[n] = ton
        if refiled:
            if keepsequences:
                # Carry sequence membership over to the target folder
                tofolder._copysequences(self, refiled.items())
            self.removefromallsequences(refiled.keys())
        if errors:
            if len(errors) == 1:
                raise os.error, errors[0]
            else:
                raise os.error, ('multiple errors:', errors)
    def _copysequences(self, fromfolder, refileditems):
        """Helper for refilemessages() to copy sequences."""
        fromsequences = fromfolder.getsequences()
        tosequences = self.getsequences()
        changed = 0
        for name, seq in fromsequences.items():
            try:
                toseq = tosequences[name]
                new = 0    # existing sequence: extended in place
            except KeyError:
                toseq = []
                new = 1    # sequence not yet present in this folder
            for fromn, ton in refileditems:
                if fromn in seq:
                    # The refiled message keeps its sequence membership,
                    # under its new number in the target folder
                    toseq.append(ton)
                    changed = 1
            if new and toseq:
                tosequences[name] = toseq
        if changed:
            self.putsequences(tosequences)
    def movemessage(self, n, tofolder, ton):
        """Move one message over a specific destination message,
        which may or may not already exist."""
        path = self.getmessagefilename(n)
        # Open it to check that it exists
        f = open(path)
        f.close()
        del f
        topath = tofolder.getmessagefilename(ton)
        backuptopath = tofolder.getmessagefilename(',%d' % ton)
        try:
            # Preserve any existing destination message as a ',<ton>' backup
            os.rename(topath, backuptopath)
        except os.error:
            pass
        try:
            os.rename(path, topath)
        except os.error:
            # Try copying
            ok = 0
            try:
                # Destination numbering changed: invalidate cached 'last'
                tofolder.setlast(None)
                shutil.copy2(path, topath)
                ok = 1
            finally:
                if not ok:
                    # Copy failed: remove any partial destination file
                    try:
                        os.unlink(topath)
                    except os.error:
                        pass
            os.unlink(path)
        self.removefromallsequences([n])
    def copymessage(self, n, tofolder, ton):
        """Copy one message over a specific destination message,
        which may or may not already exist."""
        path = self.getmessagefilename(n)
        # Open it to check that it exists
        f = open(path)
        f.close()
        del f
        topath = tofolder.getmessagefilename(ton)
        backuptopath = tofolder.getmessagefilename(',%d' % ton)
        try:
            # Preserve any existing destination message as a ',<ton>' backup
            os.rename(topath, backuptopath)
        except os.error:
            pass
        ok = 0
        try:
            # Destination numbering changed: invalidate cached 'last'
            tofolder.setlast(None)
            shutil.copy2(path, topath)
            ok = 1
        finally:
            if not ok:
                # Copy failed: remove any partial destination file
                try:
                    os.unlink(topath)
                except os.error:
                    pass
    def createmessage(self, n, txt):
        """Create a message, with text from the open file txt."""
        path = self.getmessagefilename(n)
        backuppath = self.getmessagefilename(',%d' % n)
        try:
            # Keep any existing message n as a comma-prefixed backup
            os.rename(path, backuppath)
        except os.error:
            pass
        ok = 0
        BUFSIZE = 16*1024
        try:
            f = open(path, "w")
            # Copy txt to the message file in fixed-size chunks
            while 1:
                buf = txt.read(BUFSIZE)
                if not buf:
                    break
                f.write(buf)
            f.close()
            ok = 1
        finally:
            if not ok:
                # Writing failed part-way: remove the partial file
                try:
                    os.unlink(path)
                except os.error:
                    pass
    def removefromallsequences(self, list):
        """Remove one or more messages from all sequences (including last)
        -- but not from 'cur'!!!"""
        # Invalidate the cached last-message number if it is being removed
        if hasattr(self, 'last') and self.last in list:
            del self.last
        sequences = self.getsequences()
        changed = 0
        for name, seq in sequences.items():
            if name == 'cur':
                continue
            for n in list:
                if n in seq:
                    seq.remove(n)
                    changed = 1
            if not seq:
                # Sequence became empty; drop it entirely.  Deleting
                # while looping is safe here because Python 2 items()
                # returns a list copy.
                del sequences[name]
        if changed:
            self.putsequences(sequences)
    def getlast(self):
        """Return the last message number."""
        # 'last' is cached as a side effect of listmessages()
        if not hasattr(self, 'last'):
            self.listmessages() # Set self.last
        return self.last
    def setlast(self, last):
        """Set the last message number."""
        # Passing None invalidates the cached value instead of storing it
        if last is None:
            if hasattr(self, 'last'):
                del self.last
        else:
            self.last = last
class Message(mimetools.Message):
    """A single MH message; adds header/body helpers to mimetools.Message."""
    def __init__(self, f, n, fp = None):
        """Constructor."""
        # f is the containing folder, n the message number.  When no
        # file object is supplied, open the message file from the folder.
        self.folder = f
        self.number = n
        if fp is None:
            path = f.getmessagefilename(n)
            fp = open(path, 'r')
        mimetools.Message.__init__(self, fp)
    def __repr__(self):
        """String representation."""
        return 'Message(%s, %s)' % (repr(self.folder), self.number)
    def getheadertext(self, pred = None):
        """Return the message's header text as a string.  If an
        argument is specified, it is used as a filter predicate to
        decide which headers to return (its argument is the header
        name converted to lower case)."""
        if pred is None:
            return ''.join(self.headers)
        headers = []
        hit = 0
        for line in self.headers:
            # Continuation lines (leading whitespace) inherit the
            # verdict 'hit' of the preceding header line.
            if not line[0].isspace():
                i = line.find(':')
                if i > 0:
                    hit = pred(line[:i].lower())
            if hit: headers.append(line)
        return ''.join(headers)
    def getbodytext(self, decode = 1):
        """Return the message's body text as string.  This undoes a
        Content-Transfer-Encoding, but does not interpret other MIME
        features (e.g. multipart messages).  To suppress decoding,
        pass 0 as an argument."""
        self.fp.seek(self.startofbody)
        encoding = self.getencoding()
        if not decode or encoding in ('', '7bit', '8bit', 'binary'):
            # Identity encodings need no transformation
            return self.fp.read()
        from StringIO import StringIO
        output = StringIO()
        mimetools.decode(self.fp, output, encoding)
        return output.getvalue()
    def getbodyparts(self):
        """Only for multipart messages: return the message's body as a
        list of SubMessage objects.  Each submessage object behaves
        (almost) as a Message object."""
        if self.getmaintype() != 'multipart':
            raise Error, 'Content-Type is not multipart/*'
        bdry = self.getparam('boundary')
        if not bdry:
            raise Error, 'multipart/* without boundary param'
        self.fp.seek(self.startofbody)
        mf = multifile.MultiFile(self.fp)
        mf.push(bdry)
        parts = []
        while mf.next():
            # Part "numbers" look like '<msgnum>.<partindex>', 1-based
            n = "%s.%r" % (self.number, 1 + len(parts))
            part = SubMessage(self.folder, n, mf)
            parts.append(part)
        mf.pop()
        return parts
    def getbody(self):
        """Return body, either a string or a list of messages."""
        if self.getmaintype() == 'multipart':
            return self.getbodyparts()
        else:
            return self.getbodytext()
class SubMessage(Message):
    def __init__(self, f, n, fp):
        """Constructor."""
        Message.__init__(self, f, n, fp)
        # Read the body eagerly -- presumably because fp (a MultiFile)
        # is advanced past this part once the next part is parsed;
        # verify against getbodyparts() in the enclosing Message class.
        if self.getmaintype() == 'multipart':
            self.body = Message.getbodyparts(self)
        else:
            self.body = Message.getbodytext(self)
        self.bodyencoded = Message.getbodytext(self, decode=0)
        # XXX If this is big, should remember file pointers
    def __repr__(self):
        """String representation."""
        # Unlike Message.__repr__, the folder is not repr()'d here
        f, n, fp = self.folder, self.number, self.fp
        return 'SubMessage(%s, %s, %s)' % (f, n, fp)
def getbodytext(self, decode = 1):
if not decode:
return self.bodyencoded
| |
#!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
import sys
import os
import urllib.request, urllib.parse, urllib.error
from .models import *
class GroupApi(object):
    def __init__(self, apiClient):
        # apiClient performs the HTTP calls and (de)serialization for
        # every endpoint method on this class.
        self.apiClient = apiClient
def getGroup(self, **kwargs):
"""Returns groups by name and/or type
Args:
groupType, str: groupType (required)
groupName, str: groupName (required)
Returns: ResourceGroupListResult
"""
allParams = ['groupType', 'groupName']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getGroup" % key)
params[key] = val
del params['kwargs']
resourcePath = '/group'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('groupType' in params):
queryParams['groupType'] = self.apiClient.toPathValue(params['groupType'])
if ('groupName' in params):
queryParams['groupName'] = self.apiClient.toPathValue(params['groupName'])
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'ResourceGroupListResult')
return responseObject
def updateGroup(self, **kwargs):
"""Updates a group specified by id
Args:
groupDTO, ResourceGroupDTO: Grouping request that holds the group information (required)
Returns: TaskIdResult
"""
allParams = ['groupDTO']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateGroup" % key)
params[key] = val
del params['kwargs']
resourcePath = '/group'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('groupDTO' in params):
bodyParam = params['groupDTO']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def createGroup(self, **kwargs):
"""Creates a group with the information passed in the request
Args:
groupDTO, ResourceGroupDTO: Grouping request that holds the group information (required)
Returns: TaskIdResult
"""
allParams = ['groupDTO']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createGroup" % key)
params[key] = val
del params['kwargs']
resourcePath = '/group'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('groupDTO' in params):
bodyParam = params['groupDTO']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def getGroupCount(self, **kwargs):
"""Returns the number of groups
Args:
groupType, str: groupType (required)
groupName, str: groupName (required)
Returns: CountResult
"""
allParams = ['groupType', 'groupName']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getGroupCount" % key)
params[key] = val
del params['kwargs']
resourcePath = '/group/count'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('groupType' in params):
queryParams['groupType'] = self.apiClient.toPathValue(params['groupType'])
if ('groupName' in params):
queryParams['groupName'] = self.apiClient.toPathValue(params['groupName'])
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'CountResult')
return responseObject
def getSupportedClassNameAliases(self, **kwargs):
"""Retrieves the names for supported resource types
Args:
Returns: GroupTypesResult
"""
allParams = []
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getSupportedClassNameAliases" % key)
params[key] = val
del params['kwargs']
resourcePath = '/group/member/type'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'GroupTypesResult')
return responseObject
def getGroupsByMemberId(self, **kwargs):
"""Returns the groups containing the member
Args:
id, str: Member ID (required)
groupType, str: groupType (required)
Returns: ResourceGroupListResult
"""
allParams = ['id', 'groupType']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getGroupsByMemberId" % key)
params[key] = val
del params['kwargs']
resourcePath = '/group/member/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('groupType' in params):
queryParams['groupType'] = self.apiClient.toPathValue(params['groupType'])
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'ResourceGroupListResult')
return responseObject
def getGroupTypes(self, **kwargs):
"""Returns the group types supported
Args:
Returns: GroupTypesResult
"""
allParams = []
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getGroupTypes" % key)
params[key] = val
del params['kwargs']
resourcePath = '/group/type'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'GroupTypesResult')
return responseObject
def getGroupById(self, **kwargs):
"""Returns the group by id
Args:
id, str: Group ID (required)
Returns: ResourceGroupResult
"""
allParams = ['id']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getGroupById" % key)
params[key] = val
del params['kwargs']
resourcePath = '/group/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'ResourceGroupResult')
return responseObject
def deleteGroup(self, **kwargs):
"""Deletes a group specified by id
Args:
id, str: Group ID (required)
Returns: TaskIdResult
"""
allParams = ['id']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteGroup" % key)
params[key] = val
del params['kwargs']
resourcePath = '/group/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def getMembersByGroupId(self, **kwargs):
"""Returns the members from the group specified by | |
.. seealso:: `flatten`, `insert_dimension`, `transpose`
:Parameters:
axes: (sequence of) `int`, optional
The positions of the size one axes to be removed. By
default all size one axes are removed.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`Data` or `None`
The data with removed data axes. If the operation was
in-place then `None` is returned.
**Examples:**
>>> d.shape
(1, 73, 1, 96)
>>> f.squeeze().shape
(73, 96)
>>> d.squeeze(0).shape
(73, 1, 96)
>>> d.squeeze([-3, 2]).shape
(73, 96)
>>> d.squeeze(2, inplace=True)
>>> d.shape
(1, 73, 96)
"""
d = _inplace_enabled_define_and_cleanup(self)
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't squeeze data: {error}")
shape = d.shape
if axes is None:
axes = tuple([i for i, n in enumerate(shape) if n == 1])
else:
# Check the squeeze axes
for i in axes:
if shape[i] > 1:
raise ValueError(
"Can't squeeze data: "
f"Can't remove axis of size {shape[i]}"
)
if not axes:
return d
array = self.array
array = numpy.squeeze(array, axes)
d._set_Array(array, copy=False)
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
def sum(self, axes=None):
"""Return the sum of an array or the sum along axes.
Missing data array elements are omitted from the calculation.
.. seealso:: `max`, `min`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to calculate the sum. By default the
sum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
The sum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.sum()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[276]]]]>
>>> print(e.array)
[[[[276]]]]
>>> e = d.sum(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[12, ..., 57]]]]>
>>> print(e.array)
[[[[12 15 18 21]]
[[48 51 54 57]]]]
>>> e = d.sum([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[66, 210]]]]>
>>> print(e.array)
[[[[ 66]]
[[210]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't sum data: {error}")
array = self.array
array = numpy.sum(array, axis=axes, keepdims=True)
d = self.copy(array=False)
d._set_Array(array, copy=False)
if d.shape != self.shape:
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
@_inplace_enabled(default=False)
def transpose(self, axes=None, inplace=False):
"""Permute the axes of the data array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `insert_dimension`, `squeeze`
:Parameters:
axes: (sequence of) `int`
The new axis order. By default the order is reversed.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with permuted data axes. If the operation was
in-place then `None` is returned.
**Examples:**
>>> d.shape
(19, 73, 96)
>>> d.transpose().shape
(96, 73, 19)
>>> d.transpose([1, 0, 2]).shape
(73, 19, 96)
>>> d.transpose([-1, 0, 1], inplace=True)
>>> d.shape
(96, 19, 73)
"""
d = _inplace_enabled_define_and_cleanup(self)
ndim = d.ndim
# Parse the axes. By default, reverse the order of the axes.
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't transpose data: {error}")
if axes is None:
if ndim <= 1:
return d
axes = tuple(range(ndim - 1, -1, -1))
elif len(axes) != ndim:
raise ValueError(
f"Can't transpose data: Axes don't match array: {axes}"
)
# Return unchanged if axes are in the same order as the data
if axes == tuple(range(ndim)):
return d
array = self.array
array = numpy.transpose(array, axes=axes)
d._set_Array(array, copy=False)
return d
def get_compressed_axes(self):
"""Returns the dimensions that are compressed in the array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `get_compressed_dimension`,
`get_compression_type`
:Returns:
`list`
The dimensions of the data that are compressed to a single
dimension in the underlying array. If the data are not
compressed then an empty list is returned.
**Examples:**
>>> d.shape
(2, 3, 4, 5, 6)
>>> d.compressed_array.shape
(2, 14, 6)
>>> d.get_compressed_axes()
[1, 2, 3]
>>> d.get_compression_type()
''
>>> d.get_compressed_axes()
[]
"""
ca = self._get_Array(None)
if ca is None:
return []
return ca.get_compressed_axes()
def get_compression_type(self):
"""Returns the type of compression applied to the array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `compression_axes`,
`get_compressed_dimension`
:Returns:
`str`
The compression type. An empty string means that no
compression has been applied.
**Examples:**
>>> d.get_compression_type()
''
>>> d.get_compression_type()
'gathered'
>>> d.get_compression_type()
'ragged contiguous'
"""
ma = self._get_Array(None)
if ma is None:
return ""
return ma.get_compression_type()
@classmethod
def empty(cls, shape, dtype=None, units=None, calendar=None):
"""Create a new data array without initialising the elements.
Note that the mask of the returned empty data is hard.
.. seealso:: `full`, `ones`, `zeros`
:Parameters:
shape: `int` or `tuple` of `int`
The shape of the new array.
dtype: `numpy.dtype` or any object convertible to `numpy.dtype`
The data-type of the new array. By default the
data-type is ``float``.
units: `str` or `Units`
The units for the empty data array.
calendar: `str`, optional
The calendar for reference time units.
:Returns:
`{{class}}`
**Examples:**
>>> d = {{package}}.{{class}}.empty((96, 73))
"""
return cls(
numpy.empty(shape=shape, dtype=dtype),
units=units,
calendar=calendar,
)
@_manage_log_level_via_verbosity
def equals(
self,
other,
rtol=None,
atol=None,
verbose=None,
ignore_data_type=False,
ignore_fill_value=False,
ignore_compression=True,
ignore_type=False,
_check_values=True,
):
"""Whether two data arrays are the same.
Equality is strict by default. This means that for data arrays to
be considered equal:
* the units and calendar must be the same,
..
* the fill value must be the same (see the *ignore_fill_value*
parameter), and
..
* the arrays must have same shape and data type, the same missing
data mask, and be element-wise equal (see the *ignore_data_type*
parameter).
{{equals tolerance}}
Any compression is ignored by default, with only the arrays in
their uncompressed forms being compared. See the
*ignore_compression* parameter.
Any type of object may be tested but, in general, equality is only
possible with another cell measure construct, or a subclass of
one. See the *ignore_type* parameter.
.. versionadded:: (cfdm) 1.7.0
:Parameters:
other:
The object to compare for equality.
{{atol: number, optional}}
{{rtol: number, optional}}
ignore_fill_value: `bool`, optional
If True then the fill value is omitted from the
comparison.
{{ignore_data_type: `bool`, optional}}
{{ignore_compression: `bool`, optional}}
{{ignore_type: `bool`, optional}}
{{verbose: `int` or `str` or `None`, optional}}
:Returns:
`bool`
Whether the two data arrays are equal.
**Examples:**
>>> d.equals(d)
True
>>> d.equals(d.copy())
True
>>> d.equals('not a data array')
False
"""
pp = super()._equals_preprocess(
other, verbose=verbose, ignore_type=ignore_type
)
if pp is True or pp is False:
return pp
other = pp
# Check that each instance has the same shape
if self.shape != other.shape:
logger.info(
f"{self.__class__.__name__}: Different shapes: "
f"{self.shape} != {other.shape}"
) # pragma: no cover
return False
# Check that each instance has the same fill value
if not ignore_fill_value and self.get_fill_value(
None
) != other.get_fill_value(None):
logger.info(
f"{self.__class__.__name__}: Different fill value: "
f"{self.get_fill_value(None)} != {other.get_fill_value(None)}"
) # pragma: no cover
return False
# Check that each instance has the same data type
if not ignore_data_type and self.dtype != other.dtype:
logger.info(
f"{self.__class__.__name__}: Different data types: "
f"{self.dtype} != {other.dtype}"
) # pragma: no cover
return False
# Return now if we have been asked to not check the array
# values
if not _check_values:
return True
# Check that each instance has the same units
for attr in ("units", "calendar"):
x = getattr(self, "get_" + attr)(None)
y = getattr(other, "get_" + attr)(None)
if x != y:
logger.info(
f"{self.__class__.__name__}: Different {attr}: "
f"{x!r} != {y!r}"
) # pragma: no cover
return False
if not ignore_compression:
# --------------------------------------------------------
# Check for equal compression types
# --------------------------------------------------------
compression_type = self.get_compression_type()
if compression_type != other.get_compression_type():
logger.info(
f"{self.__class__.__name__}: Different compression types: "
f"{compression_type} != {other.get_compression_type()}"
) # pragma: no cover
return False
# --------------------------------------------------------
# Check for equal compressed array values
# --------------------------------------------------------
if compression_type:
if not self._equals(
self.compressed_array,
other.compressed_array,
rtol=rtol,
atol=atol,
):
logger.info(
f"{self.__class__.__name__}: Different compressed "
"array values"
) # pragma: | |
# <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, timeit
import numpy as np
import matplotlib.pyplot as plt
from bmpProcessing.utils.Utils import get_int_from_bytes
class BmpProcessing:
'''
Implementation of an BMP format image
'''
def __init__(self, img, verbose):
'''
Initialisation BmpProcessing variables
'''
# input file name
self.img = img
# verbose to display more information
self.verbose = verbose
# all bitmap bytes
self.octets = []
# bitmap image bytes in matrix format
self.image_matrix = None
#BITMAP FILE HEADER
self.bf_type = None
self.bf_size = None
self.bf_reserved1 = None
self.bf_reserved2 = None
self.bf_offbits = None
#BITMAP INFO HEADER
self.bi_size = None
self.bi_width = None
self.bi_height = None
self.bi_planes = None
self.bi_bitcount = None
self.bi_compression = None
self.bi_sizeimage = None
self.bi_xpelspermeter = None
self.bi_ypelspermeter = None
self.bi_clrused = None
self.bi_clrimportant = None
# Bitmap palette
self.b_palette = []
# overlay part
self.overlay_name = None
self.overlay_header = []
self.overlay_palette = []
self.overlay_image = []
self.overlay_image_matrix = None
def fit(self, is_processing):
    '''
    Fit the bitmap header
    1. if output is required we fit the bitmap palette and image bytes
    '''
    if self.verbose == True:
        #-------performance calculation--------
        t0 = timeit.default_timer()
        print("Start fitting time:", t0)
        #--------------------------------------
    # fit all bitmap bytes into self.octets
    self.ouverture_fichiers_image()
    # Byte layout of the two fixed headers: BITMAPFILEHEADER (bytes 0-13)
    # followed by BITMAPINFOHEADER (bytes 14-53).  Each attribute keeps the
    # raw byte slice, exactly as before.
    layout = (
        ('bf_type', 0, 2), ('bf_size', 2, 6),
        ('bf_reserved1', 6, 8), ('bf_reserved2', 8, 10),
        ('bf_offbits', 10, 14),
        ('bi_size', 14, 18), ('bi_width', 18, 22),
        ('bi_height', 22, 26), ('bi_planes', 26, 28),
        ('bi_bitcount', 28, 30), ('bi_compression', 30, 34),
        ('bi_sizeimage', 34, 38), ('bi_xpelspermeter', 38, 42),
        ('bi_ypelspermeter', 42, 46), ('bi_clrused', 46, 50),
        ('bi_clrimportant', 50, 54),
    )
    for name, lo, hi in layout:
        setattr(self, name, self.octets[lo:hi])
    # if output or histogram is required
    if is_processing:
        # palette sits between the headers and the pixel-data offset
        pixel_offset = get_int_from_bytes(self.bf_offbits.tolist())
        self.b_palette = self.octets[54:pixel_offset]
        # pixel bytes reshaped to (height, width, bytes-per-pixel)
        self.image_matrix = self.octets[pixel_offset:].reshape(
            get_int_from_bytes(self.bi_height.tolist()),
            get_int_from_bytes(self.bi_width.tolist()),
            int(get_int_from_bytes(self.bi_bitcount.tolist()) / 8)
        )
    if self.verbose == True:
        print('image successfully loaded')
        #-------performance calculation--------
        print("Fitting duration:", timeit.default_timer() - t0)
        #--------------------------------------
def colorize_image(self, angle):
    '''
    colorize an image by shifting its hue

    angle: hue in degrees (0-360); the image's hue channel is set to
    angle/360 in normalised HSV space.
    '''
    # NOTE(review): despite the helper's name, shift_hue below *sets* the
    # hue channel to a constant rather than adding an offset, so every
    # pixel ends up with the same hue (a "colorize" tint) -- confirm this
    # is the intended semantics of --colorize.
    def hsv_to_rgb(hsv):
        # Vectorised HSV -> RGB conversion.  The branch selection follows
        # the classic sextant formulation: i = floor(h*6) picks the 60-degree
        # sector, and np.select evaluates conditions in order, so the
        # "s == 0" (greyscale) case deliberately wins over the sector tests.
        rgb = np.empty_like(hsv)
        h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]
        i = (h * 6.0).astype('uint8')
        f = (h * 6.0) - i
        p = v * (1.0 - s)
        q = v * (1.0 - s * f)
        t = v * (1.0 - s * (1.0 - f))
        i = i % 6
        # i == 0 is handled by each channel's `default` value (v, t, p).
        conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5]
        rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v)
        rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t)
        rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p)
        return rgb.astype('uint8')
    def rgb_to_hsv(rgb):
        # Vectorised RGB -> HSV conversion.  Saturation and hue are only
        # computed where max != min (the `mask`); pure greys keep the
        # zero-initialised h and s from np.zeros_like.
        rgb = rgb.astype('float')
        hsv = np.zeros_like(rgb)
        r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
        maxc = np.max(rgb[..., :3], axis=-1)
        minc = np.min(rgb[..., :3], axis=-1)
        hsv[..., 2] = maxc
        mask = maxc != minc
        hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]
        rc = np.zeros_like(r)
        gc = np.zeros_like(g)
        bc = np.zeros_like(b)
        rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]
        gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]
        bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]
        # Pick the hue formula for whichever channel is the maximum
        # (r wins ties, then g, else b), then normalise to [0, 1).
        hsv[..., 0] = np.select(
            [r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)
        hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
        return hsv
    def shift_hue(arr, hout):
        '''
        1. convert rgb view to hsl view (hue, saturation, lightness)
        2. apply hue angle to the image
        3. convert back to rgb
        '''
        hsv = rgb_to_hsv(arr)
        # Overwrites (not offsets) the hue channel -- see NOTE above.
        hsv[...,0] = hout
        rgb = hsv_to_rgb(hsv)
        return rgb
    # Normalise degrees to the [0, 1] hue range used internally.
    self.image_matrix = shift_hue(self.image_matrix, float(angle)/360)
    if self.verbose == True:
        print("colorization applied for {} degree".format(angle))
def filter_image(self, filter_type):
    '''
    Filter application with convolution matrix
    1. sobel filter, edge detection
    2. blur filter
    3. edge reinforcement filter
    4. emboss filter

    filter_type: string (or container of strings) naming the filter(s);
    several names may be combined and are applied in the order below.
    '''
    def filter(matrix, kernel):
        '''
        The goal of this function is to make 9 shifted copies where the kernel is applied of the original matrix
        Then we just sum up these 9 matrix together and finally optimized the convolution algorithm
        '''
        # NOTE(review): shadows the builtin `filter`; also np.roll wraps
        # around at the borders, so edge pixels convolve with the opposite
        # side of the image (an approximation, not zero-padding).
        # NOTE(review): `get_int_from_bytes(self.bi_bitcount)` is called
        # WITHOUT .tolist() here, unlike everywhere else in this class --
        # verify get_int_from_bytes accepts a numpy slice.
        # initialize list of matrix copies, one per kernel cell
        # (width taken from len(matrix[1]), i.e. the length of row 1)
        copies = np.empty((len(kernel)*len(kernel), len(matrix), len(matrix[1]), int(get_int_from_bytes(self.bi_bitcount)/8)), dtype='int64')
        # Go through the kernel (assumed square, e.g. 3x3 or 5x5)
        for i in range(np.shape(kernel)[1]):
            for j in range(np.shape(kernel)[0]):
                # Save copies of the original image shifted by 1 pixel around + kernel value application
                copies[i*len(kernel) + j] = np.roll(matrix.copy(), (i-(len(kernel)//2), j-(len(kernel)//2)), (0,1)) * kernel[i][j]
        # return the sum of each copies to get back our new matrix with kernel value applied
        return copies.sum(axis=0)
    if 'emboss' in filter_type or 'edge-reinforcement' in filter_type or 'edge-detection' in filter_type or 'blur' in filter_type:
        # placeholder; each branch below builds its own kernel
        kernel = np.empty((3, 3))
        if 'edge-detection' in filter_type:
            if self.verbose == True:
                print('> Sobel edge detection filter')
            if self.verbose == True:
                #-------performance calculation--------
                starttime = timeit.default_timer()
                print("Start Sobel edge detection time:", starttime)
                #--------------------------------------
            # Gradient horizontal
            kernel1 = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
            # Gradient vertical
            kernel2 = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
            res1 = filter(self.image_matrix, kernel1)
            res2 = filter(self.image_matrix, kernel2)
            # gradient magnitude, truncated back to uint8
            self.image_matrix = np.sqrt(res1**2 + res2**2).astype('uint8')
            if self.verbose == True:
                #-------performance calculation--------
                print("Sobel edge detection duration:", timeit.default_timer() - starttime)
                #--------------------------------------
        if 'blur' in filter_type:
            if self.verbose == True:
                print('> Blur filter')
            if self.verbose == True:
                #-------performance calculation--------
                starttime = timeit.default_timer()
                print("Start blur filter processing time:", starttime)
                #--------------------------------------
            # 5x5 Gaussian kernel (weights sum to 1)
            kernel = np.array([[1/256, 4/256, 6/256, 4/256, 1/256],
            [4/256, 16/256, 24/256, 16/256, 4/256],
            [6/256, 24/256, 36/256, 24/256, 6/256],
            [4/256, 16/256, 24/256, 16/256, 4/256],
            [1/256, 4/256, 6/256, 4/256, 1/256]])
            self.image_matrix = (filter(self.image_matrix, kernel)).astype('uint8')
            if self.verbose == True:
                #-------performance calculation--------
                print("Blur filter processing duration:", timeit.default_timer() - starttime)
                #--------------------------------------
        if 'edge-reinforcement' in filter_type:
            if self.verbose == True:
                print('> Edge reinforcement filter')
            if self.verbose == True:
                #-------performance calculation--------
                starttime = timeit.default_timer()
                print("Start edge reinforcement filter processing time:", starttime)
                #--------------------------------------
            # sharpen kernel
            kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
            self.image_matrix = filter(self.image_matrix, kernel)
            # clamp to the valid byte range
            # NOTE(review): unlike the blur branch, the result is NOT cast
            # back to uint8 here, so image_matrix stays int64 -- confirm
            # downstream writers tolerate that.
            self.image_matrix[self.image_matrix > 255] = 255
            self.image_matrix[self.image_matrix < 0] = 0
            if self.verbose == True:
                #-------performance calculation--------
                print("Edge reinforcement filter processing duration:", timeit.default_timer() - starttime)
                #--------------------------------------
        if 'emboss' in filter_type:
            if self.verbose == True:
                print('> Emboss filter')
            if self.verbose == True:
                #-------performance calculation--------
                starttime = timeit.default_timer()
                print("Start emboss filter processing time:", starttime)
                #--------------------------------------
            # emboss kernel (directional relief)
            kernel = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
            self.image_matrix = filter(self.image_matrix, kernel)
            # clamp to the valid byte range (dtype note above applies here too)
            self.image_matrix[self.image_matrix > 255] = 255
            self.image_matrix[self.image_matrix < 0] = 0
            if self.verbose == True:
                #-------performance calculation--------
                print("Emboss filter processing duration:", timeit.default_timer() - starttime)
                #-------------------------------
    else:
        print('Error: --filter argument is incorrect, no filter applied')
def overlay(self, option):
'''
Overlay two image together
1. option maximum between both images
2. option minimum between both images
'''
for i in range(np.shape(self.image_matrix)[1]):
for j in range(np.shape(self.image_matrix)[0]):
A = 0
B = 0
A = np.sum(self.image_matrix[i][j])
B = np.sum(self.overlay_image_matrix[i][j])
if 'maximum' in option:
self.image_matrix[i][j] = np.maximum(self.image_matrix[i][j], self.overlay_image_matrix[i][j])
elif 'minimum' in option:
self.image_matrix[i][j] = np.minimum(self.image_matrix[i][j], self.overlay_image_matrix[i][j])
def fit_overlay(self, overlay_name):
    '''
    Fit Overlay image

    Reads the overlay BMP byte-by-byte into header / palette / image
    arrays, reshapes the pixel bytes into a matrix, resizes the base
    image to the overlay's dimensions, and exits the process if the two
    images still differ in shape.
    '''
    if self.verbose == True:
        #-------performance calculation--------
        starttime = timeit.default_timer()
        print("Start fitting overlay time:", starttime)
        #--------------------------------------
    self.overlay_name = overlay_name
    # BUG FIX: the file was "closed" via `f_lecture.close` (attribute
    # access, never called) on both exit paths, so the handle leaked --
    # including across the sys.exit() below.  A context manager closes it
    # on every path.
    with open(self.overlay_name, 'rb') as f_lecture:
        # 54 bytes: BITMAPFILEHEADER (14) + BITMAPINFOHEADER (40)
        i = 0
        while i < 54:
            octet = f_lecture.read(1)
            self.overlay_header.append(ord(octet))
            i += 1
        bf_size = get_int_from_bytes(self.overlay_header[2:6])
        bf_offbytes = get_int_from_bytes(self.overlay_header[10:14])
        # palette bytes, up to the pixel-data offset
        while i < bf_offbytes:
            octet = f_lecture.read(1)
            self.overlay_palette.append(ord(octet))
            i += 1
        # pixel bytes, up to the declared file size
        while i < bf_size:
            octet = f_lecture.read(1)
            self.overlay_image.append(ord(octet))
            i += 1
    self.overlay_header = np.array(self.overlay_header)
    self.overlay_palette = np.array(self.overlay_palette)
    self.overlay_image = np.array(self.overlay_image)
    # fit image matrix with bitmap image bytes
    # NOTE(review): axis order here is (width-field 18:22, height-field
    # 22:26), the opposite of fit(), which reshapes as (height, width,
    # channels).  Kept as-is since it only differs for non-square images;
    # confirm against resize_image()'s expectations.
    self.overlay_image_matrix = self.overlay_image.reshape(
        get_int_from_bytes(self.overlay_header[18:22].tolist()),
        get_int_from_bytes(self.overlay_header[22:26].tolist()),
        int(get_int_from_bytes(self.overlay_header[28:30].tolist())/8)
    )
    self.resize_image([get_int_from_bytes(self.overlay_header[22:26].tolist()), get_int_from_bytes(self.overlay_header[18:22].tolist())])
    if self.verbose == True:
        print('image to overload successfully loaded')
        #-------performance calculation--------
        print("Fitting duration:", timeit.default_timer() - starttime)
        #--------------------------------------
    if np.shape(self.overlay_image_matrix) != np.shape(self.image_matrix):
        print('Error: Require both input images to be the same dimension')
        print('{}: {}'.format(self.img, np.shape(self.image_matrix)))
        print('{}: {}'.format(self.overlay_name, np.shape(self.overlay_image_matrix)))
        sys.exit(-1)
def brightness_image(self, brightness):
'''
Image brightness adjustment
'''
# Applying brightness formula for each pixels (RGB) in | |
{}
header_parameters['Content-Type'] = 'application/json-patch+json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(request, 'HistoryReadNextRequestApiModel')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('HistoryReadNextResponseApiModelJToken', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
history_read_raw_next.metadata = {'url': '/v2/history/read/{endpointId}/next'}
def history_update_raw(
        self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
    """Update node history using raw json.

    Update node history using historic access.  The endpoint must be
    activated and connected and the module client and server must trust
    each other.

    :param endpoint_id: The identifier of the activated endpoint.
    :type endpoint_id: str
    :param request: The history update request
    :type request:
     ~azure-iiot-opc-history.models.HistoryUpdateRequestApiModelJToken
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: HistoryUpdateResponseApiModel or ClientRawResponse if raw=true
    :rtype: ~azure-iiot-opc-history.models.HistoryUpdateResponseApiModel
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
    """
    # Fill the {endpointId} placeholder in the route template.
    url = self._client.format_url(
        self.history_update_raw.metadata['url'],
        endpointId=self._serialize.url("endpoint_id", endpoint_id, 'str'))
    # JSON request headers, plus any caller-supplied extras.
    headers = {'Content-Type': 'application/json-patch+json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # Serialize the payload and issue the POST (no query parameters).
    body = self._serialize.body(request, 'HistoryUpdateRequestApiModelJToken')
    http_request = self._client.post(url, {})
    response = self._client.send(
        http_request, headers, body, stream=False, **operation_config)
    # Only 200 is a documented success status.
    if response.status_code != 200:
        raise HttpOperationError(self._deserialize, response)
    result = self._deserialize('HistoryUpdateResponseApiModel', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
history_update_raw.metadata = {'url': '/v2/history/update/{endpointId}'}
def history_insert_values(
        self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
    """Insert historic values.

    Insert historic values using historic access.  The endpoint must be
    activated and connected and the module client and server must trust
    each other.

    :param endpoint_id: The identifier of the activated endpoint.
    :type endpoint_id: str
    :param request: The history insert request
    :type request:
     ~azure-iiot-opc-history.models.HistoryUpdateRequestApiModelInsertValuesDetailsApiModel
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: HistoryUpdateResponseApiModel or ClientRawResponse if raw=true
    :rtype: ~azure-iiot-opc-history.models.HistoryUpdateResponseApiModel
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
    """
    # Fill the {endpointId} placeholder in the route template.
    url = self._client.format_url(
        self.history_insert_values.metadata['url'],
        endpointId=self._serialize.url("endpoint_id", endpoint_id, 'str'))
    # JSON request headers, plus any caller-supplied extras.
    headers = {'Content-Type': 'application/json-patch+json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # Serialize the payload and issue the POST (no query parameters).
    body = self._serialize.body(
        request, 'HistoryUpdateRequestApiModelInsertValuesDetailsApiModel')
    http_request = self._client.post(url, {})
    response = self._client.send(
        http_request, headers, body, stream=False, **operation_config)
    # Only 200 is a documented success status.
    if response.status_code != 200:
        raise HttpOperationError(self._deserialize, response)
    result = self._deserialize('HistoryUpdateResponseApiModel', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
history_insert_values.metadata = {'url': '/v2/insert/{endpointId}/values'}
def history_insert_events(
        self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
    """Insert historic events.

    Insert historic events using historic access.  The endpoint must be
    activated and connected and the module client and server must trust
    each other.

    :param endpoint_id: The identifier of the activated endpoint.
    :type endpoint_id: str
    :param request: The history insert request
    :type request:
     ~azure-iiot-opc-history.models.HistoryUpdateRequestApiModelInsertEventsDetailsApiModel
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: HistoryUpdateResponseApiModel or ClientRawResponse if raw=true
    :rtype: ~azure-iiot-opc-history.models.HistoryUpdateResponseApiModel
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
    """
    # Fill the {endpointId} placeholder in the route template.
    url = self._client.format_url(
        self.history_insert_events.metadata['url'],
        endpointId=self._serialize.url("endpoint_id", endpoint_id, 'str'))
    # JSON request headers, plus any caller-supplied extras.
    headers = {'Content-Type': 'application/json-patch+json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # Serialize the payload and issue the POST (no query parameters).
    body = self._serialize.body(
        request, 'HistoryUpdateRequestApiModelInsertEventsDetailsApiModel')
    http_request = self._client.post(url, {})
    response = self._client.send(
        http_request, headers, body, stream=False, **operation_config)
    # Only 200 is a documented success status.
    if response.status_code != 200:
        raise HttpOperationError(self._deserialize, response)
    result = self._deserialize('HistoryUpdateResponseApiModel', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
history_insert_events.metadata = {'url': '/v2/insert/{endpointId}/events'}
def history_read_events(
        self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
    """Read historic events.

    Read historic events of a node if available using historic access.
    The endpoint must be activated and connected and the module client
    and server must trust each other.

    :param endpoint_id: The identifier of the activated endpoint.
    :type endpoint_id: str
    :param request: The history read request
    :type request:
     ~azure-iiot-opc-history.models.HistoryReadRequestApiModelReadEventsDetailsApiModel
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: HistoryReadResponseApiModelHistoricEventApiModel or
     ClientRawResponse if raw=true
    :rtype:
     ~azure-iiot-opc-history.models.HistoryReadResponseApiModelHistoricEventApiModel
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
    """
    # Fill the {endpointId} placeholder in the route template.
    url = self._client.format_url(
        self.history_read_events.metadata['url'],
        endpointId=self._serialize.url("endpoint_id", endpoint_id, 'str'))
    # JSON request headers, plus any caller-supplied extras.
    headers = {'Content-Type': 'application/json-patch+json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # Serialize the payload and issue the POST (no query parameters).
    body = self._serialize.body(
        request, 'HistoryReadRequestApiModelReadEventsDetailsApiModel')
    http_request = self._client.post(url, {})
    response = self._client.send(
        http_request, headers, body, stream=False, **operation_config)
    # Only 200 is a documented success status.
    if response.status_code != 200:
        raise HttpOperationError(self._deserialize, response)
    result = self._deserialize(
        'HistoryReadResponseApiModelHistoricEventApiModel', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
history_read_events.metadata = {'url': '/v2/read/{endpointId}/events'}
def history_read_events_next(
        self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
    """Read next batch of historic events.

    Read next batch of historic events of a node using historic access.
    The endpoint must be activated and connected and the module client
    and server must trust each other.

    :param endpoint_id: The identifier of the activated endpoint.
    :type endpoint_id: str
    :param request: The history read next request
    :type request:
     ~azure-iiot-opc-history.models.HistoryReadNextRequestApiModel
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: HistoryReadNextResponseApiModelHistoricEventApiModel or
     ClientRawResponse if raw=true
    :rtype:
     ~azure-iiot-opc-history.models.HistoryReadNextResponseApiModelHistoricEventApiModel
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
    """
    # Fill the {endpointId} placeholder in the route template.
    url = self._client.format_url(
        self.history_read_events_next.metadata['url'],
        endpointId=self._serialize.url("endpoint_id", endpoint_id, 'str'))
    # JSON request headers, plus any caller-supplied extras.
    headers = {'Content-Type': 'application/json-patch+json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # Serialize the payload and issue the POST (no query parameters).
    body = self._serialize.body(request, 'HistoryReadNextRequestApiModel')
    http_request = self._client.post(url, {})
    response = self._client.send(
        http_request, headers, body, stream=False, **operation_config)
    # Only 200 is a documented success status.
    if response.status_code != 200:
        raise HttpOperationError(self._deserialize, response)
    result = self._deserialize(
        'HistoryReadNextResponseApiModelHistoricEventApiModel', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
history_read_events_next.metadata = {'url': '/v2/read/{endpointId}/events/next'}
def history_read_values(
        self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
    """Read historic processed values at specified times.

    Read processed history values of a node if available using historic
    access.  The endpoint must be activated and connected and the module
    client and server must trust each other.

    :param endpoint_id: The identifier of the activated endpoint.
    :type endpoint_id: str
    :param request: The history read request
    :type request:
     ~azure-iiot-opc-history.models.HistoryReadRequestApiModelReadValuesDetailsApiModel
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: HistoryReadResponseApiModelHistoricValueApiModel or
     ClientRawResponse if raw=true
    :rtype:
     ~azure-iiot-opc-history.models.HistoryReadResponseApiModelHistoricValueApiModel
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
    """
    # Fill the {endpointId} placeholder in the route template.
    url = self._client.format_url(
        self.history_read_values.metadata['url'],
        endpointId=self._serialize.url("endpoint_id", endpoint_id, 'str'))
    # JSON request headers, plus any caller-supplied extras.
    headers = {'Content-Type': 'application/json-patch+json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # Serialize the payload and issue the POST (no query parameters).
    body = self._serialize.body(
        request, 'HistoryReadRequestApiModelReadValuesDetailsApiModel')
    http_request = self._client.post(url, {})
    response = self._client.send(
        http_request, headers, body, stream=False, **operation_config)
    # Only 200 is a documented success status.
    if response.status_code != 200:
        raise HttpOperationError(self._deserialize, response)
    result = self._deserialize(
        'HistoryReadResponseApiModelHistoricValueApiModel', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
history_read_values.metadata = {'url': '/v2/read/{endpointId}/values'}
def history_read_values_at_times(
self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
"""Read historic values at specified times.
Read historic values of a node if available using historic access.
The endpoint must be activated and connected and the module client
and server must trust each other.
:param endpoint_id: The identifier of the activated endpoint.
:type endpoint_id: str
:param request: The history read request
| |
dictionaryCreationThreshold: int)
"""
pass
def __reduce_ex__(self, *args): #cannot find CLR method
    """Pickle-protocol stub; the underlying CLR method is not exposed to Python."""
    pass
def __setitem__(self, *args): #cannot find CLR method
    """ x.__setitem__(i, y) <==> x[i] = y """
    # Stub: item assignment is handled by the CLR collection at runtime.
    pass
# Generated property placeholders: the getter returns a dummy object and the
# setter/deleter slots do nothing -- the real behaviour lives in the CLR type.
Comparer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the generic equality comparer that is used to determine equality of keys in the collection.
Get: Comparer(self: KeyedCollection[TKey, TItem]) -> IEqualityComparer[TKey]
"""
Dictionary = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the lookup dictionary of the System.Collections.ObjectModel.KeyedCollection.
"""
Items = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a System.Collections.Generic.IList wrapper around the System.Collections.ObjectModel.Collection.
"""
class ObservableCollection(Collection[T], IList[T], ICollection[T], IEnumerable[T], IEnumerable, IList, ICollection, IReadOnlyList[T], IReadOnlyCollection[T], INotifyCollectionChanged, INotifyPropertyChanged):
"""
ObservableCollection[T]()
ObservableCollection[T](list: List[T])
ObservableCollection[T](collection: IEnumerable[T])
"""
def add_PropertyChanged(self, *args): #cannot find CLR method
    """ add_PropertyChanged(self: ObservableCollection[T], value: PropertyChangedEventHandler) """
    # Stub for the CLR `PropertyChanged += handler` event subscription.
    pass
def BlockReentrancy(self, *args): #cannot find CLR method
    """
    BlockReentrancy(self: ObservableCollection[T]) -> IDisposable

    Disallows reentrant attempts to change this collection.
    Returns: An System.IDisposable object that can be used to dispose of the object.
    """
    # Stub only; the CLR implementation is not reachable from Python.
    pass
def CheckReentrancy(self, *args): #cannot find CLR method
    """
    CheckReentrancy(self: ObservableCollection[T])

    Checks for reentrant attempts to change this collection.
    """
    # Stub only; the CLR implementation is not reachable from Python.
    pass
def ClearItems(self, *args): #cannot find CLR method
    """
    ClearItems(self: ObservableCollection[T])

    Removes all items from the collection.
    """
    # Stub only; the CLR implementation is not reachable from Python.
    pass
def InsertItem(self, *args): #cannot find CLR method
    """
    InsertItem(self: ObservableCollection[T], index: int, item: T)

    Inserts an item into the collection at the specified index.
    index: The zero-based index at which item should be inserted.
    item: The object to insert.
    """
    # Stub only; the CLR implementation is not reachable from Python.
    pass
def Move(self, oldIndex, newIndex):
    """
    Move(self: ObservableCollection[T], oldIndex: int, newIndex: int)

    Moves the item at the specified index to a new location in the collection.
    oldIndex: The zero-based index specifying the location of the item to be moved.
    newIndex: The zero-based index specifying the new location of the item.
    """
    # Stub: the real move is performed by the CLR type at runtime.
    pass
def MoveItem(self, *args): #cannot find CLR method
"""
MoveItem(self: ObservableCollection[T], oldIndex: int, newIndex: int)
Moves the item at the specified index to a new location in the collection.
oldIndex: The zero-based index specifying the location of the item to be moved.
newIndex: The zero-based index specifying the new location of the item.
"""
pass
def OnCollectionChanged(self, *args): #cannot find CLR method
"""
OnCollectionChanged(self: ObservableCollection[T], e: NotifyCollectionChangedEventArgs)
Raises the System.Collections.ObjectModel.ObservableCollection event with the
provided arguments.
e: Arguments of the event being raised.
"""
pass
def OnPropertyChanged(self, *args): #cannot find CLR method
"""
OnPropertyChanged(self: ObservableCollection[T], e: PropertyChangedEventArgs)
Raises the System.Collections.ObjectModel.ObservableCollection event with the
provided arguments.
e: Arguments of the event being raised.
"""
pass
def RemoveItem(self, *args): #cannot find CLR method
"""
RemoveItem(self: ObservableCollection[T], index: int)
Removes the item at the specified index of the collection.
index: The zero-based index of the element to remove.
"""
pass
def remove_PropertyChanged(self, *args): #cannot find CLR method
""" remove_PropertyChanged(self: ObservableCollection[T], value: PropertyChangedEventHandler) """
pass
def SetItem(self, *args): #cannot find CLR method
"""
SetItem(self: ObservableCollection[T], index: int, item: T)
Replaces the element at the specified index.
index: The zero-based index of the element to replace.
item: The new value for the element at the specified index.
"""
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args): #cannot find CLR method
""" __iter__(self: IEnumerable) -> object """
pass
@staticmethod # known case of __new__
def __new__(self, *__args):
"""
__new__(cls: type)
__new__(cls: type, list: List[T])
__new__(cls: type, collection: IEnumerable[T])
"""
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __setitem__(self, *args): #cannot find CLR method
""" x.__setitem__(i, y) <==> x[i]= """
pass
Items = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a System.Collections.Generic.IList wrapper around the System.Collections.ObjectModel.Collection.
"""
CollectionChanged = None
class ReadOnlyCollection(object, IList[T], ICollection[T], IEnumerable[T], IEnumerable, IList, ICollection, IReadOnlyList[T], IReadOnlyCollection[T]):
    """
    Auto-generated stub for System.Collections.ObjectModel.ReadOnlyCollection[T]:
    a read-only wrapper around an IList[T].

    ReadOnlyCollection[T](list: IList[T])
    """
    # NOTE: all bodies are stubs; the real implementations live in the CLR.
    def Contains(self, value):
        """
        Contains(self: ReadOnlyCollection[T], value: T) -> bool
        Determines whether an element is in the
        System.Collections.ObjectModel.ReadOnlyCollection.
        value: The object to locate in the System.Collections.ObjectModel.ReadOnlyCollection.
        The value can be null for reference types.
        Returns: true if value is found in the
        System.Collections.ObjectModel.ReadOnlyCollection; otherwise, false.
        """
        pass
    def CopyTo(self, array, index):
        """ CopyTo(self: ReadOnlyCollection[T], array: Array[T], index: int) """
        pass
    def GetEnumerator(self):
        """
        GetEnumerator(self: ReadOnlyCollection[T]) -> IEnumerator[T]
        Returns an enumerator that iterates through the
        System.Collections.ObjectModel.ReadOnlyCollection.
        Returns: An System.Collections.Generic.IEnumerator for the
        System.Collections.ObjectModel.ReadOnlyCollection.
        """
        pass
    def IndexOf(self, value):
        """
        IndexOf(self: ReadOnlyCollection[T], value: T) -> int
        Searches for the specified object and returns the zero-based index of the first
        occurrence within the entire System.Collections.ObjectModel.ReadOnlyCollection.
        value: The object to locate in the System.Collections.Generic.List. The value can be
        null for reference types.
        Returns: The zero-based index of the first occurrence of item within the entire
        System.Collections.ObjectModel.ReadOnlyCollection, if found; otherwise, -1.
        """
        pass
    def __contains__(self, *args): #cannot find CLR method
        """
        __contains__(self: ICollection[T], item: T) -> bool
        Determines whether the System.Collections.Generic.ICollection contains a
        specific value.
        item: The object to locate in the System.Collections.Generic.ICollection.
        Returns: true if item is found in the System.Collections.Generic.ICollection; otherwise,
        false.
        __contains__(self: IList, value: object) -> bool
        Determines whether the System.Collections.IList contains a specific value.
        value: The object to locate in the System.Collections.IList.
        Returns: true if the System.Object is found in the System.Collections.IList; otherwise,
        false.
        """
        pass
    def __getitem__(self, *args): #cannot find CLR method
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __iter__(self, *args): #cannot find CLR method
        """ __iter__(self: IEnumerable) -> object """
        pass
    def __len__(self, *args): #cannot find CLR method
        """ x.__len__() <==> len(x) """
        pass
    @staticmethod # known case of __new__
    def __new__(self, list):
        """ __new__(cls: type, list: IList[T]) """
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __repr__(self, *args): #cannot find CLR method
        """ __repr__(self: object) -> str """
        pass
    Count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the number of elements contained in the System.Collections.ObjectModel.ReadOnlyCollection instance.
    Get: Count(self: ReadOnlyCollection[T]) -> int
    """
    Items = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Returns the System.Collections.Generic.IList that the System.Collections.ObjectModel.ReadOnlyCollection wraps.
    """
class ReadOnlyDictionary(object, IDictionary[TKey, TValue], ICollection[KeyValuePair[TKey, TValue]], IEnumerable[KeyValuePair[TKey, TValue]], IEnumerable, IDictionary, ICollection, IReadOnlyDictionary[TKey, TValue], IReadOnlyCollection[KeyValuePair[TKey, TValue]]):
""" ReadOnlyDictionary[TKey, TValue](dictionary: IDictionary[TKey, TValue]) """
def ContainsKey(self, key):
""" ContainsKey(self: ReadOnlyDictionary[TKey, TValue], key: TKey) -> bool """
pass
def GetEnumerator(self):
""" GetEnumerator(self: ReadOnlyDictionary[TKey, TValue]) -> IEnumerator[KeyValuePair[TKey, TValue]] """
pass
def TryGetValue(self, key, value):
""" TryGetValue(self: ReadOnlyDictionary[TKey, TValue], key: TKey) -> (bool, TValue) """
pass
def __contains__(self, *args): #cannot find CLR method
"""
__contains__(self: IDictionary[TKey, TValue], key: TKey) -> bool
Determines whether the System.Collections.Generic.IDictionary contains an
element with the specified key.
key: The key to locate in the System.Collections.Generic.IDictionary.
Returns: true if the System.Collections.Generic.IDictionary contains an element with the
key; otherwise, false.
__contains__(self: IDictionary, key: object) -> bool
Determines whether the System.Collections.IDictionary object contains an
element with the specified key.
key: The key to locate in the System.Collections.IDictionary object.
Returns: true if the System.Collections.IDictionary contains an element with the key;
otherwise, false.
"""
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args): #cannot find CLR method
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self, *args): #cannot find CLR method
""" | |
import sys
import os
import unittest
import shutil
import copy
from io import StringIO

# Make the repository root importable so the test doubles and the module
# under test resolve when pytest runs from the project root.
sys.path.append(".")

from mock_gff3 import Create_generator
from mock_helper import import_data, gen_file
import annogesiclib.sORF_detection as sd
from mock_args_container import MockClass

# Keep a pristine copy of the real function so tests that monkey-patch
# sd.get_coverage can restore it afterwards.
get_coverage = copy.deepcopy(sd.get_coverage)
class Mock_func(object):
    """Canned stand-ins for annogesiclib callables used by the tests.

    Every method ignores its arguments and returns a fixed value, so the
    tests can monkey-patch the real functions deterministically.
    """

    def mock_get_coverage(self, inter_datas, wigs, strand,
                          background, test1, test2, back):
        """Replacement for sd.get_coverage: always reports "2"."""
        return "2"

    def mock_replicate_comparison(
            self, srna_covers, template_texs, strand, cutoff_coverage,
            tex_notex, type_, median, coverages, utr_type, notex):
        """Replacement for replicate_comparison: one fixed best hit."""
        canned_hit = {
            "best": 20,
            "high": 50,
            "low": 10,
            "start": 1,
            "end": 10,
            "track": "track_1",
            "detail": [],
            "conds": {"frag": "track_1"},
        }
        return canned_hit

    def mock_read_libs(self, input_libs, wig_folder):
        """Replacement for read_libs: pretends nothing was loaded."""
        return None, None

    def mock_read_wig(self, wig_file, strand, libs):
        """Replacement for read_wig: pretends nothing was loaded."""
        return None

    def mock_get_inter_coverage(self, inters, inter_covers):
        """No-op replacement for get_inter_coverage."""
        pass
class TestsORFDetection(unittest.TestCase):
    """Unit tests for annogesiclib.sORF_detection (small ORF detection)."""

    def setUp(self):
        # Shared fixtures: canned GFF/wig data (Example), a CLI-args
        # stand-in factory (MockClass) and the canned callables (Mock_func).
        self.example = Example()
        self.mock_args = MockClass()
        self.mock = Mock_func()
        # Scratch directory layout that the module under test expects.
        self.test_folder = "test_folder"
        self.fasta = "test_folder/fasta"
        self.wigs = "test_folder/wig"
        self.gff = "test_folder/gff"
        if (not os.path.exists(self.test_folder)):
            os.mkdir(self.test_folder)
            os.mkdir(self.fasta)
            os.mkdir(self.wigs)
            os.mkdir(self.gff)

    def tearDown(self):
        # Remove everything created in setUp so tests stay independent.
        if os.path.exists(self.test_folder):
            shutil.rmtree(self.test_folder)
    def test_get_coverage(self):
        # get_coverage() should pick the wig tracks overlapping the sORF
        # and report low/avg/high coverage per track and condition.
        coverages = {"3utr": "median", "5utr": "median",
                     "inter": 5, "interCDS": "median"}
        medianlist = {"aaa": {"3utr": {"track_1": {"median": 3}},
                              "5utr": {"track_1": {"median": 6}},
                              "interCDS": {"track_1": {"median": 2}},
                              "inter": {"track_1": {"median": 5}}}}
        cutoffs = {"track_1": 0}
        sorf = {"strain": "aaa", "strand": "+", "start": 2, "end": 6,
                "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                "type": "3utr", "print": False, "rbs": [1]}
        covers = sd.get_coverage(sorf, self.example.wigs, "+", coverages,
                                 medianlist, cutoffs, 10)
        self.assertDictEqual(covers, {'frag_1': [
            {'low': 2, 'avg': 33.4, 'high': 100, 'pos': 2,
             'track': 'track_1', 'type': 'frag'}]})

    def test_detect_rbs_site(self):
        # A single AGGAGG ribosome-binding site should be reported at
        # its offset relative to the transcript.
        args = self.mock_args.mock()
        args.max_len = 20
        args.min_len = 3
        args.fuzzy_rbs = 2
        args.rbs_seq = ["AGGAGG"]
        detect = sd.detect_rbs_site("AGGAGGCCGCTATGCCACACGT", 2,
                                    self.example.tas[0], args)
        self.assertListEqual(detect, [1])
    def test_detect_start_stop(self):
        # Forward strand: an ATG...TTA frame inside the transcript is found.
        seq = {"aaa": "TAGGAGGCCGCTATGCCATTA"}
        args = self.mock_args.mock()
        args.start_codon = ["ATG"]
        args.stop_codon = ["TTA"]
        args.max_len = 20
        args.min_len = 3
        args.fuzzy_rbs = 2
        args.rbs_seq = ["AGGAGG"]
        args.multi_stop = True
        sorf = sd.detect_start_stop(self.example.tas, seq, args)
        self.assertListEqual(sorf, [
            {'strand': '+', 'type': 'intergenic', 'starts': ['13'],
             'print': False, 'seq': 'ATGCCATTA', 'ends': ['21'],
             'end': 21, 'start': 13, 'rbs': [2], 'strain': 'aaa'}])
        # Reverse strand: same detection against the minus strand.
        seq = {"aaa": "TTAAAGGCATTATCCTCCTA"}
        self.example.tas[0].strand = "-"
        sorf = sd.detect_start_stop(self.example.tas, seq, args)
        self.assertListEqual(sorf, [
            {'end': 10, 'starts': ['2'], 'strain': 'aaa', 'ends': ['10'],
             'type': 'intergenic', 'print': False, 'seq': 'TAAAGGCAT',
             'rbs': [19], 'strand': '-', 'start': 2}])
        # Restore the shared fixture for the other tests.
        self.example.tas[0].strand = "+"

    def test_read_data(self):
        # read_data() parses the intergenic GFF and the FASTA file.
        inter = os.path.join(self.test_folder, "inter")
        fasta = os.path.join(self.test_folder, "fa")
        gen_file(inter, self.example.inter)
        gen_file(fasta, ">aaa\nATATACCGATC")
        inters, tsss, srnas, seq = sd.read_data(inter, None, None, fasta, True)
        self.assertEqual(inters[0].start, 2)
        self.assertDictEqual(seq, {'aaa': 'ATATACCGATC'})

    def test_check_tss(self):
        # A TSS near the sORF start marks it importable and keeps the RBS.
        sorf = {"strain": "aaa", "strand": "+", "start": 2, "end": 6,
                "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                "type": "3utr", "print": False, "rbs": [1], "with_TSS": []}
        checks = {"start": False, "rbs": False, "import": False}
        sd.check_tss(sorf, self.example.tsss[0], 300, checks)
        self.assertDictEqual(checks, {'start': True, 'rbs': [1],
                                      'import': True})
    def test_compare_sorf_tss(self):
        # Associating sORFs with TSSs should populate start_TSS/with_TSS in
        # both the "all" and the "best" result lists.
        sorfs = [{"strain": "aaa", "strand": "+", "start": 2, "end": 6,
                  "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                  "type": "3utr", "print": False, "rbs": [1]}]
        args = self.mock_args.mock()
        args.utr_length = 300
        args.noafter_tss = False
        args.no_tss = False
        sorfs_all, sorfs_best = sd.compare_sorf_tss(
            sorfs, self.example.tsss, "tss", args)
        self.assertListEqual(sorfs_all, [
            {'print': False, 'ends': ['10'], 'strand': '+',
             'end': 6, 'type': '3utr', 'starts': ['2'], 'seq': 'ATGTA',
             'strain': 'aaa', 'start': 2, 'rbs': [1],
             'start_TSS': '1_+', 'with_TSS': ['TSS:1_+']}])
        self.assertListEqual(sorfs_best, [
            {'print': False, 'ends': ['10'], 'strand': '+',
             'end': 6, 'type': '3utr', 'starts': ['2'], 'seq': 'ATGTA',
             'strain': 'aaa', 'start': 2, 'rbs': [1],
             'with_TSS': ['TSS:1_+'], 'start_TSS': '1_+'}])

    def test_compare_sorf_srna(self):
        # Overlapping sRNAs get attached to the sORF dict in place.
        sorfs = [{"strain": "aaa", "strand": "+", "start": 2, "end": 6,
                  "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                  "type": "3utr", "print": False, "rbs": [1]}]
        sd.compare_sorf_srna(sorfs, self.example.srnas, "test")
        self.assertListEqual(sorfs, [
            {'print': False, 'starts': ['2'], 'seq': 'ATGTA', 'strand': '+',
             'srna': ['sRNA:5-8_+'], 'end': 6, 'rbs': [1], 'ends': ['10'],
             'start': 2, 'strain': 'aaa', 'type': '3utr'}])

    def test_import_overlap(self):
        # Merging two overlapping sORFs should union their coordinates,
        # RBS lists and candidate strings into "final".
        sorf1 = {"strain": "aaa", "strand": "+", "start": 2, "end": 6, "srna": ["NA"],
                 "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                 "type": "3utr", "print": False, "rbs": [1], "start_TSS": "1"}
        sorf2 = {"strain": "aaa", "strand": "+", "start": 5, "end": 15, "srna": ["NA"],
                 "starts": [str(5)], "ends": [str(15)], "seq": "ATGTA",
                 "type": "3utr", "print": False, "rbs": [2], "start_TSS": "2"}
        final = {"strain": "aaa", "strand": "+", "start": 2, "end": 6, "srna": ["NA"],
                 "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                 "type": "3utr", "print": False, "rbs": [1], "start_TSS": "1"}
        sd.import_overlap(sorf2, final, sorf1, True)
        self.assertDictEqual(final, {
            'end': 15, 'candidate': ['2-6_TSS:1_RBS:1', '5-15_TSS:2_RBS:2'],
            'start': 2, 'rbs': [1, 2], 'strand': '+', 'strain': 'aaa',
            'print': False, 'seq': 'ATGTA', 'ends': ['10', '15'], "srna": ["NA"],
            'start_TSS': '1', 'type': '3utr', 'starts': ['2', '5']})
    def test_merge(self):
        # merge() should collapse overlapping sORFs into one candidate with
        # the sequence re-extracted from the genome.
        seq = {"aaa": "TAGGAGGCCGCTATGCCATTA"}
        sorfs = [{"strain": "aaa", "strand": "+", "start": 2, "end": 6,
                  "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                  "type": "3utr", "print": False, "rbs": [1], "srna": ["sRNA1"],
                  "start_TSS": "1"},
                 {"strain": "aaa", "strand": "+", "start": 5, "end": 15,
                  "starts": [str(5)], "ends": [str(15)], "seq": "ATGTA",
                  "type": "3utr", "print": False, "rbs": [2], "srna": ["sRNA2"],
                  "start_TSS": "2"}]
        finals = sd.merge(sorfs, seq)
        self.assertDictEqual(finals[0], {
            'start_TSS': '1', 'rbs': [1, 2], 'strand': '+', 'strain': 'aaa',
            'start': 2, 'candidate': ['2-6_TSS:1_RBS:1', '5-15_TSS:2_RBS:2'],
            'ends': ['10', '6', '15'], 'starts': ['2', '5'], 'type': '3utr',
            'end': 15, 'seq': 'AGGAGGCCGCTATG', "srna": ["sRNA1", "sRNA2"]})

    def test_assign_utr_cutoff(self):
        # "median" coverages resolve to the per-track median value.
        coverages = {"3utr": "median", "5utr": 20,
                     "interCDS": 11, "intergenic": 59}
        medians = {"track": {"median": 50, "mean": 20}}
        cutoff = sd.assign_utr_cutoff(coverages, "3utr", medians, "track", 10)
        self.assertEqual(cutoff, 50)

    def test_get_cutoff(self):
        # get_cutoff() looks the cutoff up by strain, UTR type and track.
        sorf = {"strain": "aaa", "strand": "+", "start": 2, "end": 6,
                "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                "type": "3utr", "print": False, "rbs": [1], "start_TSS": "1"}
        coverages = {"3utr": "median", "5utr": 20,
                     "interCDS": 11, "intergenic": 59}
        medians = {"aaa": {"3utr": {"track_1": {"median": 50, "mean": 20}}}}
        cutoff = sd.get_cutoff(sorf, "track_1", coverages, medians, 10)
        self.assertEqual(cutoff, 50)

    def test_get_attribute(self):
        # get_attribute() serializes the sORF into a GFF attribute string;
        # list values are joined with commas.
        sorf = {"strain": "aaa", "strand": "+", "start": 2, "end": 6,
                "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                "type": "3utr", "print": False, "rbs": ["1"], "start_TSS": "1",
                "with_TSS": "NA", "srna": "NA", "shift": 1}
        string = sd.get_attribute(1, "sORF_1", "4", sorf, "utr")
        self.assertEqual(
            string,
            "ID=aaa_sorf1;Name=sORF_sORF_1;start_TSS=4;with_TSS=N,A;sORF_type=3utr;sRNA=N,A;rbs=1;frame_shift=1")
    def test_print_file(self):
        # print_file() writes one GFF line and one table line per sORF.
        out_g = StringIO()
        out_t = StringIO()
        sorf = {"strain": "aaa", "strand": "+", "start": 10, "end": 15,
                "starts": [str(10)], "ends": [str(15)], "seq": "ATGTA",
                "type": "3utr", "print": False, "rbs": ["3"], "start_TSS": "1",
                "with_TSS": ["NA"], "srna": ["NA"], "candidate": ["AAA"],
                "shift": 1}
        sorf_datas = {"best": 20, "high": 50, "low": 10, "start": 1,
                      "end": 10, "track": "track_1", "detail": [],
                      "conds": {"frag": "track_1"}}
        args = self.mock_args.mock()
        args.table_best = True
        args.print_all = True
        sd.print_file(sorf, sorf_datas, 1, out_g, out_t, "best", args)
        self.assertEqual(
            out_g.getvalue(),
            "aaa\tANNOgesic\tsORF\t10\t15\t.\t+\t.\tID=aaa_sorf1;Name=sORF_00001;start_TSS=1;with_TSS=NA;sORF_type=3utr;sRNA=NA;rbs=RBS_3;frame_shift=1\n")
        self.assertEqual(
            out_t.getvalue(),
            "aaa\tsORF_00001\t10\t15\t+\t3'UTR_derived\tNA\tRBS_3\t10\t15\tNA\t1\tFragmented\t20\t\tATGTA\tAAA\n")

    def test_print_table(self):
        # print_table() writes only the tab-separated table line.
        out_t = StringIO()
        sorf = {"strain": "aaa", "strand": "+", "start": 2, "end": 6,
                "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                "type": "3utr", "print": False, "rbs": ["1"], "start_TSS": "1",
                "with_TSS": ["NA"], "srna": ["NA"], "candidate": ["AAA"],
                "shift": 1}
        sorf_datas = {"best": 20, "high": 50, "low": 10, "start": 1,
                      "end": 10, "track": "track_1", "detail": [],
                      "conds": {"frag": "track_1"}}
        args = self.mock_args.mock()
        args.table_best = True
        args.print_all = True
        sd.print_table(out_t, sorf, "test", "3utr", "frag", sorf_datas, args)
        self.assertEqual(
            out_t.getvalue(),
            "aaa\tsORF_test\t2\t6\t+\t3utr\tNA\t1\t2\t10\tNA\t1\tfrag\t20\t\tATGTA\tAAA\n")
    def test_get_inter_coverage(self):
        # Per-track average coverages of intergenic regions are collected.
        inter_covers = {}
        inters = [{"frag": [{"track": "track_1", "avg": 22}]}]
        sd.get_inter_coverage(inters, inter_covers)
        self.assertDictEqual(inter_covers, {'track_1': [22]})

    def test_detect_utr_type(self):
        # Monkey-patch sd.get_coverage so detect_utr_type() appends the
        # canned value, then restore the real function afterwards.
        ta_dict = [{"seq_id": "aaa", "source": "intergenic",
                    "feature": "Transcript", "start": 1,
                    "end": 23, "phase": ".", "strand": "+", "score": "."}]
        attributes_tas = [{"ID": "tran0", "Name": "Transcript_0",
                           "UTR_type": "intergenic"}]
        tas = []
        tas.append(Create_generator(ta_dict[0], attributes_tas[0], "gff"))
        sd.get_coverage = self.mock.mock_get_coverage
        med_inters = {"aaa": {"intergenic": []}}
        sd.detect_utr_type(tas[0], "intergenic", med_inters,
                           "wigs", "+", "test")
        sd.get_coverage = get_coverage
        self.assertDictEqual(med_inters, {'aaa': {'intergenic': ["2"]}})

    def test_median_score(self):
        # 50th percentile of the list.
        num = sd.median_score([1, 3, 11, 42, 2, 32, 111], "p_0.5")
        self.assertEqual(num, 11)

    def test_mean_score(self):
        num = sd.mean_score([1, 3, 11, 42, 2, 32, 111])
        self.assertEqual(num, 28.857142857142858)

    def test_validate_tss(self):
        # A TSS within the UTR window validates; the "stop" side stays NA.
        sorf = {"strain": "aaa", "strand": "+", "start": 2, "end": 6,
                "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                "type": "3utr", "print": False, "rbs": ["1"], "start_TSS": "3",
                "with_TSS": ["TSS:3_+"], "srna": ["NA"], "candidate": ["AAA"]}
        datas = sd.validate_tss([2], [6], sorf, 300)
        self.assertEqual(datas, (['TSS:3_+'], 'NA'))
    def test_validate_srna(self):
        # An sRNA overlapping the start/end pair is confirmed.
        sorf = {"strain": "aaa", "strand": "+", "start": 2, "end": 6,
                "starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
                "type": "3utr", "print": False, "rbs": ["1"], "start_TSS": "1",
                "with_TSS": ["TSS:3_+"], "srna": ["sRNA:2-5_+"],
                "candidate": ["AAA"]}
        srnas = sd.validate_srna([2], [6], sorf)
        self.assertListEqual(srnas, ['sRNA:2-5_+'])
def test_get_best(self):
sorfs = [{"strain": "aaa", "strand": "+", "start": 2, "end": 6,
"starts": [str(2)], "ends": [str(10)], "seq": "ATGTA",
"type": "3utr", "print": False, "rbs": ["1"], "start_TSS": "1",
"with_TSS": ["TSS:3_+"], "srna": ["sRNA:2-5_+"],
"candidate": ["2-6_TSS:3_RBS:1"]}]
args = self.mock_args.mock()
args.table_best = True
args.no_srna = True
args.utr_length = 300
data = sd.get_best(sorfs, "tss", "srna", args)
self.assertListEqual(data, [
{'type': '3utr', 'strand': '+', 'print': False,
'with_TSS': ['TSS:3_+'], 'starts': ['2'], 'start': 2,
'srna': ['sRNA:2-5_+'], 'rbs': ['1'], 'end': 6, | |
postcode.
'''
return (elem.attrib['k'] == 'addr:postcode')
def audit(self, audit_file):
    '''
    Iterates over XML tag elements in order to find all of the addresses
    of type street.
    Evaluates the tag 'v' attributes to determine if the street suffixes
    are within the expected street suffix list.
    audit_file: path of the OSM/XML file to audit (a string)
    @return: List of two dicts: unexpected street suffixes mapped to the
             full street names, and zip-code types mapped to their values.
             (a list of two defaultdicts of sets)
    '''
    with open(audit_file, 'r') as f:
        street_types = defaultdict(set)
        zip_types = defaultdict(set)
        f.seek(0)
        # Stream-parse the XML; only 'node' and 'way' elements carry the
        # address tags we care about.
        # NOTE(review): with events=('start',) the element's children may
        # not be fully parsed when the event fires; this relies on the
        # parser's read-ahead buffering — confirm on very large elements.
        for event, elem in ET.iterparse(f, events=('start',)):
            if elem.tag == 'node' or elem.tag == 'way':
                for tag in elem.iter('tag'):
                    if self.isStreetName(tag):
                        self.auditStreetType(street_types, tag.attrib['v'])
                    if self.isZipCode(tag):
                        self.auditZipType(zip_types, tag.attrib['v'])
            # Free the processed element to keep memory bounded.
            elem.clear()
    street_types = self.sortStreets(street_types)
    return [street_types, zip_types]
def sortStreets(self, unsorted_streets):
    '''
    Builds a new dict whose keys are the street types of
    unsorted_streets inserted in ascending key order.
    unsorted_streets: mapping of street types to the street-name
                      instances of that type (a dict-like of strings)
    @return: dict of street suffixes to full street names, with keys
             inserted in sorted order (a dict)
    '''
    ordered = {}
    # Iterating a mapping yields its keys; sorted() fixes the order.
    for street_type in sorted(unsorted_streets):
        ordered[street_type] = unsorted_streets[street_type]
    return ordered
def clean(self, unexpected_dirty_streets):
    '''
    Get unexpected street suffixes and replace with acceptable street
    suffixes when determined that the data is unacceptably dirty.
    Assumes every key and street name produced by self.audit() is a
    non-empty string.
    unexpected_dirty_streets: mapping of unexpected street suffixes to
                              the full street names (a dict of sets)
    @return: mapping of each dirty street name to its cleaned version,
             accumulated in self.clean_streets_dict (a dict of strings)
    '''
    unexpected_streets = unexpected_dirty_streets.copy()
    # Iterate over the unexpected street types found by the audit.
    for suffix, streets in unexpected_streets.items():
        # Only suffixes with a known replacement are considered dirty.
        if suffix in self.dirty_to_clean_streets:
            replacement = self.dirty_to_clean_streets[suffix]
            for street in streets:
                # Swap the dirty suffix for the canonical one.
                # (The original rebuilt list(streets)[i] on every
                # iteration — O(n^2) and dependent on set ordering.)
                good_street = street[: -len(suffix)] + replacement
                self.clean_streets_dict[street] = good_street
    return self.clean_streets_dict
def writeClean(self, cleaned_streets):
    '''
    Get cleaned streets mapping dictionary and use that dictionary to find
    and replace all bad street name tag attributes within XML file.
    Iterate through XML file to find all bad instances of tag attribute
    street names, and replace with correct mapping value from cleaned_streets
    mapping dictionary.
    Stores new cleaned XML file in 'output.osm'
    cleaned_streets: Clean sorted defaultdict of street names with correct
                     suffixes (a defaultdict of strings)
    '''
    with open('output.osm', 'w') as output:
        output.write("<?xml version='1.0' encoding='UTF-8'?>\n")
        output.write('<osm>\n ')
        osm_file = open(self.getSampleFile(), 'r')
        for event, elem in ET.iterparse(osm_file, events=('start', 'end')):
            # Begin processing when the end of the element is reached
            # Include all elements, except 'osm', for processing (so that your files are identical)
            if event == 'end' and (elem.tag in ['node', 'way', 'relation', 'bounds', 'meta', 'note']):
                for tag in elem.iter('tag'):
                    # Check if tag is a street name tag, set street name to street
                    if self.isStreetName(tag):
                        street = tag.attrib['v']
                        # If street name is in clean streets dict, replace
                        # dirty street with clean street value
                        if street in cleaned_streets.keys():
                            tag.attrib['v'] = cleaned_streets[street]
                    # Check if tag is a zip code tag, set zip code to 'NaN' if not valid
                    if self.isZipCode(tag):
                        zip_code = tag.attrib['v']
                        if zip_code not in self.getExpectedZip():
                            tag.attrib['v'] = 'NaN'
                # Move the write function inside the condition, so that it only writes
                # tags that you specify (i.e. everything apart from the root <osm> element)
                # NOTE(review): on Python 3, ET.tostring(..., encoding='utf-8')
                # returns bytes (and prepends an XML declaration per element),
                # while 'output' is opened in text mode — this looks like it
                # assumes Python 2 semantics; confirm the target interpreter.
                output.write(ET.tostring(elem, encoding='utf-8'))
                elem.clear()
        output.write('</osm>')
        # NOTE(review): consider a with-block for osm_file so it is closed
        # even if iterparse raises.
        osm_file.close()
class JsonFile(object):
    """Converts a cleaned OSM/XML file into line-delimited JSON documents."""

    def __init__(self, output_file):
        '''
        Initialize a JSON File instance, saves all parameters as attributes
        of the instance. Takes in an XML file and returns a JSON file
        lower: Regex created to find lowercase characters for
               tag elements (a regex)
        lower_colon: Regex created to find lowercase characters for
                     tag elements when a colon is included (a regex)
        problemchars: Regex created to find special characters for
                      tags and tag elements (a regex)
        created_tags: Tag element names, which are deemed as acceptable for
                      adding information (a list of strings)
        output_file: XML OSM output file, created in given output_file
                     path (a string)
        '''
        self.lower = re.compile(r'^([a-z]|_)*$')
        self.lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
        self.problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
        self.created_tags = ['version', 'changeset', 'timestamp', 'user', 'uid']
        self.output_file = output_file

    def getElement(self, file_in, tags=('node', 'way', 'relation')):
        '''
        XML tag element generator
        file_in: path of the XML file to stream (a string)
        tags: tag elements to search for in OSM file (a tuple of strings)
        @yield element if it is the right type of tag
        Reference:
        http://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python
        '''
        context = iter(ET.iterparse(file_in, events=('start', 'end')))
        _, root = next(context)
        for event, elem in context:
            if event == 'end' and elem.tag in tags:
                yield elem
                # Clearing the root drops references to completed elements,
                # keeping memory bounded while streaming large files.
                root.clear()

    def shapeElement(self, element):
        '''
        Takes in XML element, shapes it into JSON node as dictionary, returns shaped element.
        element: XML ElementTree element, which is shaped into JSON node (an ET object)
        @return: node for JSON file creation (a dictionary), or None for
                 element tags other than 'node'/'way'
        '''
        node = {}
        address = {}
        created = {}
        node_refs = []
        pos = []
        if element.tag == 'node' or element.tag == 'way':
            node['type'] = element.tag
            # Get and store GPS (lat, lon) cooridinates
            if 'lat' in element.attrib.keys() and 'lon' in element.attrib.keys():
                try:
                    lat = float(element.attrib['lat'])
                    lon = float(element.attrib['lon'])
                    pos.insert(0, lat)
                    pos.insert(1, lon)
                except:
                    # NOTE(review): bare except silently drops malformed
                    # coordinates; consider catching ValueError explicitly.
                    pass
            # Get and set {tag : attrib} into dict
            for k, m in element.attrib.items():
                # NOTE(review): 'k not in pos' compares attribute NAMES
                # against the float coordinate list, so it is always true;
                # lat/lon are instead removed from node further below.
                if k not in pos:
                    if k in self.created_tags:
                        created[k] = m
                    else:
                        node[k] = m
            # Get and set node type into node dict
            if created:
                node['created'] = created
            if pos:
                node['pos'] = pos
            if address:
                node['address'] = address
            if node_refs:
                node['node_refs'] = node_refs
            # Drop the raw lat/lon copies; 'pos' already carries them.
            if 'lon' in node.keys():
                node.pop('lon')
            if 'lat' in node.keys():
                node.pop('lat')
            # Iterate over subtags in element, set attribs when valid
            for child in element:
                if child.tag == 'nd':
                    try:
                        node['node_refs'].append(child.attrib['ref'])
                    except:
                        # First 'nd' child: create the list lazily.
                        node['node_refs'] = []
                        node['node_refs'].append(child.attrib['ref'])
                elif child.tag == 'tag':
                    # Clean and set 'addr:' attrib
                    if self.problemchars.search(child.attrib['k']):
                        pass
                    elif child.attrib['k'].startswith('addr:'):
                        key = re.sub('addr:', '', child.attrib['k']).strip()
                        if self.lower_colon.match(key):
                            # NOTE(review): 'break' abandons ALL remaining
                            # children of this element — presumably
                            # 'continue' was intended; confirm before
                            # changing, as output depends on it.
                            break
                        else:
                            try:
                                node['address'][key] = child.attrib['v']
                            except:
                                # First address key: create the dict lazily.
                                node['address'] = {}
                                node['address'][key] = child.attrib['v']
                    # Set already clean attrib
                    else:
                        node[child.attrib['k']] = child.attrib['v']
            return node
        else:
            return None

    def processMap(self, pretty=False):
        '''
        Takes an XML file, maps and creates a JSON file of the same information,
        struction, and element nodes as the input XML file
        pretty: If pretty, creates a human readable JSON file (a bool)
        @return: List of JSON dictionary shaped node elements (a list)
        '''
        file_in = self.output_file
        file_out = '{0}.json'.format(file_in)
        data = []
        # NOTE(review): the triple-quoted block below is an earlier
        # implementation kept verbatim; it is an unused string literal
        # (a no-op), not executed code.
        '''
        # Create JSON output file, shape and map each XML element
        with codecs.open(file_out, 'w') as fo:
            for _, element in ET.iterparse(file_in):
                el = self.shapeElement(element)
                if el:
                    data.append(el)
                    if pretty:
                        fo.write(json.dumps(el, indent=2) + '\n')
                    else:
                        fo.write(json.dumps(el) + '\n')
        return data
        '''
        # Stream elements through the generator, shaping and writing one
        # JSON document per line.
        with codecs.open(file_out, 'w') as fo:
            for i, element in enumerate(self.getElement(file_in)):
                el = self.shapeElement(element)
                if el:
                    data.append(el)
                    if pretty:
                        fo.write(json.dumps(el, indent=2) + '\n')
                    else:
                        fo.write(json.dumps(el) + '\n')
        return data
def mongoAggregate(cursor):
    '''
    Takes in pymongo aggregate cursor object, iterates through each element
    within the aggregation, then returns the list of elements
    cursor: pymongo aggregate cursor object (or any iterable of result
            documents), which is iterated (a cursor object)
    @return: List of aggregation elements (a list)
    '''
    # list() consumes the cursor directly. The original built the result
    # via a side-effecting list comprehension, which also allocated a
    # throwaway list of None values.
    return list(cursor)
if __name__ == '__main__':
# Get OSM File, which is Brooklyn OpenStreetMap
# https://mapzen.com/data/metro-extracts/metro/brooklyn_new-york/
xml_original_file = 'brooklyn_new-york.osm' # Original OSM File input name
xml_sample_file = 'sample.osm' # Sample OSM File output name
xml_cleaned_file = 'output.osm'
sample_size = 1
# Initialize and create OSM original file | |
#MenuTitle: Vertical Metrics Manager
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
try:
from builtins import str
except Exception as e:
print("Warning: 'future' module not installed. Run 'sudo pip install future' in Terminal.")
__doc__="""
Manage and sync ascender, descender and linegap values for hhea, OS/2 sTypo and OS/2 usWin.
"""
import vanilla
def cleanInt(numberString):
    """Extract the digits and sign characters from a user-entered value
    and return them rounded to an int.

    numberString: any value; it is stringified first, then every character
        other than 0-9, '+' and '-' is discarded.
    Returns: the remaining characters parsed as a float, rounded to int.
    Raises ValueError if no parseable characters remain.
    """
    exportString = ""
    # The original called unicode(), which raises NameError on Python 3.
    # Keep the Python 2 behavior when available, fall back to str().
    try:
        numberString = unicode(numberString)
    except NameError:
        numberString = str(numberString)
    for char in numberString:
        if char in "1234567890+-":
            exportString += char
    floatNumber = float(exportString)
    floatNumber = round(floatNumber)
    return int(floatNumber)
def roundUpByValue(x, roundBy):
    """Round x away from zero to the next multiple of roundBy.

    The magnitude of x is rounded UP to a multiple of roundBy and the
    original sign is restored; zero stays zero (avoiding a division by
    zero when computing the sign).
    """
    if x == 0:
        return 0
    magnitude = abs(x)
    rounded = magnitude // roundBy * roundBy
    # Any remainder means we have not reached the next multiple yet.
    if x % roundBy:
        rounded += roundBy
    return int(rounded if x > 0 else -rounded)
class VerticalMetricsManager( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 330
windowHeight = 410
windowWidthResize = 100 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Vertical Metrics Manager", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.VerticalMetricsManager.mainwindow" # stores last window position and size
)
# UI elements:
linePos, inset, lineHeight = 12, 15, 22
self.w.descriptionText = vanilla.TextBox( (inset, linePos+2, -inset, 14), u"Manage and sync hhea, typo and win values.", sizeStyle='small', selectable=True )
linePos += lineHeight
self.w.titleAscent = vanilla.TextBox( (inset+70, linePos+4, 70, 14), u"Ascender", sizeStyle='small', selectable=True )
self.w.titleDescent = vanilla.TextBox( (inset+140, linePos+4, 70, 14), u"Descender", sizeStyle='small', selectable=True )
self.w.titleLineGap = vanilla.TextBox( (inset+210, linePos+4, 70, 14), u"Line Gap", sizeStyle='small', selectable=True )
linePos += lineHeight
self.w.titleWin = vanilla.TextBox( (inset, linePos+3, 70, 14), u"OS/2 usWin", sizeStyle='small', selectable=True )
self.w.winAsc = vanilla.EditText( (inset+70, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
self.w.winAsc.getNSTextField().setToolTip_("OS/2 usWinAscent. Should be the maximum height in your font. Expect clipping or rendering artefacts beyond this point.")
self.w.winDesc = vanilla.EditText( (inset+140, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
self.w.winDesc.getNSTextField().setToolTip_("OS/2 usWinDescent (unsigned integer). Should be the maximum depth in your font, like the lowest descender you have. Expect clipping or rendering artefacts beyond this point.")
self.w.winGap = vanilla.EditText( (inset+210, linePos, 65, 19), "", callback=None, sizeStyle='small', readOnly=True, placeholder=u"n/a" )
self.w.winGap.getNSTextField().setToolTip_("OS/2 usWinLineGap does not exist, hence greyed out here.")
self.w.winUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
self.w.winUpdate.getNSButton().setToolTip_("Will recalculate the OS/2 usWin values in the fields to the left. Takes the measurement settings below into account, except for the Limit options.")
linePos += lineHeight
self.w.titleTypo = vanilla.TextBox( (inset, linePos+3, 70, 14), u"OS/2 sTypo", sizeStyle='small', selectable=True )
self.w.typoAsc = vanilla.EditText( (inset+70, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
self.w.typoAsc.getNSTextField().setToolTip_("OS/2 sTypoAscender (positive value), should be the same as hheaAscender. Should be the maximum height of the glyphs relevant for horizontal text setting in your font, like the highest accented uppercase letter, typically Aring or Ohungarumlaut. Used for first baseline offset in DTP and office apps and together with the line gap value, also in browsers.")
self.w.typoDesc = vanilla.EditText( (inset+140, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
self.w.typoDesc.getNSTextField().setToolTip_("OS/2 sTypoDescender (negative value), should be the same as hheaDescender. Should be the maximum depth of the glyphs relevant for horizontal text setting in your font, like the lowest descender or bottom accent, typically Gcommaccent, Ccedilla, or one of the lowercase descenders (gjpqy). Together with the line gap value, used for line distance calculation in office apps and browsers.")
self.w.typoGap = vanilla.EditText( (inset+210, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
self.w.typoGap.getNSTextField().setToolTip_("OS/2 sTypoLineGap (positive value), should be the same as hheaLineGap. Should be either zero or a value for padding between lines that makes sense visually. Office apps insert this distance between the lines, browsers add half on top and half below each line, also for determining text object boundaries.")
self.w.typoUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
self.w.typoUpdate.getNSButton().setToolTip_("Will recalculate the OS/2 sTypo values in the fields to the left. Takes the measurement settings below into account.")
linePos += lineHeight
self.w.titleHhea = vanilla.TextBox( (inset, linePos+3, 70, 14), u"hhea", sizeStyle='small', selectable=True )
self.w.hheaAsc = vanilla.EditText( (inset+70, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
self.w.hheaAsc.getNSTextField().setToolTip_("hheaAscender (positive value), should be the same as OS/2 sTypoAscender. Should be the maximum height of the glyphs relevant for horizontal text setting in your font, like the highest accented uppercase letter, typically Aring or Ohungarumlaut. Used for first baseline offset in Mac office apps and together with the line gap value, also in Mac browsers.")
self.w.hheaDesc = vanilla.EditText( (inset+140, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
self.w.hheaDesc.getNSTextField().setToolTip_("hheaDescender (negative value), should be the same as OS/2 sTypoDescender. Should be the maximum depth of the glyphs relevant for horizontal text setting in your font, like the lowest descender or bottom accent, typically Gcommaccent, Ccedilla, or one of the lowercase descenders (gjpqy). Together with the line gap value, used for line distance calculation in office apps and browsers.")
self.w.hheaGap = vanilla.EditText( (inset+210, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
self.w.hheaGap.getNSTextField().setToolTip_("hheaLineGap (positive value), should be the same as OS/2 sTypoLineGap. Should be either zero or a value for padding between lines that makes sense visually. Mac office apps insert this distance between the lines, Mac browsers add half on top and half below each line, also for determining text object boundaries.")
self.w.hheaUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
self.w.hheaUpdate.getNSButton().setToolTip_("Will recalculate the hhea values in the fields to the left. Takes the measurement settings below into account.")
linePos += lineHeight
self.w.useTypoMetrics = vanilla.CheckBox( (inset+70, linePos, -inset, 20), u"Use Typo Metrics (fsSelection bit 7)", value=True, callback=self.SavePreferences, sizeStyle='small' )
self.w.useTypoMetrics.getNSButton().setToolTip_("Should ALWAYS BE ON. Only uncheck if you really know what you are doing. If unchecked, line behaviour will be not consistent between apps and browsers because some apps prefer win values to sTypo values for determining line distances.")
self.w.useTypoMetricsUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
self.w.useTypoMetricsUpdate.getNSButton().setToolTip_("Will reset the checkbox to the left to ON, because it should ALWAYS be on. Strongly recommended.")
linePos += lineHeight*1.5
self.w.descriptionMeasurements = vanilla.TextBox( (inset, linePos+2, -inset, 14), u"Taking Measurements (see tooltips for info):", sizeStyle='small', selectable=True )
linePos += lineHeight
self.w.round = vanilla.CheckBox( (inset, linePos, 70, 20), u"Round by:", value=True, callback=self.SavePreferences, sizeStyle='small' )
self.w.round.getNSButton().setToolTip_("Turn on if you want your values rounded. Recommended.")
self.w.roundValue = vanilla.EditText( (inset+75, linePos, 60, 19), "10", callback=self.SavePreferences, sizeStyle='small' )
self.w.roundValue.getNSTextField().setToolTip_("All value calculations will be rounded up to the next multiple of this value. Recommended: 10.")
linePos += lineHeight
self.w.includeAllMasters = vanilla.CheckBox( (inset, linePos, -inset, 20), u"Include all masters (otherwise current master only)", value=True, callback=self.SavePreferences, sizeStyle='small' )
self.w.includeAllMasters.getNSButton().setToolTip_("If checked, all masters will be measured. If unchecked, only the current master will be measured. Since vertical metrics should be the same throughout all masters, it also makes sense to measure on all masters.")
linePos += lineHeight
self.w.respectMarkToBaseOffset = vanilla.CheckBox( (inset, linePos, -inset, 20), "Include mark-to-base offset for OS/2 usWin", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.respectMarkToBaseOffset.getNSButton().setToolTip_("If checked will calculate the maximum possible height that can be reached with top-anchored marks, and the lowest depth with bottom-anchored marks, and use those values for the OS/2 usWin values. Strongly recommended for making fonts work on Windows if they rely on mark-to-base positioning (e.g. Arabic). Respects the ‘Limit to Script’ setting.")
linePos += lineHeight
self.w.ignoreNonExporting = vanilla.CheckBox( (inset, linePos, -inset, 20), u"Ignore non-exporting glyphs", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.ignoreNonExporting.getNSButton().setToolTip_("If checked, glyphs that do not export will be excluded from measuring. Recommended. (Ignored for calculating the OS/2 usWin values.)")
linePos += lineHeight
self.w.preferSelectedGlyphs = vanilla.CheckBox( (inset, linePos, -inset, 20), u"Limit to selected glyphs", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.preferSelectedGlyphs.getNSButton().setToolTip_("If checked, only the current glyphs will be measured. Can be combined with the other Limit options. May make sense if you want your metrics to be e.g. Latin-CE-centric.")
linePos += lineHeight
self.w.preferScript = vanilla.CheckBox( (inset, linePos, inset+110, 20), u"Limit to script:", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.preferScript.getNSButton().setToolTip_("If checked, only measures glyphs belonging to the selected writing system. Can be combined with the other Limit options. (Ignored for calculating the OS/2 usWin values, but respected for mark-to-base calculation.)")
self.w.preferScriptPopup = vanilla.PopUpButton( (inset+115, linePos+1, -inset-25, 17), (u"latin", u"greek"), sizeStyle='small', callback=self.SavePreferences )
self.w.preferScriptPopup.getNSPopUpButton().setToolTip_("Choose a writing system ('script') you want the measurements to be limited to. May make sense to ignore other scripts if the font is intended only for e.g. Cyrillic. Does not apply to OS/2 usWin")
self.w.preferScriptUpdate = vanilla.SquareButton( (-inset-20, linePos+1, -inset, 18), u"↺", sizeStyle='small', callback=self.update )
self.w.preferScriptUpdate.getNSButton().setToolTip_("Update the script popup to the left with all scripts (writing systems) found in the current font.")
linePos += lineHeight
self.w.preferCategory | |
self.c.close()
def testPutline(self):
putline = self.c.putline
query = self.c.query
data = list(enumerate("apple pear plum cherry banana".split()))
query("copy test from stdin")
try:
for i, v in data:
putline("%d\t%s\n" % (i, v))
putline("\\.\n")
finally:
self.c.endcopy()
r = query("select * from test").getresult()
self.assertEqual(r, data)
    def testPutlineBytesAndUnicode(self):
        """Check that putline accepts both bytes and unicode input."""
        putline = self.c.putline
        query = self.c.query
        try:
            query("select 'käse+würstel'")
        except (pg.DataError, pg.NotSupportedError):
            self.skipTest('database does not support utf8')
        query("copy test from stdin")
        try:
            # lines may be passed as utf-8 encoded bytes ...
            putline(u"47\tkäse\n".encode('utf8'))
            # ... or as (unicode) strings
            putline("35\twürstel\n")
            # the end-of-data marker may be bytes as well
            putline(b"\\.\n")
        finally:
            self.c.endcopy()
        r = query("select * from test").getresult()
        self.assertEqual(r, [(47, 'käse'), (35, 'würstel')])
def testGetline(self):
getline = self.c.getline
query = self.c.query
data = list(enumerate("apple banana pear plum strawberry".split()))
n = len(data)
self.c.inserttable('test', data)
query("copy test to stdout")
try:
for i in range(n + 2):
v = getline()
if i < n:
self.assertEqual(v, '%d\t%s' % data[i])
elif i == n:
self.assertEqual(v, '\\.')
else:
self.assertIsNone(v)
finally:
try:
self.c.endcopy()
except IOError:
pass
    def testGetlineBytesAndUnicode(self):
        """Check the types returned by getline for non-ascii table data."""
        getline = self.c.getline
        query = self.c.query
        try:
            query("select 'käse+würstel'")
        except (pg.DataError, pg.NotSupportedError):
            self.skipTest('database does not support utf8')
        # insert one row as encoded bytes and one as a unicode string
        data = [(54, u'käse'.encode('utf8')), (73, u'würstel')]
        self.c.inserttable('test', data)
        query("copy test to stdout")
        try:
            # regardless of how the rows were inserted,
            # getline returns decoded str values
            v = getline()
            self.assertIsInstance(v, str)
            self.assertEqual(v, '54\tkäse')
            v = getline()
            self.assertIsInstance(v, str)
            self.assertEqual(v, '73\twürstel')
            # after the data comes the end-of-copy marker ...
            self.assertEqual(getline(), '\\.')
            # ... and then None
            self.assertIsNone(getline())
        finally:
            try:
                self.c.endcopy()
            except IOError:
                pass
def testParameterChecks(self):
self.assertRaises(TypeError, self.c.putline)
self.assertRaises(TypeError, self.c.getline, 'invalid')
self.assertRaises(TypeError, self.c.endcopy, 'invalid')
class TestNotificatons(unittest.TestCase):
    """Test notification support.

    NOTE: the class name is misspelled ('Notificatons'); it is kept
    unchanged so that selecting tests by name keeps working.
    """

    def setUp(self):
        # a fresh connection for every test
        self.c = connect()

    def tearDown(self):
        self.doCleanups()
        self.c.close()

    def testGetNotify(self):
        """Run a listen/notify round trip, without and with a payload."""
        getnotify = self.c.getnotify
        query = self.c.query
        self.assertIsNone(getnotify())
        query('listen test_notify')
        try:
            self.assertIsNone(self.c.getnotify())
            query("notify test_notify")
            r = getnotify()
            # a notification is a (channel, backend_pid, payload) tuple
            self.assertIsInstance(r, tuple)
            self.assertEqual(len(r), 3)
            self.assertIsInstance(r[0], str)
            self.assertIsInstance(r[1], int)
            self.assertIsInstance(r[2], str)
            self.assertEqual(r[0], 'test_notify')
            self.assertEqual(r[2], '')
            self.assertIsNone(self.c.getnotify())
            query("notify test_notify, 'test_payload'")
            r = getnotify()
            # consistency fix: use assertIsInstance like the checks above
            # instead of assertTrue(isinstance(...))
            self.assertIsInstance(r, tuple)
            self.assertEqual(len(r), 3)
            self.assertIsInstance(r[0], str)
            self.assertIsInstance(r[1], int)
            self.assertIsInstance(r[2], str)
            self.assertEqual(r[0], 'test_notify')
            self.assertEqual(r[2], 'test_payload')
            self.assertIsNone(getnotify())
        finally:
            query('unlisten test_notify')

    def testGetNoticeReceiver(self):
        # no notice receiver is installed by default
        self.assertIsNone(self.c.get_notice_receiver())

    def testSetNoticeReceiver(self):
        # the receiver must be callable or None
        self.assertRaises(TypeError, self.c.set_notice_receiver, 42)
        self.assertRaises(TypeError, self.c.set_notice_receiver, 'invalid')
        self.assertIsNone(self.c.set_notice_receiver(lambda notice: None))
        self.assertIsNone(self.c.set_notice_receiver(None))

    def testSetAndGetNoticeReceiver(self):
        r = lambda notice: None
        self.assertIsNone(self.c.set_notice_receiver(r))
        self.assertIs(self.c.get_notice_receiver(), r)
        self.assertIsNone(self.c.set_notice_receiver(None))
        self.assertIsNone(self.c.get_notice_receiver())

    def testNoticeReceiver(self):
        """Check that a raised warning reaches the notice receiver."""
        self.addCleanup(self.c.query, 'drop function bilbo_notice();')
        self.c.query('''create function bilbo_notice() returns void AS $$
            begin
                raise warning 'Bilbo was here!';
            end;
            $$ language plpgsql''')
        received = {}

        def notice_receiver(notice):
            # copy all public attributes of the notice into a plain dict
            for attr in dir(notice):
                if attr.startswith('__'):
                    continue
                value = getattr(notice, attr)
                if isinstance(value, str):
                    # normalize the severity for German-localized servers
                    value = value.replace('WARNUNG', 'WARNING')
                received[attr] = value

        self.c.set_notice_receiver(notice_receiver)
        self.c.query('select bilbo_notice()')
        self.assertEqual(received, dict(
            pgcnx=self.c, message='WARNING: Bilbo was here!\n',
            severity='WARNING', primary='Bilbo was here!',
            detail=None, hint=None))
class TestConfigFunctions(unittest.TestCase):
"""Test the functions for changing default settings.
To test the effect of most of these functions, we need a database
connection. That's why they are covered in this test module.
"""
    def setUp(self):
        # open a fresh connection and pin the session settings
        # that the conversion tests below rely on
        self.c = connect()
        self.c.query("set client_encoding=utf8")
        self.c.query('set bytea_output=hex')
        self.c.query("set lc_monetary='C'")
    def tearDown(self):
        # close the per-test connection opened in setUp
        self.c.close()
def testGetDecimalPoint(self):
point = pg.get_decimal_point()
# error if a parameter is passed
self.assertRaises(TypeError, pg.get_decimal_point, point)
self.assertIsInstance(point, str)
self.assertEqual(point, '.') # the default setting
pg.set_decimal_point(',')
try:
r = pg.get_decimal_point()
finally:
pg.set_decimal_point(point)
self.assertIsInstance(r, str)
self.assertEqual(r, ',')
pg.set_decimal_point("'")
try:
r = pg.get_decimal_point()
finally:
pg.set_decimal_point(point)
self.assertIsInstance(r, str)
self.assertEqual(r, "'")
pg.set_decimal_point('')
try:
r = pg.get_decimal_point()
finally:
pg.set_decimal_point(point)
self.assertIsNone(r)
pg.set_decimal_point(None)
try:
r = pg.get_decimal_point()
finally:
pg.set_decimal_point(point)
self.assertIsNone(r)
def testSetDecimalPoint(self):
d = pg.Decimal
point = pg.get_decimal_point()
self.assertRaises(TypeError, pg.set_decimal_point)
# error if decimal point is not a string
self.assertRaises(TypeError, pg.set_decimal_point, 0)
# error if more than one decimal point passed
self.assertRaises(TypeError, pg.set_decimal_point, '.', ',')
self.assertRaises(TypeError, pg.set_decimal_point, '.,')
# error if decimal point is not a punctuation character
self.assertRaises(TypeError, pg.set_decimal_point, '0')
query = self.c.query
# check that money values are interpreted as decimal values
# only if decimal_point is set, and that the result is correct
# only if it is set suitable for the current lc_monetary setting
select_money = "select '34.25'::money"
proper_money = d('34.25')
bad_money = d('3425')
en_locales = 'en', 'en_US', 'en_US.utf8', 'en_US.UTF-8'
en_money = '$34.25', '$ 34.25', '34.25$', '34.25 $', '34.25 Dollar'
de_locales = 'de', 'de_DE', 'de_DE.utf8', 'de_DE.UTF-8'
de_money = ('34,25€', '34,25 €', '€34,25', '€ 34,25',
'EUR34,25', 'EUR 34,25', '34,25 EUR', '34,25 Euro', '34,25 DM')
# first try with English localization (using the point)
for lc in en_locales:
try:
query("set lc_monetary='%s'" % lc)
except pg.DataError:
pass
else:
break
else:
self.skipTest("cannot set English money locale")
try:
r = query(select_money)
except pg.DataError:
# this can happen if the currency signs cannot be
# converted using the encoding of the test database
self.skipTest("database does not support English money")
pg.set_decimal_point(None)
try:
r = r.getresult()[0][0]
finally:
pg.set_decimal_point(point)
self.assertIsInstance(r, str)
self.assertIn(r, en_money)
r = query(select_money)
pg.set_decimal_point('')
try:
r = r.getresult()[0][0]
finally:
pg.set_decimal_point(point)
self.assertIsInstance(r, str)
self.assertIn(r, en_money)
r = query(select_money)
pg.set_decimal_point('.')
try:
r = r.getresult()[0][0]
finally:
pg.set_decimal_point(point)
self.assertIsInstance(r, d)
self.assertEqual(r, proper_money)
r = query(select_money)
pg.set_decimal_point(',')
try:
r = r.getresult()[0][0]
finally:
pg.set_decimal_point(point)
self.assertIsInstance(r, d)
self.assertEqual(r, bad_money)
r = query(select_money)
pg.set_decimal_point("'")
try:
r = r.getresult()[0][0]
finally:
pg.set_decimal_point(point)
self.assertIsInstance(r, d)
self.assertEqual(r, bad_money)
# then try with German localization (using the comma)
for lc in de_locales:
try:
query("set lc_monetary='%s'" % lc)
except pg.DataError:
pass
else:
break
else:
self.skipTest("cannot set German money locale")
select_money = select_money.replace('.', ',')
try:
r = query(select_money)
except pg.DataError:
self.skipTest("database does not support English money")
pg.set_decimal_point(None)
try:
r = r.getresult()[0][0]
finally:
pg.set_decimal_point(point)
self.assertIsInstance(r, str)
self.assertIn(r, de_money)
r = query(select_money)
pg.set_decimal_point('')
try:
r = r.getresult()[0][0]
finally:
pg.set_decimal_point(point)
self.assertIsInstance(r, str)
self.assertIn(r, de_money)
r = query(select_money)
pg.set_decimal_point(',')
try:
r = r.getresult()[0][0]
finally:
pg.set_decimal_point(point)
self.assertIsInstance(r, d)
self.assertEqual(r, proper_money)
r = query(select_money)
pg.set_decimal_point('.')
try:
r = r.getresult()[0][0]
finally:
pg.set_decimal_point(point)
self.assertEqual(r, bad_money)
r = query(select_money)
pg.set_decimal_point("'")
try:
r = r.getresult()[0][0]
finally:
pg.set_decimal_point(point)
self.assertEqual(r, bad_money)
def testGetDecimal(self):
decimal_class = pg.get_decimal()
# error if a parameter is passed
self.assertRaises(TypeError, pg.get_decimal, decimal_class)
self.assertIs(decimal_class, pg.Decimal) # the default setting
pg.set_decimal(int)
try:
r = pg.get_decimal()
finally:
pg.set_decimal(decimal_class)
self.assertIs(r, int)
r = pg.get_decimal()
self.assertIs(r, decimal_class)
    def testSetDecimal(self):
        """Check that set_decimal changes the Python type used for numeric."""
        decimal_class = pg.get_decimal()
        # error if no parameter is passed
        self.assertRaises(TypeError, pg.set_decimal)
        query = self.c.query
        try:
            r = query("select 3425::numeric")
        except pg.DatabaseError:
            self.skipTest('database does not support numeric')
        r = r.getresult()[0][0]
        self.assertIsInstance(r, decimal_class)
        self.assertEqual(r, decimal_class('3425'))
        r = query("select 3425::numeric")
        # switching the class between query() and getresult() changes the
        # result type, i.e. the cast is applied when the rows are fetched
        pg.set_decimal(int)
        try:
            r = r.getresult()[0][0]
        finally:
            # always restore the original class for the other tests
            pg.set_decimal(decimal_class)
        self.assertNotIsInstance(r, decimal_class)
        self.assertIsInstance(r, int)
        self.assertEqual(r, int(3425))
def testGetBool(self):
use_bool = pg.get_bool()
# error if a parameter is passed
self.assertRaises(TypeError, pg.get_bool, use_bool)
self.assertIsInstance(use_bool, bool)
self.assertIs(use_bool, True) # the default setting
pg.set_bool(False)
try:
r = pg.get_bool()
finally:
pg.set_bool(use_bool)
self.assertIsInstance(r, bool)
self.assertIs(r, False)
pg.set_bool(True)
try:
r = pg.get_bool()
finally:
pg.set_bool(use_bool)
self.assertIsInstance(r, bool)
self.assertIs(r, True)
pg.set_bool(0)
try:
r = pg.get_bool()
finally:
pg.set_bool(use_bool)
self.assertIsInstance(r, bool)
self.assertIs(r, False)
pg.set_bool(1)
try:
r = pg.get_bool()
finally:
pg.set_bool(use_bool)
self.assertIsInstance(r, bool)
self.assertIs(r, True)
def testSetBool(self):
use_bool = pg.get_bool()
# error if no parameter is passed
self.assertRaises(TypeError, pg.set_bool)
query = self.c.query
try:
r = query("select true::bool")
except pg.ProgrammingError:
self.skipTest('database does not support bool')
r = r.getresult()[0][0]
self.assertIsInstance(r, bool)
self.assertEqual(r, True)
r = query("select true::bool")
pg.set_bool(False)
try:
r = r.getresult()[0][0]
finally:
pg.set_bool(use_bool)
self.assertIsInstance(r, str)
self.assertIs(r, 't')
r = query("select true::bool")
pg.set_bool(True)
try:
r = r.getresult()[0][0]
finally:
pg.set_bool(use_bool)
self.assertIsInstance(r, bool)
self.assertIs(r, True)
def testGetByteEscaped(self):
bytea_escaped = pg.get_bytea_escaped()
# error if a parameter is passed
self.assertRaises(TypeError, pg.get_bytea_escaped, bytea_escaped)
self.assertIsInstance(bytea_escaped, bool)
self.assertIs(bytea_escaped, False) # the default setting
pg.set_bytea_escaped(True)
try:
r = pg.get_bytea_escaped()
finally:
pg.set_bytea_escaped(bytea_escaped)
self.assertIsInstance(r, bool)
self.assertIs(r, True)
pg.set_bytea_escaped(False)
try:
r = pg.get_bytea_escaped()
finally:
pg.set_bytea_escaped(bytea_escaped)
self.assertIsInstance(r, bool)
self.assertIs(r, False)
pg.set_bytea_escaped(1)
try:
r = pg.get_bytea_escaped()
finally:
pg.set_bytea_escaped(bytea_escaped)
self.assertIsInstance(r, bool)
self.assertIs(r, True)
pg.set_bytea_escaped(0)
try:
r = pg.get_bytea_escaped()
finally:
pg.set_bytea_escaped(bytea_escaped)
self.assertIsInstance(r, bool)
self.assertIs(r, False)
    def testSetByteaEscaped(self):
        """Check the effect of set_bytea_escaped on bytea query results."""
        bytea_escaped = pg.get_bytea_escaped()
        # error if no parameter is passed
        self.assertRaises(TypeError, pg.set_bytea_escaped)
        query = self.c.query
        try:
            r = query("select 'data'::bytea")
        except pg.ProgrammingError:
            self.skipTest('database does not support bytea')
        r = r.getresult()[0][0]
        self.assertIsInstance(r, bytes)
        self.assertEqual(r, b'data')
        r = query("select 'data'::bytea")
        # escaped mode returns the hex-escaped string representation
        pg.set_bytea_escaped(True)
        try:
            r = r.getresult()[0][0]
        finally:
            # always restore the previous setting for the other tests
            pg.set_bytea_escaped(bytea_escaped)
        self.assertIsInstance(r, str)
        self.assertEqual(r, '\\x64617461')
        r = query("select 'data'::bytea")
        # unescaped mode returns the raw bytes
        pg.set_bytea_escaped(False)
        try:
            r = r.getresult()[0][0]
        finally:
            pg.set_bytea_escaped(bytea_escaped)
        self.assertIsInstance(r, bytes)
        self.assertEqual(r, b'data')
def testGetNamedresult(self):
namedresult = pg.get_namedresult()
# error if a parameter is passed
self.assertRaises(TypeError, pg.get_namedresult, namedresult)
self.assertIs(namedresult, pg._namedresult) # the default setting
def testSetNamedresult(self):
namedresult = pg.get_namedresult()
self.assertTrue(callable(namedresult))
query = self.c.query
r = query("select 1 as x, 2 as y").namedresult()[0]
self.assertIsInstance(r, tuple)
self.assertEqual(r, (1, 2))
self.assertIsNot(type(r), tuple)
self.assertEqual(r._fields, ('x', 'y'))
self.assertEqual(r._asdict(), {'x': 1, 'y': 2})
self.assertEqual(r.__class__.__name__, 'Row')
def listresult(q):
return [list(row) for row in q.getresult()]
pg.set_namedresult(listresult)
try:
r = pg.get_namedresult()
self.assertIs(r, listresult)
r = query("select 1 as x, 2 as y").namedresult()[0]
self.assertIsInstance(r, list)
self.assertEqual(r, | |
# repo: baitsanape/saleor
import graphene
from saleor.app.error_codes import AppErrorCode
from saleor.app.models import App, AppToken
from saleor.graphql.core.enums import PermissionEnum
from .utils import assert_no_permission, get_graphql_content
APP_CREATE_MUTATION = """
mutation AppCreate(
$name: String, $is_active: Boolean $permissions: [PermissionEnum]){
appCreate(input:
{name: $name, isActive: $is_active, permissions: $permissions})
{
authToken
app{
permissions{
code
name
}
id
isActive
name
tokens{
authToken
}
}
appErrors{
field
message
code
permissions
}
}
}
"""
def test_app_create_mutation(
    permission_manage_apps, permission_manage_products, staff_api_client, staff_user,
):
    """A staff user with manage_apps can create an app with in-scope perms."""
    staff_user.user_permissions.add(permission_manage_products)
    variables = {
        "name": "New integration",
        "is_active": True,
        "permissions": [PermissionEnum.MANAGE_PRODUCTS.name],
    }
    response = staff_api_client.post_graphql(
        APP_CREATE_MUTATION, variables=variables, permissions=(permission_manage_apps,)
    )
    content = get_graphql_content(response)
    payload = content["data"]["appCreate"]
    app = App.objects.get()
    assert payload["app"]["isActive"] == app.is_active
    assert payload["app"]["name"] == app.name
    assert list(app.permissions.all()) == [permission_manage_products]
    assert payload["authToken"] == app.tokens.get().auth_token
def test_app_create_mutation_for_app(
    permission_manage_apps, permission_manage_products, app_api_client, staff_user,
):
    """An app with manage_apps can create another app with in-scope perms."""
    query = APP_CREATE_MUTATION
    requestor = app_api_client.app
    requestor.permissions.add(permission_manage_apps, permission_manage_products)
    variables = {
        "name": "New integration",
        "is_active": True,
        "permissions": [PermissionEnum.MANAGE_PRODUCTS.name],
    }
    response = app_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    app_data = content["data"]["appCreate"]["app"]
    default_token = content["data"]["appCreate"]["authToken"]
    # exclude the requesting app itself; only the newly created app remains
    app = App.objects.exclude(pk=requestor.pk).get()
    assert app_data["isActive"] == app.is_active
    assert app_data["name"] == app.name
    assert list(app.permissions.all()) == [permission_manage_products]
    assert default_token == app.tokens.get().auth_token
def test_app_create_mutation_out_of_scope_permissions(
    permission_manage_apps, permission_manage_products, staff_api_client, staff_user,
):
    """Ensure user can't create app with permissions out of user's scope.

    Ensure superuser pass restrictions.
    """
    # the staff user has manage_apps but NOT manage_products
    staff_user.user_permissions.add(permission_manage_apps)
    variables = {
        "name": "New integration",
        "is_active": True,
        "permissions": [PermissionEnum.MANAGE_PRODUCTS.name],
    }
    response = staff_api_client.post_graphql(APP_CREATE_MUTATION, variables=variables)
    data = get_graphql_content(response)["data"]["appCreate"]
    errors = data["appErrors"]
    assert not data["app"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "permissions"
    assert error["code"] == AppErrorCode.OUT_OF_SCOPE_PERMISSION.name
    assert error["permissions"] == [PermissionEnum.MANAGE_PRODUCTS.name]
def test_app_create_mutation_superuser_can_create_app_with_any_perms(
    permission_manage_apps, permission_manage_products, superuser_api_client,
):
    """Ensure superuser can create app with any permissions."""
    query = APP_CREATE_MUTATION
    variables = {
        "name": "New integration",
        "is_active": True,
        "permissions": [PermissionEnum.MANAGE_PRODUCTS.name],
    }
    # no explicit permissions passed: the superuser client needs none
    response = superuser_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    app_data = content["data"]["appCreate"]["app"]
    default_token = content["data"]["appCreate"]["authToken"]
    app = App.objects.get()
    assert app_data["isActive"] == app.is_active
    assert app_data["name"] == app.name
    assert list(app.permissions.all()) == [permission_manage_products]
    # authToken carries the newly generated token of the created app
    assert default_token == app.tokens.get().auth_token
def test_app_create_mutation_for_app_out_of_scope_permissions(
    permission_manage_apps, permission_manage_products, app_api_client, staff_user,
):
    """A requesting app cannot grant permissions it does not hold itself."""
    variables = {
        "name": "New integration",
        "is_active": True,
        "permissions": [PermissionEnum.MANAGE_PRODUCTS.name],
    }
    response = app_api_client.post_graphql(
        APP_CREATE_MUTATION, variables=variables, permissions=(permission_manage_apps,)
    )
    data = get_graphql_content(response)["data"]["appCreate"]
    errors = data["appErrors"]
    assert not data["app"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "permissions"
    assert error["code"] == AppErrorCode.OUT_OF_SCOPE_PERMISSION.name
    assert error["permissions"] == [PermissionEnum.MANAGE_PRODUCTS.name]
def test_app_create_mutation_no_permissions(
    permission_manage_apps, permission_manage_products, staff_api_client, staff_user,
):
    """Without manage_apps the appCreate mutation is rejected outright."""
    variables = {
        "name": "New integration",
        "is_active": True,
        "permissions": [PermissionEnum.MANAGE_PRODUCTS.name],
    }
    response = staff_api_client.post_graphql(APP_CREATE_MUTATION, variables=variables)
    assert_no_permission(response)
APP_UPDATE_MUTATION = """
mutation AppUpdate($id: ID!, $is_active: Boolean,
$permissions: [PermissionEnum]){
appUpdate(id: $id,
input:{isActive: $is_active, permissions:$permissions}){
app{
isActive
id
permissions{
code
name
}
tokens{
authToken
}
name
}
appErrors{
field
message
code
permissions
}
}
}
"""
def test_app_update_mutation(
    app,
    permission_manage_apps,
    permission_manage_products,
    permission_manage_users,
    staff_api_client,
    staff_user,
):
    """Staff with manage_apps can deactivate an app and replace its perms."""
    query = APP_UPDATE_MUTATION
    # both granted permissions are within the staff user's own scope
    staff_user.user_permissions.add(permission_manage_products, permission_manage_users)
    id = graphene.Node.to_global_id("App", app.id)
    variables = {
        "id": id,
        "is_active": False,
        "permissions": [
            PermissionEnum.MANAGE_PRODUCTS.name,
            PermissionEnum.MANAGE_USERS.name,
        ],
    }
    response = staff_api_client.post_graphql(
        query, variables=variables, permissions=(permission_manage_apps,)
    )
    content = get_graphql_content(response)
    app_data = content["data"]["appUpdate"]["app"]
    tokens_data = app_data["tokens"]
    app.refresh_from_db()
    tokens = app.tokens.all()
    assert app_data["isActive"] == app.is_active
    assert app.is_active is False
    assert len(tokens_data) == 1
    # the API is expected to return only the last 4 chars of the token
    assert tokens_data[0]["authToken"] == tokens.get().auth_token[-4:]
    assert set(app.permissions.all()) == {
        permission_manage_products,
        permission_manage_users,
    }
def test_app_update_mutation_for_app(
    permission_manage_apps,
    permission_manage_products,
    permission_manage_orders,
    permission_manage_users,
    app_api_client,
):
    """A privileged app can deactivate and re-permission another app."""
    target = App.objects.create(name="New_app")
    target.permissions.add(permission_manage_orders)
    AppToken.objects.create(app=target)
    requestor = app_api_client.app
    requestor.permissions.add(
        permission_manage_apps,
        permission_manage_products,
        permission_manage_users,
        permission_manage_orders,
    )
    variables = {
        "id": graphene.Node.to_global_id("App", target.id),
        "is_active": False,
        "permissions": [
            PermissionEnum.MANAGE_PRODUCTS.name,
            PermissionEnum.MANAGE_USERS.name,
        ],
    }
    response = app_api_client.post_graphql(APP_UPDATE_MUTATION, variables=variables)
    content = get_graphql_content(response)
    app_data = content["data"]["appUpdate"]["app"]
    tokens_data = app_data["tokens"]
    target.refresh_from_db()
    tokens = target.tokens.all()
    assert app_data["isActive"] == target.is_active
    assert target.is_active is False
    assert len(tokens_data) == 1
    # the API is expected to return only the last 4 chars of the token
    assert tokens_data[0]["authToken"] == tokens.get().auth_token[-4:]
    assert set(target.permissions.all()) == {
        permission_manage_products,
        permission_manage_users,
    }
def test_app_update_mutation_out_of_scope_permissions(
    app,
    permission_manage_apps,
    permission_manage_products,
    permission_manage_users,
    staff_api_client,
    staff_user,
):
    """Ensure a user cannot grant an app permissions which they don't have."""
    query = APP_UPDATE_MUTATION
    staff_user.user_permissions.add(permission_manage_apps, permission_manage_products)
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {
        "id": app_id,
        "is_active": False,
        "permissions": [
            PermissionEnum.MANAGE_PRODUCTS.name,
            PermissionEnum.MANAGE_USERS.name,
        ],
    }
    response = staff_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    data = content["data"]["appUpdate"]
    errors = data["appErrors"]
    assert not data["app"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "permissions"
    assert error["code"] == AppErrorCode.OUT_OF_SCOPE_PERMISSION.name
    # only the permission the requestor lacks is reported
    assert error["permissions"] == [PermissionEnum.MANAGE_USERS.name]
def test_app_update_mutation_superuser_can_add_any_permissions_to_app(
    app,
    permission_manage_apps,
    permission_manage_products,
    permission_manage_users,
    superuser_api_client,
):
    """Ensure a superuser can add any permissions to an app."""
    query = APP_UPDATE_MUTATION
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {
        "id": app_id,
        "is_active": False,
        "permissions": [
            PermissionEnum.MANAGE_PRODUCTS.name,
            PermissionEnum.MANAGE_USERS.name,
        ],
    }
    response = superuser_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    data = content["data"]["appUpdate"]
    app_data = data["app"]
    tokens_data = app_data["tokens"]
    app.refresh_from_db()
    tokens = app.tokens.all()
    assert app_data["isActive"] == app.is_active
    assert app.is_active is False
    assert len(tokens_data) == 1
    # the API only exposes the last 4 characters of the auth token
    assert tokens_data[0]["authToken"] == tokens.get().auth_token[-4:]
    assert set(app.permissions.all()) == {
        permission_manage_products,
        permission_manage_users,
    }
def test_app_update_mutation_for_app_out_of_scope_permissions(
    permission_manage_apps,
    permission_manage_products,
    permission_manage_orders,
    permission_manage_users,
    app_api_client,
):
    """A requesting app cannot grant permissions outside its own scope."""
    app = App.objects.create(name="New_app")
    query = APP_UPDATE_MUTATION
    requestor = app_api_client.app
    # requestor deliberately lacks MANAGE_USERS
    requestor.permissions.add(
        permission_manage_apps, permission_manage_products, permission_manage_orders,
    )
    app.permissions.add(permission_manage_orders)
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {
        "id": app_id,
        "is_active": False,
        "permissions": [
            PermissionEnum.MANAGE_PRODUCTS.name,
            PermissionEnum.MANAGE_USERS.name,
        ],
    }
    response = app_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    data = content["data"]["appUpdate"]
    errors = data["appErrors"]
    assert not data["app"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "permissions"
    assert error["code"] == AppErrorCode.OUT_OF_SCOPE_PERMISSION.name
    assert error["permissions"] == [PermissionEnum.MANAGE_USERS.name]
def test_app_update_mutation_out_of_scope_app(
    app,
    permission_manage_apps,
    permission_manage_products,
    permission_manage_orders,
    permission_manage_users,
    staff_api_client,
    staff_user,
):
    """Ensure a user cannot manage an app with a wider permission scope."""
    query = APP_UPDATE_MUTATION
    staff_user.user_permissions.add(
        permission_manage_apps, permission_manage_products, permission_manage_users,
    )
    # the app holds MANAGE_ORDERS, which the staff user does not
    app.permissions.add(permission_manage_orders)
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {
        "id": app_id,
        "is_active": False,
        "permissions": [
            PermissionEnum.MANAGE_PRODUCTS.name,
            PermissionEnum.MANAGE_USERS.name,
        ],
    }
    response = staff_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    data = content["data"]["appUpdate"]
    errors = data["appErrors"]
    assert not data["app"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "id"
    assert error["code"] == AppErrorCode.OUT_OF_SCOPE_APP.name
def test_app_update_mutation_superuser_can_update_any_app(
    app,
    permission_manage_apps,
    permission_manage_products,
    permission_manage_orders,
    permission_manage_users,
    superuser_api_client,
):
    """Ensure a superuser can manage any app, regardless of its scope."""
    query = APP_UPDATE_MUTATION
    app.permissions.add(permission_manage_orders)
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {
        "id": app_id,
        "is_active": False,
        "permissions": [
            PermissionEnum.MANAGE_PRODUCTS.name,
            PermissionEnum.MANAGE_USERS.name,
        ],
    }
    response = superuser_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    data = content["data"]["appUpdate"]
    app_data = data["app"]
    tokens_data = app_data["tokens"]
    app.refresh_from_db()
    tokens = app.tokens.all()
    assert app_data["isActive"] == app.is_active
    assert app.is_active is False
    assert len(tokens_data) == 1
    # the API only exposes the last 4 characters of the auth token
    assert tokens_data[0]["authToken"] == tokens.get().auth_token[-4:]
    assert set(app.permissions.all()) == {
        permission_manage_products,
        permission_manage_users,
    }
def test_app_update_mutation_for_app_out_of_scope_app(
    permission_manage_apps,
    permission_manage_products,
    permission_manage_orders,
    permission_manage_users,
    app_api_client,
):
    """A requesting app cannot update an app with a wider permission scope."""
    app = App.objects.create(name="New_app")
    query = APP_UPDATE_MUTATION
    requestor = app_api_client.app
    requestor.permissions.add(
        permission_manage_apps, permission_manage_products, permission_manage_users,
    )
    # the target app holds MANAGE_ORDERS, which the requestor does not
    app.permissions.add(permission_manage_orders)
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {
        "id": app_id,
        "is_active": False,
        "permissions": [
            PermissionEnum.MANAGE_PRODUCTS.name,
            PermissionEnum.MANAGE_USERS.name,
        ],
    }
    response = app_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    data = content["data"]["appUpdate"]
    errors = data["appErrors"]
    assert not data["app"]
    assert len(errors) == 1
    error = errors[0]
    assert error["field"] == "id"
    assert error["code"] == AppErrorCode.OUT_OF_SCOPE_APP.name
def test_app_update_no_permission(app, staff_api_client, staff_user):
    """Updating an app without MANAGE_APPS must be rejected."""
    query = APP_UPDATE_MUTATION
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {
        "id": app_id,
        "is_active": False,
        "permissions": [PermissionEnum.MANAGE_PRODUCTS.name],
    }
    response = staff_api_client.post_graphql(query, variables=variables)
    assert_no_permission(response)
APP_DELETE_MUTATION = """
mutation appDelete($id: ID!){
appDelete(id: $id){
appErrors{
field
message
code
}
app{
name
}
}
}
"""
def test_app_delete(
    staff_api_client, staff_user, app, permission_manage_orders, permission_manage_apps,
):
    """Staff with MANAGE_APPS can delete an app whose permissions they hold."""
    query = APP_DELETE_MUTATION
    app.permissions.add(permission_manage_orders)
    staff_user.user_permissions.add(permission_manage_orders)
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {"id": app_id}
    response = staff_api_client.post_graphql(
        query, variables=variables, permissions=(permission_manage_apps,)
    )
    content = get_graphql_content(response)
    data = content["data"]["appDelete"]
    assert data["app"]
    assert not data["appErrors"]
    # use .exists() explicitly, consistent with the sibling delete tests
    assert not App.objects.filter(id=app.id).exists()
def test_app_delete_for_app(
    app_api_client, permission_manage_orders, permission_manage_apps,
):
    """An app with MANAGE_APPS can delete another app whose permissions it holds."""
    requestor = app_api_client.app
    app = App.objects.create(name="New_app")
    query = APP_DELETE_MUTATION
    app.permissions.add(permission_manage_orders)
    requestor.permissions.add(permission_manage_orders)
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {"id": app_id}
    response = app_api_client.post_graphql(
        query, variables=variables, permissions=(permission_manage_apps,)
    )
    content = get_graphql_content(response)
    data = content["data"]["appDelete"]
    assert data["app"]
    assert not data["appErrors"]
    assert not App.objects.filter(id=app.id).exists()
def test_app_delete_out_of_scope_app(
    staff_api_client, staff_user, app, permission_manage_apps, permission_manage_orders,
):
    """Ensure a user can't delete an app with a wider scope of permissions."""
    query = APP_DELETE_MUTATION
    # the app holds MANAGE_ORDERS, which the staff user does not
    app.permissions.add(permission_manage_orders)
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {"id": app_id}
    response = staff_api_client.post_graphql(
        query, variables=variables, permissions=(permission_manage_apps,)
    )
    content = get_graphql_content(response)
    data = content["data"]["appDelete"]
    errors = data["appErrors"]
    assert not data["app"]
    assert len(errors) == 1
    error = errors[0]
    assert error["code"] == AppErrorCode.OUT_OF_SCOPE_APP.name
    assert error["field"] == "id"
def test_app_delete_superuser_can_delete_any_app(
    superuser_api_client, app, permission_manage_apps, permission_manage_orders,
):
    """Ensure a superuser can delete an app with any scope of permissions."""
    query = APP_DELETE_MUTATION
    app.permissions.add(permission_manage_orders)
    # renamed from `id` so the builtin is not shadowed
    app_id = graphene.Node.to_global_id("App", app.id)
    variables = {"id": app_id}
    response = superuser_api_client.post_graphql(query, variables=variables)
    content = get_graphql_content(response)
    data = content["data"]["appDelete"]
    assert data["app"]
    assert not data["appErrors"]
    assert not App.objects.filter(id=app.id).exists()
def test_app_delete_for_app_out_of_scope_app(
app_api_client, permission_manage_orders, permission_manage_apps,
):
app = App.objects.create(name="New_app")
query = APP_DELETE_MUTATION
app.permissions.add(permission_manage_orders)
id = graphene.Node.to_global_id("App", app.id)
variables = {"id": id}
response = app_api_client.post_graphql(
query, variables=variables, permissions=(permission_manage_apps,)
)
content = get_graphql_content(response)
data = content["data"]["appDelete"]
errors = data["appErrors"]
assert not data["app"]
assert len(errors) == | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Mapping reads to reference genome
1. to spike-in genome
2. to repbase
3. to rRNA, tRNA, MT
4. to genome
Note:
1. filtering unique mapped reads
STAR --outFilterMultimapNmax 1
"""
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "2018 by <NAME> <<EMAIL>>"
__license__ = "MIT"
__email__ = "<EMAIL>"
__version__ = "0.1"
import os
import sys
import re
import json
import glob
import argparse
import shlex
import subprocess
import pandas as pd
import binascii
import logging
import gzip
import pysam
import pybedtools
from TRIBEpipe.helper import *
# Module-wide logging: timestamped messages at DEBUG level, configured
# once at import time.
logging.basicConfig(format = '[%(asctime)s] %(message)s',
                    datefmt = '%Y-%m-%d %H:%M:%S',
                    level = logging.DEBUG)
def get_args():
    """Parse the command-line arguments for the mapping pipeline.

    Returns:
        argparse.Namespace with the parsed options; ``-i`` holds open
        file objects (argparse.FileType).
    """
    parser = argparse.ArgumentParser(
        prog='map',
        description='Mapping RNA-seq reads to genome',
        epilog='Example: map -i f1.fq f2.fq -n demo -o output')
    parser.add_argument('-i', nargs='+', required=True, metavar='INPUT',
                        type=argparse.FileType('r'),
                        help='Sequencing reads in FASTQ format, 1-4 files.')
    parser.add_argument('-n', required=True, metavar='NAME',
                        help='Name of the experiment')
    parser.add_argument('-o', required=True, default=None,
                        metavar='OUTPUT', help='The directory to save results.')
    parser.add_argument('-g', default='hg19', metavar='GENOME',
                        help='Reference genome : dm3, dm6, hg19, GRCh38, mm10, GRCm38')
    parser.add_argument('-x', required=True, metavar='index',
                        help='Index of the genome for alignment tools')
    # NOTE: required=True makes the 'bowtie' default unreachable; kept to
    # stay backward compatible with existing command lines.
    parser.add_argument('-t', required=True, default='bowtie',
                        metavar='Aligner',
                        help='Aligner for the mapping, bowtie, bowtie2, STAR')
    parser.add_argument('-p', default=1, metavar='Threads', type=int,
                        help='Number of threads to launch, default [1].')
    parser.add_argument('--rmdup', action='store_true',
                        help='remove PCR duplicates using Picard, if specified')
    # fixed: the original used a backslash continuation inside the help
    # literal, embedding stray indentation into --help output
    parser.add_argument('--path_data',
                        help='The directory of genome files, default: '
                             '[$HOME/data/genome/]')
    parser.add_argument('--overwrite', action='store_true',
                        help='if specified, overwrite existing files')
    return parser.parse_args()
def bowtie2_se(fn, idx, path_out, para=1, multi_cores=1, overwrite=False):
    """Map single-end reads to a Bowtie2 index.

    Runs bowtie2, keeps confidently mapped reads via
    ``samtools view -q 10 -F 0x4``, sorts and indexes the BAM, and writes
    a BED version of the alignments.

    Args:
        fn: input reads file (FASTA or FASTQ).
        idx: bowtie2 index prefix.
        path_out: output directory; defaults to the directory of ``fn``.
        para: preset selector, 1 = --sensitive, 2 = --local.
        multi_cores: threads for bowtie2 and samtools.
        overwrite: re-run even when outputs already exist.

    Returns:
        [mapped_bam_path, unmapped_reads_path]
    """
    assert isinstance(fn, str)
    assert os.path.exists(fn)
    assert is_idx(idx, 'bowtie2')
    path_out = os.path.dirname(fn) if path_out is None else path_out
    assert is_path(path_out)
    assert isinstance(para, int)
    ## parameters
    para_v = {1: '--sensitive', 2: '--local'}
    para_bowtie2 = para_v[para] if para in para_v else ''
    fn_type = seq_type(fn)
    if fn_type == 'fasta':
        para_bowtie2 += ' -f'
    elif fn_type == 'fastq':
        para_bowtie2 += ' -q'
    else:
        raise ValueError('unknown type of reads')
    ## prefix
    fn_prefix = file_prefix(fn)[0]
    # fixed: raw string so '\.' is a regex escape, not an invalid
    # string-escape sequence
    fn_prefix = re.sub(r'\.clean|\.nodup|\.cut', '', fn_prefix)
    # fn_prefix = re.sub('_[12]|_R[12]$', '', fn_prefix)
    idx_name = os.path.basename(idx)
    fn_unmap_file = os.path.join(path_out, '%s.not_%s.%s' % (fn_prefix, idx_name, fn_type))
    fn_map_prefix = os.path.join(path_out, fn_prefix)
    fn_map_bam = fn_map_prefix + '.map_%s.bam' % idx_name
    fn_map_bed = fn_map_prefix + '.map_%s.bed' % idx_name
    fn_map_log = fn_map_prefix + '.map_%s.bowtie2.log' % idx_name
    if os.path.exists(fn_map_bam) and os.path.exists(fn_unmap_file) and overwrite is False:
        logging.info('file exists: %s' % fn_map_bam)
    else:
        c1 = 'bowtie2 %s -p %s --mm --no-unal --un %s -x %s %s' % (
            para_bowtie2, multi_cores, fn_unmap_file, idx, fn)
        c2 = 'samtools view -bhS -q 10 -F 0x4 -@ %s -' % multi_cores
        c3 = 'samtools sort -@ %s -o %s -' % (multi_cores, fn_map_bam)
        with open(fn_map_log, 'wt') as fo:
            p1 = subprocess.Popen(shlex.split(c1), stdout=subprocess.PIPE, stderr=fo)
            p2 = subprocess.Popen(shlex.split(c2), stdin=p1.stdout, stdout=subprocess.PIPE)
            p3 = subprocess.Popen(shlex.split(c3), stdin=p2.stdout)
            # close our copies of the upstream pipe ends so the writers
            # receive SIGPIPE if a downstream stage exits early
            p1.stdout.close()
            p2.stdout.close()
            p3.communicate()
        pysam.index(fn_map_bam)
        pybedtools.BedTool(fn_map_bam).bam_to_bed().saveas(fn_map_bed)
    ## statistics (writes the parsed summary as a side effect)
    bowtie2_log_parser(fn_map_log)
    return [fn_map_bam, fn_unmap_file]
def star_se(fn, idx, path_out, para=1, multi_cores=1, overwrite=False):
    """Map single-end reads to one index with STAR.

    Keeps uniquely mapped reads (--outFilterMultimapNmax 1), writes a
    coordinate-sorted BAM plus the unmapped reads, renames STAR's default
    output files, indexes the BAM and parses the mapping log.

    Args:
        fn: input reads, FASTA or FASTQ (optionally gzipped).
        idx: STAR genome directory.
        path_out: output directory; defaults to the directory of ``fn``.
        para: unused by STAR; kept so the signature matches bowtie2_se().
        multi_cores: value passed to --runThreadN.
        overwrite: re-run even when the BAM already exists.

    Returns:
        [mapped_bam_path, unmapped_reads_path]
    """
    assert isinstance(fn, str)
    assert os.path.exists(fn)
    assert is_idx(idx, 'star')
    path_out = os.path.dirname(fn) if path_out is None else path_out
    assert is_path(path_out)
    fn_type = seq_type(fn)
    # STAR reads gzipped input through zcat; '-' means no reader command
    freader = 'zcat' if is_gz(fn) else '-'
    ## prefix
    fn_prefix = file_prefix(fn)[0]
    # fixed: raw string so '\.' is a regex escape, not an invalid
    # string-escape sequence
    fn_prefix = re.sub(r'\.clean|\.nodup|\.cut', '', fn_prefix)
    # fn_prefix = re.sub('_[12]|_R[12]$', '', fn_prefix)
    idx_name = os.path.basename(idx)
    fn_unmap_file = os.path.join(path_out, '%s.not_%s.%s' % (fn_prefix, idx_name, fn_type))
    fn_map_prefix = os.path.join(path_out, fn_prefix)
    fn_map_bam = fn_map_prefix + '.map_%s.bam' % idx_name
    # (the .bed path computed by bowtie2_se is not produced here; STAR
    # output is converted downstream by the caller when needed)
    fn_map_log = fn_map_prefix + '.map_%s.star.log' % idx_name
    ## skip exist files
    if os.path.exists(fn_map_bam) and overwrite is False:
        logging.info('file exists: %s' % fn_map_bam)
    else:
        c1 = 'STAR --runMode alignReads \
             --genomeDir %s \
             --readFilesIn %s \
             --readFilesCommand %s \
             --outFileNamePrefix %s \
             --runThreadN %s \
             --outFilterMismatchNoverLmax 0.07 \
             --outFilterMultimapNmax 1 \
             --limitOutSAMoneReadBytes 1000000 \
             --genomeLoad LoadAndRemove \
             --limitBAMsortRAM 10000000000 \
             --outSAMtype BAM SortedByCoordinate \
             --outReadsUnmapped Fastx' % (idx, fn, freader, fn_map_prefix,
                                          multi_cores)
        subprocess.run(shlex.split(c1))
        # rename STAR's default output files to this module's conventions
        os.rename(fn_map_prefix + 'Aligned.sortedByCoord.out.bam', fn_map_bam)
        os.rename(fn_map_prefix + 'Unmapped.out.mate1', fn_unmap_file)
        os.rename(fn_map_prefix + 'Log.final.out', fn_map_log)
        pysam.index(fn_map_bam)
    # parse the STAR log for mapping statistics (side effect)
    d = star_log_parser(fn_map_log)
    return [fn_map_bam, fn_unmap_file]
def map_se_batch(fn, idxes, path_out, para=1, multi_cores=1, aligner='STAR',
                 overwrite=False):
    """Map one reads file against several indexes, one after another.

    Reads that fail to map to an index become the input for the next one;
    the final index is run with preset 2.  Returns the BAM files, one per
    index, in order.
    """
    assert isinstance(fn, str)
    assert os.path.exists(fn)
    assert isinstance(idxes, list)
    path_out = os.path.dirname(fn) if path_out is None else path_out
    assert is_path(path_out)
    # dispatch table instead of if/elif over the aligner name
    align_se = {'star': star_se, 'bowtie2': bowtie2_se}.get(aligner.lower())
    if align_se is None:
        raise ValueError('unknown aligner: %s' % aligner)
    bam_files = []
    reads_in = fn
    for idx in idxes:
        if idx is idxes[-1]:
            # last (genome) index switches to preset 2
            para = 2
        bam_idx, unmap_idx = align_se(reads_in, idx, path_out,
                                      para=para,
                                      multi_cores=multi_cores,
                                      overwrite=overwrite)
        reads_in = unmap_idx
        bam_files.append(bam_idx)
    return bam_files
def pcr_dup_remover(bam_in):
    """
    Remove PCR duplicates from a BAM file using Picard MarkDuplicates.

    Writes <prefix>.nodup.bam (plus .metrics and .log) next to the input
    and indexes the result.  Returns the de-duplicated BAM path, or None
    when the picard jar cannot be found.  The run is skipped when the
    output already exists (there is no overwrite option here).
    """
    # hard-coded picard location; adjust if picard is installed elsewhere
    picard_jar = '/data/biosoft/picard/build/libs/picard.jar'
    if not os.path.exists(picard_jar):
        logging.error('file not found - picard.jar')
        return None
    bam_nodup = os.path.splitext(bam_in)[0] + '.nodup.bam'
    metrics_nodup = os.path.splitext(bam_in)[0] + '.nodup.metrics'
    log_nodup = os.path.splitext(bam_in)[0] + '.nodup.log'
    c1 = 'java -Xmx8g -jar {} MarkDuplicates INPUT={} OUTPUT={} METRICS_FILE={} \
        REMOVE_DUPLICATES=true ASSUME_SORTED=true'.format(picard_jar,
        bam_in, bam_nodup, metrics_nodup)
    if os.path.exists(bam_in) and not os.path.exists(bam_nodup):
        # capture picard's stdout/stderr in the .log file
        with open(log_nodup, 'w') as fo:
            p1 = subprocess.run(shlex.split(c1), stdout = fo, stderr = fo)
        pysam.index(bam_nodup)
    return bam_nodup
def align(fns, smp_name, path_out, genome, spikein=None, multi_cores=1,
          aligner='STAR', path_data=None, overwrite=False):
    """
    Map a list of reads files to (spike-in +) genome indexes, one by one.

    Each input file is mapped index-by-index via map_se_batch() and the
    per-replicate stats are summarised with rep_map_wrapper().  When more
    than one replicate is given, the per-index BAMs are merged under
    ``path_out/smp_name``.  Returns the genome-level BAM files, symlinked
    to shorter names (the "not_index." suffix removed).
    """
    assert isinstance(fns, list)
    assert isinstance(genome, str)
    assert isinstance(smp_name, str)
    # get indexes: genome first, then an optional spike-in genome
    sg = idx_picker(genome, path_data=path_data, aligner=aligner)
    idxes = [sg]
    if isinstance(spikein, str) and not spikein == genome:
        sp = idx_picker(spikein, path_data=path_data, aligner=aligner)
        idxes.append(sp)
    idxes = list(filter(None.__ne__, idxes))  # drop indexes that were not found
    if len(idxes) == 0:
        raise ValueError('genome index not exists: %s' % path_data)
    # mapping se reads
    fn_bam_files = []
    # map every replicate against all indexes
    for fn in fns:
        logging.info('mapping file: %s' % fn)
        fn_prefix = file_prefix(fn)[0]
        # NOTE(review): pattern should be a raw string (r'\.clean|...')
        # to avoid invalid string-escape sequences; behavior is unchanged
        fn_prefix = re.sub('\.clean|\.nodup|\.cut', '', fn_prefix)
        # fn_prefix = re.sub('_[12]$|_R[12]$', '', fn_prefix)
        path_out_fn = os.path.join(path_out, fn_prefix)
        b = map_se_batch(fn, idxes, path_out_fn, multi_cores=multi_cores,
                         aligner=aligner, overwrite=overwrite)  # list of BAMs
        fn_bam_files.append(b)  # one list per replicate
        rep_map_wrapper(path_out_fn)
    # merge bam files across replicates (only when >1 replicate)
    path_out_merge = os.path.join(path_out, smp_name)
    merge_bam_files = []
    if len(fn_bam_files) > 1:
        assert is_path(path_out_merge)
        for i in range(len(fn_bam_files[0])):  # merge each sub-index
            se_bam_files = [b[i] for b in fn_bam_files]
            # common suffix of the replicate BAM names, replicate tag removed
            merge_suffix = str_common(se_bam_files, suffix=True)
            merge_suffix = re.sub('^_[12]|_R[12]', '', merge_suffix)
            merge_bam_name = smp_name + merge_suffix
            merge_bam_file = os.path.join(path_out_merge, merge_bam_name)
            merge_bed_file = re.sub('.bam$', '.bed', merge_bam_file)
            if os.path.exists(merge_bam_file) and overwrite is False:
                logging.info('file exists: %s' % merge_bam_name)
            else:
                tmp = bam_merge(se_bam_files, merge_bam_file)
                pybedtools.BedTool(merge_bam_file).bam_to_bed().saveas(merge_bed_file)
            merge_bam_files.append(merge_bam_file)
        merge_map_wrapper(path_out_merge)
        # the merged sample is appended as one more "replicate"
        fn_bam_files.append(merge_bam_files)
    # get genome mapping files (the last index of each replicate)
    genome_bam_files = [f[-1] for f in fn_bam_files]
    # rename genome bam, to a shorter name
    # remove "not_index." suffix
    gbam_files = []
    gbed_files = []
    for i in range(len(genome_bam_files)):
        bam_from = genome_bam_files[i]
        bam_to = os.path.join(os.path.dirname(bam_from),
                              filename_shorter(bam_from))
        if not os.path.exists(bam_to):
            os.symlink(os.path.basename(bam_from), bam_to)
        if not os.path.exists(bam_to + '.bai'):
            if not os.path.exists(bam_from + '.bai'):
                pysam.index(bam_from)
            os.symlink(os.path.basename(bam_from) + '.bai',
                       bam_to + '.bai')
        gbam_files.append(bam_to)
        # symlink the matching .bed next to the shortened BAM name
        bed_from = re.sub('.bam$', '.bed', bam_from)
        bed_to = re.sub('.bam$', '.bed', bam_to)
        if os.path.exists(bed_from) and not os.path.exists(bed_to):
            os.symlink(os.path.basename(bed_from), bed_to)
        gbed_files.append(bed_to)
    return gbam_files  # [gbam_files, gbed_files]
def main():
args = get_args()
fqs = [f.name for f in args.i]
smp_name = args.n
path_out = args.o
genome = args.g
multi_cores = args.p
aligner = args.t.lower()
rm_dup = args.rmdup
path_data = args.path_data
overwrite = args.overwrite
# mapping
p = map(fqs, smp_name, path_out, genome,
multi_cores=multi_cores, aligner=aligner,
path_data=path_data, overwrite=overwrite)
p_out = p # bam files
if args.rmdup:
px | |
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.tione.v20191022 import tione_client as tione_client_v20191022
from tencentcloud.tione.v20191022 import models as models_v20191022
def doUpdateNotebookInstance(args, parsed_globals):
    """Call the UpdateNotebookInstance API with the parsed CLI args and print the JSON reply."""
    gp = parse_global_arg(parsed_globals)
    timeout = gp[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=gp[OptionsDefine.Endpoint],
        proxy=gp[OptionsDefine.HttpsProxy]
    )
    cred = credential.Credential(
        gp[OptionsDefine.SecretId], gp[OptionsDefine.SecretKey], gp[OptionsDefine.Token]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = gp[OptionsDefine.Version]
    client = CLIENT_MAP[version].TioneClient(cred, gp[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].UpdateNotebookInstanceRequest()
    request.from_json_string(json.dumps(args))
    raw = client.UpdateNotebookInstance(request).to_json_string()
    try:
        payload = json.loads(raw)
    except TypeError:
        payload = json.loads(raw.decode('utf-8'))  # bytes on python3.3
    FormatOutput.output("action", payload, gp[OptionsDefine.Output], gp[OptionsDefine.Filter])
def doDescribeNotebookLifecycleScripts(args, parsed_globals):
    """Call the DescribeNotebookLifecycleScripts API and print the JSON reply."""
    gp = parse_global_arg(parsed_globals)
    timeout = gp[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=gp[OptionsDefine.Endpoint],
        proxy=gp[OptionsDefine.HttpsProxy]
    )
    cred = credential.Credential(
        gp[OptionsDefine.SecretId], gp[OptionsDefine.SecretKey], gp[OptionsDefine.Token]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = gp[OptionsDefine.Version]
    client = CLIENT_MAP[version].TioneClient(cred, gp[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DescribeNotebookLifecycleScriptsRequest()
    request.from_json_string(json.dumps(args))
    raw = client.DescribeNotebookLifecycleScripts(request).to_json_string()
    try:
        payload = json.loads(raw)
    except TypeError:
        payload = json.loads(raw.decode('utf-8'))  # bytes on python3.3
    FormatOutput.output("action", payload, gp[OptionsDefine.Output], gp[OptionsDefine.Filter])
def doStartNotebookInstance(args, parsed_globals):
    """Call the StartNotebookInstance API and print the JSON reply."""
    gp = parse_global_arg(parsed_globals)
    timeout = gp[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=gp[OptionsDefine.Endpoint],
        proxy=gp[OptionsDefine.HttpsProxy]
    )
    cred = credential.Credential(
        gp[OptionsDefine.SecretId], gp[OptionsDefine.SecretKey], gp[OptionsDefine.Token]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = gp[OptionsDefine.Version]
    client = CLIENT_MAP[version].TioneClient(cred, gp[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].StartNotebookInstanceRequest()
    request.from_json_string(json.dumps(args))
    raw = client.StartNotebookInstance(request).to_json_string()
    try:
        payload = json.loads(raw)
    except TypeError:
        payload = json.loads(raw.decode('utf-8'))  # bytes on python3.3
    FormatOutput.output("action", payload, gp[OptionsDefine.Output], gp[OptionsDefine.Filter])
def doDeleteNotebookInstance(args, parsed_globals):
    """Call the DeleteNotebookInstance API and print the JSON reply."""
    gp = parse_global_arg(parsed_globals)
    timeout = gp[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=gp[OptionsDefine.Endpoint],
        proxy=gp[OptionsDefine.HttpsProxy]
    )
    cred = credential.Credential(
        gp[OptionsDefine.SecretId], gp[OptionsDefine.SecretKey], gp[OptionsDefine.Token]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = gp[OptionsDefine.Version]
    client = CLIENT_MAP[version].TioneClient(cred, gp[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DeleteNotebookInstanceRequest()
    request.from_json_string(json.dumps(args))
    raw = client.DeleteNotebookInstance(request).to_json_string()
    try:
        payload = json.loads(raw)
    except TypeError:
        payload = json.loads(raw.decode('utf-8'))  # bytes on python3.3
    FormatOutput.output("action", payload, gp[OptionsDefine.Output], gp[OptionsDefine.Filter])
def doDescribeNotebookLifecycleScript(args, parsed_globals):
    """Call the DescribeNotebookLifecycleScript API and print the JSON reply."""
    gp = parse_global_arg(parsed_globals)
    timeout = gp[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=gp[OptionsDefine.Endpoint],
        proxy=gp[OptionsDefine.HttpsProxy]
    )
    cred = credential.Credential(
        gp[OptionsDefine.SecretId], gp[OptionsDefine.SecretKey], gp[OptionsDefine.Token]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = gp[OptionsDefine.Version]
    client = CLIENT_MAP[version].TioneClient(cred, gp[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DescribeNotebookLifecycleScriptRequest()
    request.from_json_string(json.dumps(args))
    raw = client.DescribeNotebookLifecycleScript(request).to_json_string()
    try:
        payload = json.loads(raw)
    except TypeError:
        payload = json.loads(raw.decode('utf-8'))  # bytes on python3.3
    FormatOutput.output("action", payload, gp[OptionsDefine.Output], gp[OptionsDefine.Filter])
def doCreatePresignedNotebookInstanceUrl(args, parsed_globals):
    """Call the CreatePresignedNotebookInstanceUrl API and print the JSON reply."""
    gp = parse_global_arg(parsed_globals)
    timeout = gp[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=gp[OptionsDefine.Endpoint],
        proxy=gp[OptionsDefine.HttpsProxy]
    )
    cred = credential.Credential(
        gp[OptionsDefine.SecretId], gp[OptionsDefine.SecretKey], gp[OptionsDefine.Token]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = gp[OptionsDefine.Version]
    client = CLIENT_MAP[version].TioneClient(cred, gp[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].CreatePresignedNotebookInstanceUrlRequest()
    request.from_json_string(json.dumps(args))
    raw = client.CreatePresignedNotebookInstanceUrl(request).to_json_string()
    try:
        payload = json.loads(raw)
    except TypeError:
        payload = json.loads(raw.decode('utf-8'))  # bytes on python3.3
    FormatOutput.output("action", payload, gp[OptionsDefine.Output], gp[OptionsDefine.Filter])
def doCreateCodeRepository(args, parsed_globals):
    """Call the CreateCodeRepository API and print the JSON reply."""
    gp = parse_global_arg(parsed_globals)
    timeout = gp[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=gp[OptionsDefine.Endpoint],
        proxy=gp[OptionsDefine.HttpsProxy]
    )
    cred = credential.Credential(
        gp[OptionsDefine.SecretId], gp[OptionsDefine.SecretKey], gp[OptionsDefine.Token]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = gp[OptionsDefine.Version]
    client = CLIENT_MAP[version].TioneClient(cred, gp[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].CreateCodeRepositoryRequest()
    request.from_json_string(json.dumps(args))
    raw = client.CreateCodeRepository(request).to_json_string()
    try:
        payload = json.loads(raw)
    except TypeError:
        payload = json.loads(raw.decode('utf-8'))  # bytes on python3.3
    FormatOutput.output("action", payload, gp[OptionsDefine.Output], gp[OptionsDefine.Filter])
def doUpdateNotebookLifecycleScript(args, parsed_globals):
    """Call the UpdateNotebookLifecycleScript API and print the JSON reply."""
    gp = parse_global_arg(parsed_globals)
    timeout = gp[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=gp[OptionsDefine.Endpoint],
        proxy=gp[OptionsDefine.HttpsProxy]
    )
    cred = credential.Credential(
        gp[OptionsDefine.SecretId], gp[OptionsDefine.SecretKey], gp[OptionsDefine.Token]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = gp[OptionsDefine.Version]
    client = CLIENT_MAP[version].TioneClient(cred, gp[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].UpdateNotebookLifecycleScriptRequest()
    request.from_json_string(json.dumps(args))
    raw = client.UpdateNotebookLifecycleScript(request).to_json_string()
    try:
        payload = json.loads(raw)
    except TypeError:
        payload = json.loads(raw.decode('utf-8'))  # bytes on python3.3
    FormatOutput.output("action", payload, gp[OptionsDefine.Output], gp[OptionsDefine.Filter])
def doDescribeTrainingJob(args, parsed_globals):
    """Call the DescribeTrainingJob API and print the JSON reply."""
    gp = parse_global_arg(parsed_globals)
    timeout = gp[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=gp[OptionsDefine.Endpoint],
        proxy=gp[OptionsDefine.HttpsProxy]
    )
    cred = credential.Credential(
        gp[OptionsDefine.SecretId], gp[OptionsDefine.SecretKey], gp[OptionsDefine.Token]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = gp[OptionsDefine.Version]
    client = CLIENT_MAP[version].TioneClient(cred, gp[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DescribeTrainingJobRequest()
    request.from_json_string(json.dumps(args))
    raw = client.DescribeTrainingJob(request).to_json_string()
    try:
        payload = json.loads(raw)
    except TypeError:
        payload = json.loads(raw.decode('utf-8'))  # bytes on python3.3
    FormatOutput.output("action", payload, gp[OptionsDefine.Output], gp[OptionsDefine.Filter])
def doStopNotebookInstance(args, parsed_globals):
    """Call the StopNotebookInstance API and print the JSON reply."""
    gp = parse_global_arg(parsed_globals)
    timeout = gp[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=gp[OptionsDefine.Endpoint],
        proxy=gp[OptionsDefine.HttpsProxy]
    )
    cred = credential.Credential(
        gp[OptionsDefine.SecretId], gp[OptionsDefine.SecretKey], gp[OptionsDefine.Token]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = gp[OptionsDefine.Version]
    client = CLIENT_MAP[version].TioneClient(cred, gp[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].StopNotebookInstanceRequest()
    request.from_json_string(json.dumps(args))
    raw = client.StopNotebookInstance(request).to_json_string()
    try:
        payload = json.loads(raw)
    except TypeError:
        payload = json.loads(raw.decode('utf-8'))  # bytes on python3.3
    FormatOutput.output("action", payload, gp[OptionsDefine.Output], gp[OptionsDefine.Filter])
def doCreateNotebookInstance(args, parsed_globals):
    """Invoke the TIONE ``CreateNotebookInstance`` API and print the formatted response.

    :param args: action-specific parameters; serialized as JSON into the request model.
    :param parsed_globals: global CLI options (credentials, region, endpoint,
        output format, filter, ...), resolved through ``parse_global_arg``.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60-second request timeout unless overridden on the CLI
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TioneClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateNotebookInstanceRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateNotebookInstance(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # older Pythons (< 3.6) reject bytes in json.loads; decode first
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCodeRepository(args, parsed_globals):
    """Invoke the TIONE ``DescribeCodeRepository`` API and print the formatted response.

    :param args: action-specific parameters; serialized as JSON into the request model.
    :param parsed_globals: global CLI options (credentials, region, endpoint,
        output format, filter, ...), resolved through ``parse_global_arg``.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60-second request timeout unless overridden on the CLI
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TioneClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeCodeRepositoryRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeCodeRepository(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # older Pythons (< 3.6) reject bytes in json.loads; decode first
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateNotebookLifecycleScript(args, parsed_globals):
    """Invoke the TIONE ``CreateNotebookLifecycleScript`` API and print the formatted response.

    :param args: action-specific parameters; serialized as JSON into the request model.
    :param parsed_globals: global CLI options (credentials, region, endpoint,
        output format, filter, ...), resolved through ``parse_global_arg``.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60-second request timeout unless overridden on the CLI
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TioneClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateNotebookLifecycleScriptRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateNotebookLifecycleScript(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # older Pythons (< 3.6) reject bytes in json.loads; decode first
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteCodeRepository(args, parsed_globals):
    """Invoke the TIONE ``DeleteCodeRepository`` API and print the formatted response.

    :param args: action-specific parameters; serialized as JSON into the request model.
    :param parsed_globals: global CLI options (credentials, region, endpoint,
        output format, filter, ...), resolved through ``parse_global_arg``.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60-second request timeout unless overridden on the CLI
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TioneClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteCodeRepositoryRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DeleteCodeRepository(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # older Pythons (< 3.6) reject bytes in json.loads; decode first
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeNotebookSummary(args, parsed_globals):
    """Invoke the TIONE ``DescribeNotebookSummary`` API and print the formatted response.

    :param args: action-specific parameters; serialized as JSON into the request model.
    :param parsed_globals: global CLI options (credentials, region, endpoint,
        output format, filter, ...), resolved through ``parse_global_arg``.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60-second request timeout unless overridden on the CLI
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TioneClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeNotebookSummaryRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeNotebookSummary(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # older Pythons (< 3.6) reject bytes in json.loads; decode first
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTrainingJobs(args, parsed_globals):
    """Invoke the TIONE ``DescribeTrainingJobs`` API and print the formatted response.

    :param args: action-specific parameters; serialized as JSON into the request model.
    :param parsed_globals: global CLI options (credentials, region, endpoint,
        output format, filter, ...), resolved through ``parse_global_arg``.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60-second request timeout unless overridden on the CLI
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TioneClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeTrainingJobsRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeTrainingJobs(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # older Pythons (< 3.6) reject bytes in json.loads; decode first
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteNotebookLifecycleScript(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TioneClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteNotebookLifecycleScriptRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteNotebookLifecycleScript(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, | |
"NeckChamferOffset": 0.0000000000,
"Orientation": "NormalToPlacementPlane",
"Size": "1/2",
"Standard": "ANSI Inch - PT",
"StartChamferAngle": 45.0000000000,
"StartChamferOffset": 0.0300000000,
"Start_Taper_Location": "TaperDimAtBottom",
"Subtype": "General Screw Clearance",
"Taper_Angle_L_Value": 12.0000000000,
"Taper_Angle_R_Value": 0.1250000000,
"Taper_Angle_Type": "TaperRLRatio",
"Taper_Angle_Value": 0.0104166667,
"Thread_Designation": [],
"Thread_Diameter_Option": "InternalDiameterOption",
"Thread_Extent_Amount": 2.0000000000,
"Thread_Extent_Type": "FiniteThreadDepth",
"Thread_External_Diameter": 1.1964000000,
"Thread_Family": "ACME LH",
"Thread_Height": 0.0000000000,
"Thread_Inside_Effective_Length": 0.0000000000,
"Thread_Internal_Designation": [],
"Thread_Internal_Diameter": 1.2624000000,
"Thread_Location": "Inside",
"Thread_Nominal_Diameter": 1.5000000000,
"Thread_Offset": 0.0000000000,
"Thread_Outside_Effective_Length": 0.0000000000,
"Thread_Setting": "HoleSetting",
"Thread_TapDrill_Diameter": 1.2750000000,
"Thread_Taper_Angle": 0.0436332313,
"Thread_Type": "NonStandardForm",
"Treatment": "NoTreatment",
"Type": "Simple",
"Unit": "StandardMeasurementUnit",
"name": "Simple 0.500",
},
"Simple 0.625": {
"Bottom_Angle": 0.0000000000,
"Bottom_Type": "VBottomDimToFlat",
"ChamferProp": 0,
"Counterbore_Bottom_Angle": 0.0000000000,
"Counterbore_Depth": 1.0000000000,
"Counterbore_Diameter": 1.6250000000,
"Counterbore_Location": "CounterboreProfileIsAtTop",
"Countersink_Angle": 82.0000000000,
"Countersink_Diameter": 1.6250000000,
"Countersink_Recess_Depth": 1.0000000000,
"Depth": 2.0000000000,
"Diameter": 0.6875000000,
"EndChamferAngle": 45.0000000000,
"EndChamferOffset": 0.0300000000,
"Extent": "ThroughNextHole",
"Fit": "Normal",
"NeckChamferAngle": 0.0000000000,
"NeckChamferOffset": 0.0000000000,
"Orientation": "NormalToPlacementPlane",
"Size": "5/8",
"Standard": "ANSI Inch - PT",
"StartChamferAngle": 45.0000000000,
"StartChamferOffset": 0.0300000000,
"Start_Taper_Location": "TaperDimAtBottom",
"Subtype": "General Screw Clearance",
"Taper_Angle_L_Value": 12.0000000000,
"Taper_Angle_R_Value": 0.1250000000,
"Taper_Angle_Type": "TaperRLRatio",
"Taper_Angle_Value": 0.0104166667,
"Thread_Designation": [],
"Thread_Diameter_Option": "InternalDiameterOption",
"Thread_Extent_Amount": 2.0000000000,
"Thread_Extent_Type": "FiniteThreadDepth",
"Thread_External_Diameter": 1.1964000000,
"Thread_Family": "ACME LH",
"Thread_Height": 0.0000000000,
"Thread_Inside_Effective_Length": 0.0000000000,
"Thread_Internal_Designation": [],
"Thread_Internal_Diameter": 1.2624000000,
"Thread_Location": "Inside",
"Thread_Nominal_Diameter": 1.5000000000,
"Thread_Offset": 0.0000000000,
"Thread_Outside_Effective_Length": 0.0000000000,
"Thread_Setting": "HoleSetting",
"Thread_TapDrill_Diameter": 1.2750000000,
"Thread_Taper_Angle": 0.0436332313,
"Thread_Type": "NonStandardForm",
"Treatment": "NoTreatment",
"Type": "Simple",
"Unit": "StandardMeasurementUnit",
"name": "Simple 0.625",
},
"Simple 0.750": {
"Bottom_Angle": 0.0000000000,
"Bottom_Type": "VBottomDimToFlat",
"ChamferProp": 0,
"Counterbore_Bottom_Angle": 0.0000000000,
"Counterbore_Depth": 1.0000000000,
"Counterbore_Diameter": 1.6250000000,
"Counterbore_Location": "CounterboreProfileIsAtTop",
"Countersink_Angle": 82.0000000000,
"Countersink_Diameter": 1.6250000000,
"Countersink_Recess_Depth": 1.0000000000,
"Depth": 2.0000000000,
"Diameter": 0.8125000000,
"EndChamferAngle": 45.0000000000,
"EndChamferOffset": 0.0300000000,
"Extent": "ThroughNextHole",
"Fit": "Normal",
"NeckChamferAngle": 0.0000000000,
"NeckChamferOffset": 0.0000000000,
"Orientation": "NormalToPlacementPlane",
"Size": "3/4",
"Standard": "ANSI Inch - PT",
"StartChamferAngle": 45.0000000000,
"StartChamferOffset": 0.0300000000,
"Start_Taper_Location": "TaperDimAtBottom",
"Subtype": "General Screw Clearance",
"Taper_Angle_L_Value": 12.0000000000,
"Taper_Angle_R_Value": 0.1250000000,
"Taper_Angle_Type": "TaperRLRatio",
"Taper_Angle_Value": 0.0104166667,
"Thread_Designation": [],
"Thread_Diameter_Option": "InternalDiameterOption",
"Thread_Extent_Amount": 2.0000000000,
"Thread_Extent_Type": "FiniteThreadDepth",
"Thread_External_Diameter": 1.1964000000,
"Thread_Family": "ACME LH",
"Thread_Height": 0.0000000000,
"Thread_Inside_Effective_Length": 0.0000000000,
"Thread_Internal_Designation": [],
"Thread_Internal_Diameter": 1.2624000000,
"Thread_Location": "Inside",
"Thread_Nominal_Diameter": 1.5000000000,
"Thread_Offset": 0.0000000000,
"Thread_Outside_Effective_Length": 0.0000000000,
"Thread_Setting": "HoleSetting",
"Thread_TapDrill_Diameter": 1.2750000000,
"Thread_Taper_Angle": 0.0436332313,
"Thread_Type": "NonStandardForm",
"Treatment": "NoTreatment",
"Type": "Simple",
"Unit": "StandardMeasurementUnit",
"name": "Simple 0.750",
},
"Simple 1.000": {
"Bottom_Angle": 0.0000000000,
"Bottom_Type": "VBottomDimToFlat",
"ChamferProp": 0,
"Counterbore_Bottom_Angle": 0.0000000000,
"Counterbore_Depth": 1.0000000000,
"Counterbore_Diameter": 1.6250000000,
"Counterbore_Location": "CounterboreProfileIsAtTop",
"Countersink_Angle": 82.0000000000,
"Countersink_Diameter": 1.6250000000,
"Countersink_Recess_Depth": 1.0000000000,
"Depth": 2.0000000000,
"Diameter": 1.0312000000,
"EndChamferAngle": 45.0000000000,
"EndChamferOffset": 0.0600000000,
"Extent": "ThroughNextHole",
"Fit": "Normal",
"NeckChamferAngle": 0.0000000000,
"NeckChamferOffset": 0.0000000000,
"Orientation": "NormalToPlacementPlane",
"Size": "1",
"Standard": "ANSI Inch - PT",
"StartChamferAngle": 45.0000000000,
"StartChamferOffset": 0.0600000000,
"Start_Taper_Location": "TaperDimAtBottom",
"Subtype": "General Screw Clearance",
"Taper_Angle_L_Value": 12.0000000000,
"Taper_Angle_R_Value": 0.1250000000,
"Taper_Angle_Type": "TaperRLRatio",
"Taper_Angle_Value": 0.0104166667,
"Thread_Designation": [],
"Thread_Diameter_Option": "InternalDiameterOption",
"Thread_Extent_Amount": 2.0000000000,
"Thread_Extent_Type": "FiniteThreadDepth",
"Thread_External_Diameter": 1.1964000000,
"Thread_Family": "ACME LH",
"Thread_Height": 0.0000000000,
"Thread_Inside_Effective_Length": 0.0000000000,
"Thread_Internal_Designation": [],
"Thread_Internal_Diameter": 1.2624000000,
"Thread_Location": "Inside",
"Thread_Nominal_Diameter": 1.5000000000,
"Thread_Offset": 0.0000000000,
"Thread_Outside_Effective_Length": 0.0000000000,
"Thread_Setting": "HoleSetting",
"Thread_TapDrill_Diameter": 1.2750000000,
"Thread_Taper_Angle": 0.0436332313,
"Thread_Type": "NonStandardForm",
"Treatment": "NoTreatment",
"Type": "Simple",
"Unit": "StandardMeasurementUnit",
"name": "Simple 1.000",
},
"Threaded # 8-32 UNC": {
"BottomAngle": "0.0",
"CounterboreDepth": "0.0127",
"CounterboreDiameter": "0.0127",
"CounterboreProfileLocationType": "149",
"CountersinkAngle": "82.0",
"CountersinkDiameter": "0.0127",
"Fit": "Normal",
"HeadClearance": "0.0127",
"HoleDiameter": "0.0041656",
"HoleType": "33",
"InsideEffectiveThreadLength": "0.0",
"OutsideEffectiveThreadLength": "0.0",
"Size": "#8-32 UNC",
"Standard": "ANSI Inch - PT",
"SubType": "Standard Thread",
"Taper": "0.0104166666667",
"TaperDimType": "148",
"TaperLValue": "0.3048",
"TaperMethod": "151",
"TaperRValue": "0.003175",
"ThreadDepth": "0.0508",
"ThreadDepthMethod": "16",
"ThreadDescription": "#8-32 UNC",
"ThreadDiameterOption": "1",
"ThreadExternalDiameter": "0.00319278",
"ThreadHeight": "0.0",
"ThreadMinorDiameter": "0.00330708",
"ThreadNominalDiameter": "0.0041656",
"ThreadSetting": "164",
"ThreadTapDrillDiameter": "0.0034544",
"ThreadTaperAngle": "0.0436332312999",
"TreatmentType": "37",
"Units": "0",
"VBottomDimType": "145",
},
"Threaded #10-24 UNC": {
"BottomAngle": "0.0",
"CounterboreDepth": "0.0127",
"CounterboreDiameter": "0.0127",
"CounterboreProfileLocationType": "149",
"CountersinkAngle": "82.0",
"CountersinkDiameter": "0.0127",
"Fit": "Normal",
"HeadClearance": "0.0127",
"HoleDiameter": "0.004826",
"HoleType": "33",
"InsideEffectiveThreadLength": "0.0",
"OutsideEffectiveThreadLength": "0.0",
"Size": "#10-24 UNC",
"Standard": "ANSI Inch - PT",
"SubType": "Standard Thread",
"Taper": "0.0104166666667",
"TaperDimType": "148",
"TaperLValue": "0.3048",
"TaperMethod": "151",
"TaperRValue": "0.003175",
"ThreadDepth": "0.0508",
"ThreadDepthMethod": "16",
"ThreadDescription": "#10-24 UNC",
"ThreadDiameterOption": "1",
"ThreadExternalDiameter": "0.00352806",
"ThreadHeight": "0.0",
"ThreadMinorDiameter": "0.00368046",
"ThreadNominalDiameter": "0.004826",
"ThreadSetting": "164",
"ThreadTapDrillDiameter": "0.0037973",
"ThreadTaperAngle": "0.0436332312999",
"TreatmentType": "37",
"Units": "0",
"VBottomDimType": "145",
},
"Threaded #10-32 UNF": {
"BottomAngle": "0.0",
"CounterboreDepth": "0.0127",
"CounterboreDiameter": "0.0127",
"CounterboreProfileLocationType": "149",
"CountersinkAngle": "82.0",
"CountersinkDiameter": "0.0127",
"Fit": "Normal",
"HeadClearance": "0.0127",
"HoleDiameter": "0.004826",
"HoleType": "33",
"InsideEffectiveThreadLength": "0.0",
"OutsideEffectiveThreadLength": "0.0",
"Size": "#10-32 UNF",
"Standard": "ANSI Inch - PT",
"SubType": "Standard Thread",
"Taper": "0.0104166666667",
"TaperDimType": "148",
"TaperLValue": "0.3048",
"TaperMethod": "151",
"TaperRValue": "0.003175",
"ThreadDepth": "0.0508",
"ThreadDepthMethod": "16",
"ThreadDescription": "#10-32 UNF",
"ThreadDiameterOption": "1",
"ThreadExternalDiameter": "0.00385318",
"ThreadHeight": "0.0",
"ThreadMinorDiameter": "0.00396748",
"ThreadNominalDiameter": "0.004826",
"ThreadSetting": "164",
"ThreadTapDrillDiameter": "0.0040386",
"ThreadTaperAngle": "0.0436332312999",
"TreatmentType": "37",
"Units": "0",
"VBottomDimType": "145",
},
"Threaded 0.250-20 UNC": {
"BottomAngle": "0.0",
"CounterboreDepth": "0.0127",
"CounterboreDiameter": "0.0127",
"CounterboreProfileLocationType": "149",
"CountersinkAngle": "82.0",
"CountersinkDiameter": "0.0127",
"Fit": "Normal",
"HeadClearance": "0.0127",
"HoleDiameter": "0.00635",
"HoleType": "33",
"InsideEffectiveThreadLength": "0.0",
"OutsideEffectiveThreadLength": "0.0",
"Size": "1/4-20 UNC",
"Standard": "ANSI Inch - PT",
"SubType": "Standard Thread",
"Taper": "0.0104166666667",
"TaperDimType": "148",
"TaperLValue": "0.3048",
"TaperMethod": "151",
"TaperRValue": "0.003175",
"ThreadDepth": "0.0508",
"ThreadDepthMethod": "16",
"ThreadDescription": "1/4-20 UNC",
"ThreadDiameterOption": "1",
"ThreadExternalDiameter": "0.00479298",
"ThreadHeight": "0.0",
"ThreadMinorDiameter": "0.00497586",
"ThreadNominalDiameter": "0.00635",
"ThreadSetting": "164",
"ThreadTapDrillDiameter": "0.0051054",
"ThreadTaperAngle": "0.0436332312999",
"TreatmentType": "37",
"Units": "0",
"VBottomDimType": "145",
},
"Threaded 0.250-20 UNC LH": {
"Bottom_Angle": "118.0000000000",
"Bottom_Type": "VBottomDimToFlat",
"ChamferProp": 0,
"Counterbore_Bottom_Angle": 0.0000000000,
"Counterbore_Depth": 1.0000000000,
"Counterbore_Diameter": 1.6250000000,
"Counterbore_Location": "CounterboreProfileIsAtTop",
"Countersink_Angle": 82.0000000000,
"Countersink_Diameter": 1.6250000000,
"Countersink_Recess_Depth": 1.0000000000,
"Depth": 1.0000000000,
"Diameter": 0.2500000000,
"EndChamferAngle": 0.0000000000,
"EndChamferOffset": 0.0000000000,
"Extent": "BlindHole",
"Fit": "Normal",
"NeckChamferAngle": 0.0000000000,
"NeckChamferOffset": 0.0000000000,
"Orientation": "NormalToPlacementPlane",
"Size": "1/4-20 UNC LEFT HAND",
"Standard": "ANSI Inch - PT",
"StartChamferAngle": 0.0000000000,
"StartChamferOffset": 0.0000000000,
"Start_Taper_Location": "TaperDimAtBottom",
"Subtype": "Standard Thread",
"Taper_Angle_L_Value": 12.0000000000,
"Taper_Angle_R_Value": 0.1250000000,
"Taper_Angle_Type": "TaperRLRatio",
"Taper_Angle_Value": 0.0104166667,
"Thread_Designation": "1/4-20 UNC LEFT HAND",
"Thread_Diameter_Option": "TapDrillDiameterOption",
"Thread_Extent_Amount": 0.7500000000,
"Thread_Extent_Type": "FiniteThreadDepth",
"Thread_External_Diameter": 0.1887000000,
"Thread_Family": "UNC LH",
"Thread_Height": 0.0000000000,
"Thread_Inside_Effective_Length": 0.0000000000,
"Thread_Internal_Designation": [],
"Thread_Internal_Diameter": 0.1959000000,
"Thread_Location": "Inside",
"Thread_Nominal_Diameter": 0.2500000000,
"Thread_Offset": 0.0000000000,
"Thread_Outside_Effective_Length": 0.0000000000,
"Thread_Setting": "HoleSetting",
"Thread_TapDrill_Diameter": 0.2010000000,
"Thread_Taper_Angle": 0.0436332313,
"Thread_Type": "UNCForm",
"Treatment": "TappedHole",
"Type": "Threaded",
"Unit": "StandardMeasurementUnit",
"name": "Threaded 0.250-20 UNC LH",
},
"Threaded 0.312-18 UNC": {
"BottomAngle": "0.0",
"CounterboreDepth": "0.0127",
"CounterboreDiameter": "0.0127",
"CounterboreProfileLocationType": "149",
"CountersinkAngle": "82.0",
"CountersinkDiameter": "0.0127",
"Fit": "Normal",
"HeadClearance": "0.0127",
"HoleDiameter": "0.0079375",
"HoleType": "33",
"InsideEffectiveThreadLength": "0.0",
"OutsideEffectiveThreadLength": "0.0",
"Size": "5/16-18 UNC",
"Standard": "ANSI Inch - PT",
"SubType": "Standard Thread",
"Taper": "0.0104166666667",
"TaperDimType": "148",
"TaperLValue": "0.3048",
"TaperMethod": "151",
"TaperRValue": "0.003175",
"ThreadDepth": "0.0508",
"ThreadDepthMethod": "16",
"ThreadDescription": "5/16-18 UNC",
"ThreadDiameterOption": "1",
"ThreadExternalDiameter": "0.00620522",
"ThreadHeight": "0.0",
"ThreadMinorDiameter": "0.00641096",
"ThreadNominalDiameter": "0.0079375",
"ThreadSetting": "164",
"ThreadTapDrillDiameter": "0.0065278",
"ThreadTaperAngle": "0.0436332312999",
"TreatmentType": "37",
"Units": "0",
"VBottomDimType": "145",
},
"Threaded 0.312-18 UNC LH": {
"Bottom_Angle": "118.0000000000",
"Bottom_Type": "VBottomDimToFlat",
"ChamferProp": 0,
"Counterbore_Bottom_Angle": 0.0000000000,
"Counterbore_Depth": 1.0000000000,
"Counterbore_Diameter": 1.6250000000,
"Counterbore_Location": "CounterboreProfileIsAtTop",
"Countersink_Angle": 82.0000000000,
"Countersink_Diameter": 1.6250000000,
"Countersink_Recess_Depth": 1.0000000000,
"Depth": 1.0000000000,
"Diameter": 0.3125000000,
"EndChamferAngle": 0.0000000000,
"EndChamferOffset": 0.0000000000,
"Extent": "BlindHole",
"Fit": "Normal",
"NeckChamferAngle": 0.0000000000,
"NeckChamferOffset": 0.0000000000,
"Orientation": "NormalToPlacementPlane",
"Size": "5/16-18 UNC LEFT HAND",
"Standard": "ANSI Inch - PT",
"StartChamferAngle": 0.0000000000,
"StartChamferOffset": 0.0000000000,
"Start_Taper_Location": "TaperDimAtBottom",
"Subtype": "Standard Thread",
"Taper_Angle_L_Value": 12.0000000000,
"Taper_Angle_R_Value": 0.1250000000,
"Taper_Angle_Type": "TaperRLRatio",
"Taper_Angle_Value": 0.0104166667,
"Thread_Designation": "5/16-18 UNC LEFT HAND",
"Thread_Diameter_Option": "TapDrillDiameterOption",
"Thread_Extent_Amount": 0.7500000000,
"Thread_Extent_Type": "FiniteThreadDepth",
"Thread_External_Diameter": 0.2443000000,
"Thread_Family": "UNC LH",
"Thread_Height": 0.0000000000,
"Thread_Inside_Effective_Length": 0.0000000000,
"Thread_Internal_Designation": [],
"Thread_Internal_Diameter": 0.2524000000,
"Thread_Location": "Inside",
"Thread_Nominal_Diameter": 0.3125000000,
"Thread_Offset": 0.0000000000,
"Thread_Outside_Effective_Length": 0.0000000000,
"Thread_Setting": "HoleSetting",
"Thread_TapDrill_Diameter": 0.2570000000,
"Thread_Taper_Angle": 0.0436332313,
"Thread_Type": "UNCForm",
"Treatment": "TappedHole",
"Type": "Threaded",
"Unit": "StandardMeasurementUnit",
"name": "Threaded 0.312-18 UNC LH",
},
"Threaded 0.375-16 UNC": {
"BottomAngle": "0.0",
"CounterboreDepth": "0.0127",
"CounterboreDiameter": "0.0127",
"CounterboreProfileLocationType": "149",
"CountersinkAngle": "82.0",
"CountersinkDiameter": "0.0127",
"Fit": "Normal",
"HeadClearance": "0.0127",
"HoleDiameter": "0.009525",
"HoleType": "33",
"InsideEffectiveThreadLength": "0.0",
"OutsideEffectiveThreadLength": "0.0",
"Size": "3/8-16 UNC",
"Standard": "ANSI Inch - PT",
"SubType": "Standard Thread",
"Taper": "0.0104166666667",
"TaperDimType": "148",
"TaperLValue": "0.3048",
"TaperMethod": "151",
"TaperRValue": "0.003175",
"ThreadDepth": "0.0508",
"ThreadDepthMethod": "16",
"ThreadDescription": "3/8-16 UNC",
"ThreadDiameterOption": "1",
"ThreadExternalDiameter": "0.00757682",
"ThreadHeight": "0.0",
"ThreadMinorDiameter": "0.00780542",
"ThreadNominalDiameter": "0.009525",
"ThreadSetting": "164",
"ThreadTapDrillDiameter": "0.0079375",
"ThreadTaperAngle": "0.0436332312999",
"TreatmentType": "37",
"Units": "0",
"VBottomDimType": "145",
},
"Threaded 0.375-16 UNC LH": {
"Bottom_Angle": "118.0000000000",
"Bottom_Type": "VBottomDimToFlat",
"ChamferProp": 0,
"Counterbore_Bottom_Angle": 0.0000000000,
"Counterbore_Depth": 1.0000000000,
"Counterbore_Diameter": 1.6250000000,
"Counterbore_Location": "CounterboreProfileIsAtTop",
"Countersink_Angle": 82.0000000000,
"Countersink_Diameter": 1.6250000000,
"Countersink_Recess_Depth": 1.0000000000,
"Depth": 1.0000000000,
"Diameter": 0.3750000000,
"EndChamferAngle": 0.0000000000,
"EndChamferOffset": 0.0000000000,
"Extent": "BlindHole",
"Fit": "Normal",
"NeckChamferAngle": 0.0000000000,
"NeckChamferOffset": 0.0000000000,
"Orientation": "NormalToPlacementPlane",
"Size": "3/8-16 UNC LEFT HAND",
"Standard": "ANSI Inch - PT",
"StartChamferAngle": 0.0000000000,
"StartChamferOffset": 0.0000000000,
"Start_Taper_Location": "TaperDimAtBottom",
"Subtype": "Standard Thread",
"Taper_Angle_L_Value": 12.0000000000,
"Taper_Angle_R_Value": 0.1250000000,
"Taper_Angle_Type": "TaperRLRatio",
"Taper_Angle_Value": 0.0104166667,
"Thread_Designation": "3/8-16 UNC LEFT HAND",
"Thread_Diameter_Option": "TapDrillDiameterOption",
"Thread_Extent_Amount": 0.7500000000,
"Thread_Extent_Type": "FiniteThreadDepth",
"Thread_External_Diameter": 0.2983000000,
"Thread_Family": "UNC LH",
"Thread_Height": 0.0000000000,
"Thread_Inside_Effective_Length": 0.0000000000,
"Thread_Internal_Designation": [],
"Thread_Internal_Diameter": 0.3073000000,
"Thread_Location": "Inside",
"Thread_Nominal_Diameter": 0.3750000000,
"Thread_Offset": 0.0000000000,
"Thread_Outside_Effective_Length": 0.0000000000,
"Thread_Setting": "HoleSetting",
"Thread_TapDrill_Diameter": 0.3125000000,
"Thread_Taper_Angle": 0.0436332313,
"Thread_Type": "UNCForm",
"Treatment": "TappedHole",
"Type": "Threaded",
"Unit": "StandardMeasurementUnit",
"name": "Threaded 0.375-16 UNC LH",
},
"Threaded 0.375-24 UNF": {
"BottomAngle": "0.0",
"CounterboreDepth": "0.0127",
"CounterboreDiameter": "0.0127",
"CounterboreProfileLocationType": "149",
"CountersinkAngle": "82.0",
"CountersinkDiameter": "0.0127",
"Fit": "Normal",
"HeadClearance": "0.0127",
"HoleDiameter": "0.009525",
"HoleType": "33",
| |
3:
for unused_repeatCounter in range(repeatTimes):
self.addSignatures(symbols['repeat'])
elif repeatTimes >= 3: # 17.3 -- repeat plus number.
self.addSignatures(symbols['repeat'] + basic.numberToBraille(repeatTimes))
# noinspection PyAttributeOutsideInit
self.lastNote = None # this is set up to force an octave symbol on next note
def extractSignatureGrouping(self):
'''
Extracts a key signature, time signature, and possibly an outgoing key signature
from the currentGroupingKey and adds it to the BrailleText object.
'''
keySignature = None
timeSignature = None
cgk = self.currentGroupingKey
noteGrouping = self._groupingDict.get(cgk)
if len(noteGrouping) >= 2:
keySignature, timeSignature = noteGrouping[0], noteGrouping[1]
elif len(noteGrouping) == 1:
keyOrTimeSig = self._groupingDict.get(self.currentGroupingKey)[0]
if isinstance(keyOrTimeSig, key.KeySignature):
keySignature = keyOrTimeSig
else:
timeSignature = keyOrTimeSig
outgoingKeySig = None
if self.cancelOutgoingKeySig and keySignature is not None:
try:
outgoingKeySig = keySignature.outgoingKeySig
except AttributeError:
pass
brailleSig = basic.transcribeSignatures(keySignature, timeSignature, outgoingKeySig)
if brailleSig != '':
self.addSignatures(brailleSig)
def extractTempoTextGrouping(self):
'''
extracts a tempo text and processes it...
'''
self.groupingKeysToProcess.insert(0, self.currentGroupingKey)
if self.previousGroupingKey.affinity == Affinity.SIGNATURE:
self.groupingKeysToProcess.insert(0, self.previousGroupingKey)
self.extractHeading()
self.extractMeasureNumber()
def consolidate(self):
'''
Puts together certain types of elements according to the last digit of their key
(if it is the same as Affinity.NOTEGROUP or not.
>>> SK = braille.segment.SegmentKey
>>> BS1 = braille.segment.BrailleSegment()
>>> BS1[SK(ordinal=0, affinity=2)] = ['hi', 'hello', 'there']
>>> BS1[SK(ordinal=1, affinity=9)] = ['these', 'get']
>>> BS1[SK(ordinal=2, affinity=9)] = ['put', 'together']
>>> BS1[SK(ordinal=3, affinity=4)] = ['in', 'new', 'group']
>>> BS1[SK(ordinal=4, affinity=9)] = ['with', 'the', 'previous']
>>> BS2 = BS1.consolidate()
>>> for (groupingKey, groupingList) in sorted(BS2.items()):
... print(groupingKey, groupingList)
SegmentKey(measure=0, ordinal=0, affinity=2, hand=None) ['hi', 'hello', 'there']
SegmentKey(measure=0, ordinal=1, affinity=9, hand=None) these
get
put
together
SegmentKey(measure=0, ordinal=3, affinity=4, hand=None) ['in', 'new', 'group']
SegmentKey(measure=0, ordinal=4, affinity=9, hand=None) with
the
previous
'''
newSegment = BrailleSegment()
pngKey = None
for (groupingKey, groupingList) in sorted(self.items()):
if groupingKey.affinity != Affinity.NOTEGROUP:
newSegment[groupingKey] = groupingList
pngKey = None
else:
if pngKey is None:
pngKey = groupingKey
for item in groupingList:
newSegment[pngKey].append(item)
return newSegment
    def addGroupingAttributes(self, **partKeywords):
        '''
        Modifies the attributes of all :class:`~music21.braille.segment.BrailleElementGrouping`
        instances in a list of :class:`~music21.braille.segment.BrailleSegment` instances. The
        necessary information is retrieved both by passing in partKeywords as an argument and
        by taking into account the linear progression of the groupings and segments.
        '''
        # Running state: the most recent signatures carry forward from one
        # grouping to the next until a SIGNATURE grouping replaces them.
        currentKeySig = key.KeySignature(0)
        currentTimeSig = meter.TimeSignature('4/4')
        # Module-level defaults, each individually overridable via partKeywords.
        descendingChords = GROUPING_DESC_CHORDS
        showClefSigns = GROUPING_SHOW_CLEFS
        upperFirstInNoteFingering = GROUPING_UPPERFIRST_NOTEFINGERING
        if 'showClefSigns' in partKeywords:
            showClefSigns = partKeywords['showClefSigns']
        if 'upperFirstInNoteFingering' in partKeywords:
            upperFirstInNoteFingering = partKeywords['upperFirstInNoteFingering']
        if 'descendingChords' in partKeywords:
            descendingChords = partKeywords['descendingChords']
        allGroupings = sorted(self.items())
        (previousKey, previousList) = (None, None)
        for (groupingKey, groupingList) in allGroupings:
            if previousKey is not None:
                if groupingKey.ordinal >= 1:
                    # ordinal >= 1 means this grouping continues the previous
                    # one, so the previous grouping gets a music hyphen.
                    previousList.withHyphen = True
                if (previousKey.ordinal == 0
                        and previousKey.affinity == Affinity.NOTEGROUP
                        and groupingKey.ordinal == 0
                        and groupingKey.affinity == Affinity.NOTEGROUP):
                    # Two consecutive first-ordinal notegroups: check whether the
                    # new one simply repeats the previous (a leading clef on the
                    # previous grouping is ignored for the comparison).
                    if isinstance(previousList[0], clef.Clef):
                        isRepetition = areGroupingsIdentical(previousList[1:], groupingList)
                    else:
                        isRepetition = areGroupingsIdentical(previousList, groupingList)
                    if isRepetition:
                        # Collapse the repeated grouping into a repeat count on
                        # the previous grouping and drop it from the segment.
                        previousList.numRepeats += 1
                        del self[groupingKey]
                        continue
            if groupingKey.affinity == Affinity.SIGNATURE:
                for brailleElement in groupingList:
                    if isinstance(brailleElement, meter.TimeSignature):
                        currentTimeSig = brailleElement
                    elif isinstance(brailleElement, key.KeySignature):
                        # Record the signature being replaced so it can later be
                        # cancelled (see extractSignatureGrouping).
                        brailleElement.outgoingKeySig = currentKeySig
                        currentKeySig = brailleElement
            elif groupingKey.affinity == Affinity.NOTEGROUP:
                if isinstance(groupingList[0], clef.Clef):
                    # A leading clef switches the chord-spelling direction.
                    if isinstance(groupingList[0], (clef.TrebleClef, clef.AltoClef)):
                        descendingChords = True
                    elif isinstance(groupingList[0], (clef.BassClef, clef.TenorClef)):
                        descendingChords = False
                # make a whole rest no matter the length of the rest if only one note.
                allGeneralNotes = [n for n in groupingList if isinstance(n, note.GeneralNote)]
                if len(allGeneralNotes) == 1 and isinstance(allGeneralNotes[0], note.Rest):
                    allGeneralNotes[0].fullMeasure = True
            # Stamp the current transcription context onto every grouping.
            groupingList.keySignature = currentKeySig
            groupingList.timeSignature = currentTimeSig
            groupingList.descendingChords = descendingChords
            groupingList.showClefSigns = showClefSigns
            groupingList.upperFirstInNoteFingering = upperFirstInNoteFingering
            (previousKey, previousList) = (groupingKey, groupingList)
        if self.endHyphen:
            # The segment continues into the next one: hyphenate the last grouping.
            previousList.withHyphen = True
def addSegmentAttributes(self, **partKeywords):
'''
Modifies the attributes of a :class:`~music21.braille.segment.BrailleSegment`
by passing partKeywords as an argument.
'''
if 'cancelOutgoingKeySig' in partKeywords:
self.cancelOutgoingKeySig = partKeywords['cancelOutgoingKeySig']
if 'dummyRestLength' in partKeywords:
self.dummyRestLength = partKeywords['dummyRestLength']
if 'lineLength' in partKeywords:
self.lineLength = partKeywords['lineLength']
if 'showFirstMeasureNumber' in partKeywords:
self.showFirstMeasureNumber = partKeywords['showFirstMeasureNumber']
if 'showHand' in partKeywords:
self.showHand = partKeywords['showHand']
if 'showHeading' in partKeywords:
self.showHeading = partKeywords['showHeading']
if 'suppressOctaveMarks' in partKeywords:
self.suppressOctaveMarks = partKeywords['suppressOctaveMarks']
def fixArticulations(self):
'''
Goes through each :class:`~music21.braille.segment.BrailleSegment` and modifies the
list of :attr:`~music21.note.GeneralNote.articulations` of a :class:`~music21.note.Note`
if appropriate. In particular, two rules are applied:
* Doubling rule => If four or more of the same :class:`~music21.articulations.Articulation`
are found in a row, the first instance of the articulation is doubled and the rest are
omitted.
* Staccato, Tenuto rule => "If two repeated notes appear to be tied, but either is marked
staccato or tenuto, they are treated as slurred instead of tied." (BMTM, 112)
'''
from music21 import articulations
def fixOneArticulation(artic, music21NoteStart, allNotes, noteIndexStart):
articName = artic.name
if articName == 'fingering': # fingerings are not considered articulations...
return
if (isinstance(artic, (articulations.Staccato, articulations.Tenuto))
and music21NoteStart.tie is not None):
if music21NoteStart.tie.type == 'stop':
allNotes[noteIndexStart - 1].tie = None
allNotes[noteIndexStart - 1].shortSlur = True
else:
allNotes[noteIndexStart + 1].tie = None
music21NoteStart.shortSlur = True
music21NoteStart.tie = None
numSequential = 0
for noteIndexContinue in range(noteIndexStart + 1, len(allNotes)):
music21NoteContinue = allNotes[noteIndexContinue]
if articName in [a.name for a in music21NoteContinue.articulations]:
numSequential += 1
continue
break
if numSequential < 3:
return
# else:
# double the articulation on the first note and remove from the next...
music21NoteStart.articulations.append(artic)
for noteIndexContinue in range(noteIndexStart + 1,
noteIndexStart + numSequential):
music21NoteContinue = allNotes[noteIndexContinue]
for artOther in music21NoteContinue.articulations:
if artOther.name == articName:
music21NoteContinue.articulations.remove(artOther)
newSegment = self.consolidate()
noteGroupings = [newSegment[gpKey]
for gpKey in newSegment.keys()
if gpKey.affinity == Affinity.NOTEGROUP]
for noteGrouping in noteGroupings:
allNotes_outer = [n for n in noteGrouping if isinstance(n, note.Note)]
for noteIndexStart_outer in range(len(allNotes_outer)):
music21NoteStart_outer = allNotes_outer[noteIndexStart_outer]
for artic_outer in music21NoteStart_outer.articulations:
fixOneArticulation(
artic_outer,
music21NoteStart_outer,
allNotes_outer,
noteIndexStart_outer
)
class BrailleGrandSegment(BrailleSegment, text.BrailleKeyboard):
'''
A BrailleGrandSegment represents a pair of segments (rightSegment, leftSegment)
representing the right and left hands of a piano staff (or other two-staff object)
'''
    def __init__(self):
        # Initialize both bases explicitly: BrailleSegment supplies the
        # keyed-grouping (dict-like) behavior, BrailleKeyboard the
        # two-staff keyboard text layout with the standard line length.
        BrailleSegment.__init__(self)
        text.BrailleKeyboard.__init__(self, lineLength=SEGMENT_LINELENGTH)
        # Pairing state used while transcribing right/left hand groupings.
        self.allKeyPairs = []
        self.previousGroupingPair = None
        self.currentGroupingPair = None
    @property
    def brailleText(self):
        # Render via the keyboard (two-hand) formatter rather than the
        # single-staff formatting inherited from BrailleSegment.
        return text.BrailleKeyboard.__str__(self)
    def __str__(self):
        '''
        Human-readable dump of the grand segment: one block per combined
        (right, left) grouping pair, in the order produced by
        yieldCombinedGroupingKeys(), each block closed by '===='.
        '''
        name = '<music21.braille.segment BrailleGrandSegment>\n==='
        allPairs = []
        for (rightKey, leftKey) in self.yieldCombinedGroupingKeys():
            # Either key may be None when only one hand has a grouping
            # at that position.
            if rightKey is not None:
                rightHeading = 'Measure {0} Right, {1} {2}:\n'.format(
                    rightKey.measure, affinityNames[rightKey.affinity], rightKey.ordinal + 1)
                rightContents = str(self._groupingDict.get(rightKey))
                rightFull = ''.join([rightHeading, rightContents])
            else:
                rightFull = ''
            if leftKey is not None:
                leftHeading = '\nMeasure {0} Left, {1} {2}:\n'.format(
                    leftKey.measure, affinityNames[leftKey.affinity], leftKey.ordinal + 1)
                leftContents = str(self._groupingDict.get(leftKey))
                leftFull = ''.join([leftHeading, leftContents])
            else:
                leftFull = ''
            allPairs.append('\n'.join([rightFull, leftFull, '====\n']))
        out = '\n'.join(['---begin grand segment---', name, ''.join(allPairs),
                         '---end grand segment---'])
        return out
    def yieldCombinedGroupingKeys(self):
        '''
        yields all the keys in order as a tuple of (rightKey, leftKey) where
        two keys are grouped if they have the same segmentKey except for the hand.

        >>> bgs = braille.segment.BrailleGrandSegment()
        >>> SegmentKey = braille.segment.SegmentKey # namedtuple
        >>> bgs[SegmentKey(1, 1, 1, 'right')] = '1r'
        >>> bgs[SegmentKey(1, 1, 1, 'left')] = '1l'
        >>> bgs[SegmentKey(1, 2, 3, 'right')] = '2r'
        >>> bgs[SegmentKey(1, 2, 4, 'left')] = '3l'
        >>> bgs[SegmentKey(2, 1, 9, 'left')] = '4l'
        >>> bgs[SegmentKey(2, 1, 9, 'right')] = '4r'
        >>> bgs[SegmentKey(3, 1, 9, 'right')] = '5r'
        >>> for l, r in bgs.yieldCombinedGroupingKeys():
        ...     (bgs[l], bgs[r])
        ('1r', '1l')
        ('2r', <music21.braille.segment.BrailleElementGrouping []>)
        (<music21.braille.segment.BrailleElementGrouping []>, '3l')
        ('4r', '4l')
        ('5r', <music21.braille.segment.BrailleElementGrouping []>)
        '''
        def segmentKeySortKey(segmentKey):
            '''
            sort by measure, then ordinal, then affinity, then hand (r then l)
            '''
            if segmentKey.hand == 'right':
                skH = -1
            else:
                skH = 1
            return (segmentKey.measure, segmentKey.ordinal, segmentKey.affinity, skH)

        def matchOther(thisKey_inner, otherKey):
            # Two keys "match" (belong to the same pair) when they agree on
            # everything except the hand.
            if (thisKey_inner.measure == otherKey.measure
                    and thisKey_inner.ordinal == otherKey.ordinal
                    and thisKey_inner.affinity == otherKey.affinity):
                return True
            else:
                return False

        # One-key lookahead buffers: hold the most recent unpaired key of
        # each hand until its partner -- or proof there is none -- arrives.
        storedRight = None
        storedLeft = None
        for thisKey in sorted(self.keys(), key=segmentKeySortKey):
            if thisKey.hand == 'right':
                if storedLeft is not None:
                    if matchOther(thisKey, storedLeft):
                        yield(thisKey, storedLeft)
                    elif (thisKey.affinity == Affinity.NOTEGROUP
                          and matchOther(thisKey._replace(affinity=Affinity.INACCORD), storedLeft)):
                        # r.h. notegroup goes before an lh inaccord, despite this being out of order
                        yield(thisKey, storedLeft)
                    else:
                        # The buffered left key has no right-hand partner;
                        # emit it alone and buffer this right key instead.
                        yield(None, storedLeft)
                        storedRight = thisKey
                    storedLeft = None
                else:
                    storedRight = thisKey
            elif thisKey.hand == 'left':
                if storedRight is not None:
                    if matchOther(thisKey, storedRight):
                        yield(storedRight, thisKey)
                    elif storedRight.affinity < Affinity.INACCORD:
                        # Unrelated keys: emit both unpaired, right first to
                        # preserve the overall ordering.
                        yield(storedRight, None)
                        yield(None, thisKey)
                    else:
                        yield(storedRight, None)
                        storedLeft = thisKey
                    storedRight = None
                else:
                    storedLeft = thisKey
        # Flush whichever hand is still waiting for a partner at the end.
        if storedRight:
            yield (storedRight, None)
        if storedLeft:
            yield (None, storedLeft)
# def combineGroupingKeys(self, rightSegment, leftSegment):
# # return list(self.yieldCombinedGroupingKeys())
#
# groupingKeysRight = sorted(rightSegment.keys())
# | |
Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy.
"""
return pulumi.get(self, "unhealthy_threshold")
@unhealthy_threshold.setter
def unhealthy_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "unhealthy_threshold", value)
@pulumi.input_type
class AlbBackendGroupGrpcBackendHealthcheckGrpcHealthcheckArgs:
    def __init__(__self__, *,
                 service_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] service_name: Service name for grpc.health.v1.HealthCheckRequest message.
        """
        # Store the value only when it was actually supplied.
        if service_name is None:
            return
        pulumi.set(__self__, "service_name", service_name)

    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> Optional[pulumi.Input[str]]:
        """
        Service name for grpc.health.v1.HealthCheckRequest message.
        """
        return pulumi.get(self, "service_name")

    @service_name.setter
    def service_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_name", value)
@pulumi.input_type
class AlbBackendGroupGrpcBackendHealthcheckHttpHealthcheckArgs:
    def __init__(__self__, *,
                 path: pulumi.Input[str],
                 host: Optional[pulumi.Input[str]] = None,
                 http2: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] path: HTTP path.
        :param pulumi.Input[str] host: "Host" HTTP header value.
        :param pulumi.Input[bool] http2: If set, health checks will use HTTP2.
        """
        # The required field is always stored; optional fields only when given.
        pulumi.set(__self__, "path", path)
        for _name, _value in (("host", host), ("http2", http2)):
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter
    def path(self) -> pulumi.Input[str]:
        """
        HTTP path.
        """
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: pulumi.Input[str]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        """
        "Host" HTTP header value.
        """
        return pulumi.get(self, "host")

    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)

    @property
    @pulumi.getter
    def http2(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, health checks will use HTTP2.
        """
        return pulumi.get(self, "http2")

    @http2.setter
    def http2(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "http2", value)
@pulumi.input_type
class AlbBackendGroupGrpcBackendHealthcheckStreamHealthcheckArgs:
    def __init__(__self__, *,
                 receive: Optional[pulumi.Input[str]] = None,
                 send: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] receive: Text to search in reply.
        :param pulumi.Input[str] send: Message to send. If empty, it's a connect-only health check.
        """
        # Both fields are optional; store only those actually supplied.
        for _name, _value in (("receive", receive), ("send", send)):
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter
    def receive(self) -> Optional[pulumi.Input[str]]:
        """
        Text to search in reply.
        """
        return pulumi.get(self, "receive")

    @receive.setter
    def receive(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "receive", value)

    @property
    @pulumi.getter
    def send(self) -> Optional[pulumi.Input[str]]:
        """
        Message to send. If empty, it's a connect-only health check.
        """
        return pulumi.get(self, "send")

    @send.setter
    def send(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "send", value)
@pulumi.input_type
class AlbBackendGroupGrpcBackendLoadBalancingConfigArgs:
    def __init__(__self__, *,
                 locality_aware_routing_percent: Optional[pulumi.Input[int]] = None,
                 panic_threshold: Optional[pulumi.Input[int]] = None,
                 strict_locality: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[int] locality_aware_routing_percent: Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones.
        :param pulumi.Input[int] panic_threshold: If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold.
        :param pulumi.Input[bool] strict_locality: If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones.
        """
        # All fields are optional; store only those actually supplied.
        for _name, _value in (
                ("locality_aware_routing_percent", locality_aware_routing_percent),
                ("panic_threshold", panic_threshold),
                ("strict_locality", strict_locality)):
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter(name="localityAwareRoutingPercent")
    def locality_aware_routing_percent(self) -> Optional[pulumi.Input[int]]:
        """
        Percent of traffic to be sent to the same availability zone. The rest will be equally divided between other zones.
        """
        return pulumi.get(self, "locality_aware_routing_percent")

    @locality_aware_routing_percent.setter
    def locality_aware_routing_percent(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "locality_aware_routing_percent", value)

    @property
    @pulumi.getter(name="panicThreshold")
    def panic_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        If percentage of healthy hosts in the backend is lower than panic_threshold, traffic will be routed to all backends no matter what the health status is. This helps to avoid healthy backends overloading when everything is bad. Zero means no panic threshold.
        """
        return pulumi.get(self, "panic_threshold")

    @panic_threshold.setter
    def panic_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "panic_threshold", value)

    @property
    @pulumi.getter(name="strictLocality")
    def strict_locality(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, will route requests only to the same availability zone. Balancer won't know about endpoints in other zones.
        """
        return pulumi.get(self, "strict_locality")

    @strict_locality.setter
    def strict_locality(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "strict_locality", value)
@pulumi.input_type
class AlbBackendGroupGrpcBackendTlsArgs:
    def __init__(__self__, *,
                 sni: Optional[pulumi.Input[str]] = None,
                 validation_context: Optional[pulumi.Input['AlbBackendGroupGrpcBackendTlsValidationContextArgs']] = None):
        """
        :param pulumi.Input[str] sni: [SNI](https://en.wikipedia.org/wiki/Server_Name_Indication) string for TLS connections.
               * `validation_context.0.trusted_ca_id` - (Optional) Trusted CA certificate ID in the Certificate Manager.
               * `validation_context.0.trusted_ca_bytes` - (Optional) PEM-encoded trusted CA certificate chain.
        """
        # Both fields are optional; store only those actually supplied.
        for _name, _value in (("sni", sni), ("validation_context", validation_context)):
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter
    def sni(self) -> Optional[pulumi.Input[str]]:
        """
        [SNI](https://en.wikipedia.org/wiki/Server_Name_Indication) string for TLS connections.
        * `validation_context.0.trusted_ca_id` - (Optional) Trusted CA certificate ID in the Certificate Manager.
        * `validation_context.0.trusted_ca_bytes` - (Optional) PEM-encoded trusted CA certificate chain.
        """
        return pulumi.get(self, "sni")

    @sni.setter
    def sni(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sni", value)

    @property
    @pulumi.getter(name="validationContext")
    def validation_context(self) -> Optional[pulumi.Input['AlbBackendGroupGrpcBackendTlsValidationContextArgs']]:
        return pulumi.get(self, "validation_context")

    @validation_context.setter
    def validation_context(self, value: Optional[pulumi.Input['AlbBackendGroupGrpcBackendTlsValidationContextArgs']]):
        pulumi.set(self, "validation_context", value)
@pulumi.input_type
class AlbBackendGroupGrpcBackendTlsValidationContextArgs:
    def __init__(__self__, *,
                 trusted_ca_bytes: Optional[pulumi.Input[str]] = None,
                 trusted_ca_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] trusted_ca_bytes: PEM-encoded trusted CA certificate chain.
        :param pulumi.Input[str] trusted_ca_id: Trusted CA certificate ID in the Certificate Manager.
        """
        # Both fields are optional; store only those actually supplied.
        for _name, _value in (("trusted_ca_bytes", trusted_ca_bytes),
                              ("trusted_ca_id", trusted_ca_id)):
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter(name="trustedCaBytes")
    def trusted_ca_bytes(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "trusted_ca_bytes")

    @trusted_ca_bytes.setter
    def trusted_ca_bytes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "trusted_ca_bytes", value)

    @property
    @pulumi.getter(name="trustedCaId")
    def trusted_ca_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "trusted_ca_id")

    @trusted_ca_id.setter
    def trusted_ca_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "trusted_ca_id", value)
@pulumi.input_type
class AlbBackendGroupHttpBackendArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 target_group_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
                 healthcheck: Optional[pulumi.Input['AlbBackendGroupHttpBackendHealthcheckArgs']] = None,
                 http2: Optional[pulumi.Input[bool]] = None,
                 load_balancing_config: Optional[pulumi.Input['AlbBackendGroupHttpBackendLoadBalancingConfigArgs']] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 tls: Optional[pulumi.Input['AlbBackendGroupHttpBackendTlsArgs']] = None,
                 weight: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] name: Name of the backend.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] target_group_ids: References target groups for the backend.
        :param pulumi.Input['AlbBackendGroupHttpBackendHealthcheckArgs'] healthcheck: Healthcheck specification that will be used by this backend. Structure is documented below.
        :param pulumi.Input[bool] http2: If set, health checks will use HTTP2.
        :param pulumi.Input['AlbBackendGroupHttpBackendLoadBalancingConfigArgs'] load_balancing_config: Load Balancing Config specification that will be used by this backend. Structure is documented below.
        :param pulumi.Input[int] port: Port for incoming traffic.
        :param pulumi.Input['AlbBackendGroupHttpBackendTlsArgs'] tls: Tls specification that will be used by this backend. Structure is documented below.
        :param pulumi.Input[int] weight: Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights.
        """
        # Required fields are always stored.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "target_group_ids", target_group_ids)
        # Optional fields are stored only when supplied.
        for _field, _value in (
                ("healthcheck", healthcheck),
                ("http2", http2),
                ("load_balancing_config", load_balancing_config),
                ("port", port),
                ("tls", tls),
                ("weight", weight)):
            if _value is not None:
                pulumi.set(__self__, _field, _value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the backend.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="targetGroupIds")
    def target_group_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        References target groups for the backend.
        """
        return pulumi.get(self, "target_group_ids")

    @target_group_ids.setter
    def target_group_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "target_group_ids", value)

    @property
    @pulumi.getter
    def healthcheck(self) -> Optional[pulumi.Input['AlbBackendGroupHttpBackendHealthcheckArgs']]:
        """
        Healthcheck specification that will be used by this backend. Structure is documented below.
        """
        return pulumi.get(self, "healthcheck")

    @healthcheck.setter
    def healthcheck(self, value: Optional[pulumi.Input['AlbBackendGroupHttpBackendHealthcheckArgs']]):
        pulumi.set(self, "healthcheck", value)

    @property
    @pulumi.getter
    def http2(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, health checks will use HTTP2.
        """
        return pulumi.get(self, "http2")

    @http2.setter
    def http2(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "http2", value)

    @property
    @pulumi.getter(name="loadBalancingConfig")
    def load_balancing_config(self) -> Optional[pulumi.Input['AlbBackendGroupHttpBackendLoadBalancingConfigArgs']]:
        """
        Load Balancing Config specification that will be used by this backend. Structure is documented below.
        """
        return pulumi.get(self, "load_balancing_config")

    @load_balancing_config.setter
    def load_balancing_config(self, value: Optional[pulumi.Input['AlbBackendGroupHttpBackendLoadBalancingConfigArgs']]):
        pulumi.set(self, "load_balancing_config", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        Port for incoming traffic.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def tls(self) -> Optional[pulumi.Input['AlbBackendGroupHttpBackendTlsArgs']]:
        """
        Tls specification that will be used by this backend. Structure is documented below.
        """
        return pulumi.get(self, "tls")

    @tls.setter
    def tls(self, value: Optional[pulumi.Input['AlbBackendGroupHttpBackendTlsArgs']]):
        pulumi.set(self, "tls", value)

    @property
    @pulumi.getter
    def weight(self) -> Optional[pulumi.Input[int]]:
        """
        Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights.
        """
        return pulumi.get(self, "weight")

    @weight.setter
    def weight(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weight", value)
@pulumi.input_type
class AlbBackendGroupHttpBackendHealthcheckArgs:
def __init__(__self__, *,
interval: pulumi.Input[str],
timeout: pulumi.Input[str],
grpc_healthcheck: Optional[pulumi.Input['AlbBackendGroupHttpBackendHealthcheckGrpcHealthcheckArgs']] = None,
healthcheck_port: Optional[pulumi.Input[int]] = None,
healthy_threshold: Optional[pulumi.Input[int]] = None,
http_healthcheck: Optional[pulumi.Input['AlbBackendGroupHttpBackendHealthcheckHttpHealthcheckArgs']] = None,
interval_jitter_percent: Optional[pulumi.Input[float]] = None,
stream_healthcheck: Optional[pulumi.Input['AlbBackendGroupHttpBackendHealthcheckStreamHealthcheckArgs']] = None,
unhealthy_threshold: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] interval: Interval between health checks.
:param pulumi.Input[str] timeout: Time to wait for a health check response.
:param pulumi.Input['AlbBackendGroupHttpBackendHealthcheckGrpcHealthcheckArgs'] grpc_healthcheck: Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below.
:param pulumi.Input[int] healthcheck_port: Optional alternative port for health checking.
:param pulumi.Input[int] healthy_threshold: Number of consecutive successful health checks required to promote endpoint into the healthy | |
settings, the " + \
"requested trigger period must be increased to " + \
str(requested_trigger_period - internal_delay_period) + \
" s minimum. Should the settings change, this minimum "+ \
"value may increase. It is thus good practice to add " + \
"some additional time to the updated value.")
elif self.getValue('Dynamic repetition rate'):
# Expand the calculated delay.
# Note: subtracting a negative value.
internal_delay_period = \
requested_trigger_period - internal_delay_period
# Update the Labber setting.
self.setValue( \
'Internal trigger period', \
internal_delay_period
)
# # Insert the calculated value into the sequencer program.
# internal_trigger_waiting = \
# int(round(internal_delay_period * sequencer_clk))
# self.local_awg_program = self.local_awg_program.replace(\
# '&DELAY_BEFORE_LOOP_END', \
# 'wait(' +str(internal_trigger_waiting)+ ');')
else:
# The repetition rate is illegal but the user wishes to
# ignore said fact.
self.log( "Warning: illegal repetition rate detected " + \
"and ignored.", level=30)
# "Return" an internal delay period. This is used in the sequencer
# program generator.
self.verified_internal_delay_period = internal_delay_period
#######################################
""" Generate AWG sequencer program. """
#######################################
def generateSequencerProgram(self):
'''This function generates a local AWG program, that in turn will be
uploaded into the sequencer.
The general layout of the sequencer program generation is to assemble
a skeleton dictionary bearing &-tags. Depending on a vast array of
options, these tags will be modified by the generation functions
accordingly. {'waveform_declaration','&'} may for instance be replaced
with the waveform declarations, enabling the instrument to play the
Labber-defined waveforms.
Default skeleton:
self.local_awg_program = { \
'WAVEFORM_DECLARATION' : "&" , \
'WHILE_LOOP_START' : "&" , \
'WAIT_FOR_INITIAL_TRIGGER' : "&" , \
'SYNCHRONISE_TO_BEATING_FREQUENCY' : "&" , \
'START_TRIGGER_PULSE' : "&" , \
'PLAYWAVE' : "&" , \
'WAITWAVE' : "&" , \
'DELAY_BEFORE_END_TRIGGER' : "&" , \
'END_TRIGGER_PULSE' : "&" , \
'DELAY_BEFORE_LOOP_END' : "&" , \
'WAIT_FOR_TRIGGER_TO_REPEAT' : "&" , \
'WHILE_LOOP_END' : "&" , \
'TIMESTAMP' : "&" , \
}
'''
        # Calculate basic clock and sampling rates, used for several functions
# in the sequencer program generation.
sample_rate = \
self.getValue('Output sample rate') / \
2**self.getValueIndex('Output sample rate divisor')
# The sequencer operational count (OPS) is 1/8 of the sample clock.
sequencer_clk = sample_rate / 8.0
# The channel grouping has been modified at the performSet for
# every command that involves the usage of the on-board oscillators.
# # # # Generate program # # # #
# TODO DEBUG
self.log('Should we update local program [0]? : '+str(self.update_local_awg_program[0])+'\nDID any waveform have markers? = '+str(any(self.waveform_has_markers)),level=30)
# Are there any changes to entry 0:
# MARKER_DECLARATION, WAVEFORM_DECLARATION, PLAYWAVE, WAITWAVE?
if self.update_local_awg_program[0]:
# Waveform declaration and playwave compiler prototypes.
waveform_declaration_setup = ''
playwave_setup = ''
# Should we place commas between waveforms?
first_waveform_declared = False
# Should there be a marker declaration in the beginning?
if any(self.waveform_has_markers):
# Add marker declaration.
waveform_declaration_setup += \
'wave w_m = marker({0}, 1);\n'.format(self.buffer_length)
# What waveforms should be declared with a marker?
self.declare_marker = [False] * self.n_ch
for n in range(0, self.highest_waveform_in_use, 2):
# For all channels
if n < self.highest_waveform_in_use-1:
if self.waveform_has_markers[n] or self.waveform_has_markers[n+1]:
self.declare_marker[n] = True
self.declare_marker[n+1] = True
elif n == self.highest_waveform_in_use-1:
# But, if this waveform is the highest waveform in use,
# and the following (non-existant) waveform has marker
# data, then do not declare markers on the higher part
# of the waveform pair.
if self.waveform_has_markers[n]:
self.declare_marker[n] = True
# How many waveforms should be declared?
# Remember that self.highest_waveform_in_use = 0 corresponds to no
# waveforms declared.
for n in range(0, self.highest_waveform_in_use):
# Is this waveform wasted? If len > 0, then no.
if len(self.loaded_waveforms[n]) > 0:
# TODO This here below is a variant waveform
# declaration using randomUniform. I've been told that
# using zeros might cause unwanted optimisation in the
# SeqC compiler, so that for instance the setVector
# command would not be able to correctly upload
# waveforms.
# 'wave w{0} = randomUniform({1},1e-4) + m1;\n'\
# .format(n+1, self.buffer_length)
if(self.declare_marker[n]):
waveform_declaration_setup += \
'wave w{0} = zeros({1}) + w_m;\n'\
.format(n+1, self.buffer_length)
else:
waveform_declaration_setup += \
'wave w{0} = zeros({1});\n'\
.format(n+1, self.buffer_length)
else:
# Waveform is wasted. Add markers or not?
if(self.declare_marker[n]):
waveform_declaration_setup += \
'wave w{0} = zeros({1}) + w_m; // Unused.\n'\
.format(n+1, self.buffer_length)
else:
waveform_declaration_setup += \
'wave w{0} = zeros({1}); // Unused.\n'\
.format(n+1, self.buffer_length)
# Waveform initial declaration / generation
if first_waveform_declared:
playwave_setup += ', {0}, w{0}'.format(n+1)
else:
# Declare the first waveform for playback
playwave_setup += '{0}, w{0}'.format(n+1)
first_waveform_declared = True
# The condition for checking the waveform declaration is covered
# by the playwave setup condition, thus the actions have been
# combined.
if playwave_setup != '':
self.local_awg_program.update({ \
'WAVEFORM_DECLARATION':waveform_declaration_setup + '\n', \
'PLAYWAVE':'\tplayWave('+playwave_setup+');\n', \
'WAITWAVE':'\twaitWave();\n'})
else:
# There are no waves to play, remove all instances related
                # to playing a wave. The HDAWG has a tendency to crash if this
# step is done improperly.
self.local_awg_program.update({ \
'WAVEFORM_DECLARATION':'', \
'PLAYWAVE':'', \
'WAITWAVE':''})
# Are there any changes to entry 1:
# WHILE_LOOP_START, WHILE_LOOP_END?
# (Aka: 'Is the measurement of some single-shot type?)'
if self.update_local_awg_program[1]:
# TODO: perform a check whether this is a single shot measurement.
# if( Single shot measurement )
''' TODO There is currently no setting which modifies this part of the generateSequencerProgram function. '''
self.local_awg_program.update({ \
'WHILE_LOOP_START':'while(true){\n', \
'WHILE_LOOP_END':'}\n\n'})
# else:
# self.local_awg_program.update({ \
# 'WHILE_LOOP_START':'', \
# 'WHILE_LOOP_END':''})
# Are there any changes to entry 2:
# WAIT_FOR_INITIAL_TRIGGER, DELAY_BEFORE_LOOP_END,
# WAIT_FOR_TRIGGER_TO_REPEAT?
if self.update_local_awg_program[2]:
# How and when should the HDAWG play the sequencer?
trigger_mode = self.getValue('Run mode')
if trigger_mode == 'Play once, then external trigger':
# The 'Play once, then external trigger' option is very similar
# to the external trigger apart from playing the AWG once
# to initiate the measurement cycle.
self.local_awg_program.update({ \
'WAIT_FOR_INITIAL_TRIGGER':'', \
'WAIT_FOR_TRIGGER_TO_REPEAT':'\twaitDigTrigger(1);\n', \
'DELAY_BEFORE_LOOP_END':''})
elif trigger_mode == 'Internal trigger':
# On internal trigger, set up a delay at the end of
# the sequencer program.
# Trash the 'wait_for_trigger' tags.
self.local_awg_program.update({ \
'WAIT_FOR_INITIAL_TRIGGER':'', \
'WAIT_FOR_TRIGGER_TO_REPEAT':''})
# At this point in time, the isFinalCall subfunction
# already checked and verified the internal repetition delay
# if any. If the "returned" verified_internal_delay_period is
# negative, and the checkInternalRepetitionRateValid function
# did not halt the program - then perform the following action:
if self.verified_internal_delay_period < 0:
# The checked internal delay period is negative ergo
# impossible to represent.
self.local_awg_program.update({ \
'DELAY_BEFORE_LOOP_END': \
'\t// Invalid internal repetition delay.\n'})
elif self.getValue('Use oscillator-based repetition delay'):
# Insert oscillator waiting code
self.local_awg_program.update({ \
'DELAY_BEFORE_LOOP_END':'\twaitSineOscPhase(2);\n'})
else:
# Insert the calculated wait delay before the final loop
# as done by the checkInternalRepetitionRateValid function.
internal_delay_period = self.verified_internal_delay_period
internal_trigger_waiting = \
int(round(internal_delay_period * sequencer_clk))
self.local_awg_program.update({ \
'DELAY_BEFORE_LOOP_END': \
'\twait(' + str(internal_trigger_waiting) + ');\n'})
elif trigger_mode == 'External trigger':
# On external trigger, the AWG will halt its execution in the
# beginning of the sequencer program. It proceeds to await an
# external triggering signal.
self.local_awg_program.update({ \
'WAIT_FOR_INITIAL_TRIGGER':'\twaitDigTrigger(1);\n', \
'WAIT_FOR_TRIGGER_TO_REPEAT':'', \
'DELAY_BEFORE_LOOP_END':''})
else:
raise ValueError( \
"Unknown run mode acquired, there is likely " + \
"an error in the driver .ini-file.")
# Are there any changes to entry 3:
# SYNCHRONISE_TO_BEATING_FREQUENCY?
if self.update_local_awg_program[3]:
# Synchronise to beating frequency to minimise inter-device jitter?
if self.getValue('Minimise inter-device asynchronous jitter'):
self.local_awg_program.update({ \
'SYNCHRONISE_TO_BEATING_FREQUENCY':'\twaitSineOscPhase(1);\n'})
else:
self.local_awg_program.update({ \
'SYNCHRONISE_TO_BEATING_FREQUENCY':''})
# Are there any changes to entry 4:
# START_TRIGGER_PULSE, END_TRIGGER_PULSE?
if self.update_local_awg_program[4]:
# Sequencer triggers
sequencer_trigger = self.getValue('Sequencer triggers')
if sequencer_trigger == 'Send at AWG program | |
# <gh_stars>0
#!/usr/bin/env python
"""
Graphical User Interface for inspecting images.
Author: <NAME>, 16 Jan 2009 - 29 Jun 2017
"""
import wx
from math import sqrt,atan2,sin,cos,pi,log10
from numpy import *
__version__ = "4.2.2" # show_image
class ImageViewer_Window (wx.Frame):
    """Top-level frame that hosts an ImageViewer panel plus navigation
    controls (first/back/next/last, live-follow checkbox, ordering and
    filename-filter widgets) and the File/Edit/Orientation menus.

    NOTE(review): this class relies on module-level helpers not visible in
    this chunk -- `getmtime`, `next_file`, `newer_file`, `exist_files`,
    `numimage`, `DB.dbget` and the global `app` -- presumably defined
    elsewhere in the file; verify before refactoring.
    """
    # Modification time of the currently displayed image file (0 = none loaded).
    image_timestamp = 0
    def __init__(self,show=True,image_file="",mask_file=""):
        """
        show: display the window immediately
        image_file: optional image to load on startup (ignored if the file
            does not exist)
        mask_file: optional mask overlay to load on startup (ignored if the
            file does not exist)
        Window size/position and orientation are restored from the last
        saved configuration file, overriding defaults.
        """
        wx.Frame.__init__(self,parent=None,size=(425,340))
        self.Bind (wx.EVT_CLOSE,self.OnClose)
        # Menus
        menuBar = wx.MenuBar()
        menu = wx.Menu()
        menu.Append (101,"&Open Image...\tCtrl+O","File formats: TIFF,JPEG,PNG")
        self.Bind (wx.EVT_MENU,self.OpenImage,id=101)
        menu.Append (111,"&New Window...\tCtrl+N","Open Image in a new window")
        self.Bind (wx.EVT_MENU,self.NewWindow,id=111)
        menu.Append (102,"&Overlay Mask...","File formats: TIFF,JPEG,PNG")
        self.Bind (wx.EVT_MENU,self.OpenMask,id=102)
        menu.Append (103,"&Close Image")
        self.Bind (wx.EVT_MENU,self.CloseImage,id=103)
        menu.Append (104,"&Close Mask")
        self.Bind (wx.EVT_MENU,self.CloseMask,id=104)
        menu.AppendSeparator()
        menu.Append (107,"&Save Image As...\tCtrl+S","File formats: TIFF,JPEG,PNG")
        self.Bind (wx.EVT_MENU,self.SaveImage,id=107)
        menu.Append (108,"&Save Mask As...","File formats: TIFF,JPEG,PNG")
        self.Bind (wx.EVT_MENU,self.SaveMask,id=108)
        menu.AppendSeparator()
        menu.Append (110,"E&xit","Terminates this application.")
        self.Bind (wx.EVT_MENU,self.OnExit,id=110)
        menuBar.Append (menu,"&File")
        menu = wx.Menu()
        menu.Append (201,"&Copy Image","Puts full image into clipboard")
        self.Bind (wx.EVT_MENU,self.CopyImage,id=201)
        menuBar.Append (menu,"&Edit")
        # Radio-style orientation menu; kept as an attribute so
        # GetOrientation/SetOrientation can query and update it.
        menu = self.OrientationMenu = wx.Menu()
        style = wx.ITEM_RADIO
        menu.Append (301,"Original","Do not rotate image",style)
        menu.Append (302,"Rotated Clockwise","Rotate image by -90 deg",style)
        menu.Append (303,"Rotated Counter-clockwise","Rotate image by +90 deg",style)
        menu.Append (304,"Upside down","Rotate image by 180 deg",style)
        for id in range(301,305): self.Bind (wx.EVT_MENU,self.OnOrientation,id=id)
        menuBar.Append (menu,"&Orientation")
        self.SetMenuBar (menuBar)
        # Controls
        self.CreateStatusBar()
        self.panel = wx.Panel(self)
        self.ImageViewer = ImageViewer (self.panel)
        self.LiveImage = wx.CheckBox (self.panel,label="Live")
        self.LiveImage.ToolTip = wx.ToolTip("Follow the data collection, show latest image")
        self.First = wx.Button(self.panel,label="|<",size=(40,-1))
        self.First.ToolTip = wx.ToolTip("Go to the first image in current directory")
        self.Bind (wx.EVT_BUTTON,self.OnFirst,self.First)
        self.Back = wx.Button(self.panel,label="< Back")
        self.Back.ToolTip = wx.ToolTip("Go to the previous image in current directory")
        self.Bind (wx.EVT_BUTTON,self.OnBack,self.Back)
        self.Next = wx.Button(self.panel,label="Next >")
        self.Next.ToolTip = wx.ToolTip("Go to the next image in current directory")
        self.Bind (wx.EVT_BUTTON,self.OnNext,self.Next)
        self.Last = wx.Button(self.panel,label=">|",size=(40,-1))
        self.Last.ToolTip = wx.ToolTip("Go to the last image in current directory")
        self.Bind (wx.EVT_BUTTON,self.OnLast,self.Last)
        self.Order = wx.Choice(self.panel,choices=["By Name","By Time"])
        self.Order.ToolTip = wx.ToolTip("Step through images by name or timestamp?")
        self.Filter = wx.ComboBox(self.panel,size=(85,-1),style=wx.TE_PROCESS_ENTER,
            choices=["*.*","*.mccd","*.rx","*.tif","*.tiff"])
        self.Filter.Value = "*.*"
        self.Filter.ToolTip = wx.ToolTip("Filter pattern for image files, e.g. *.tif")
        # Layout
        self.layout = wx.BoxSizer(wx.VERTICAL)
        self.layout.Add (self.ImageViewer,proportion=1,flag=wx.EXPAND) # growable
        self.Controls = wx.BoxSizer(wx.HORIZONTAL)
        self.Controls.AddSpacer((5,5))
        self.Controls.Add (self.LiveImage,flag=wx.ALIGN_CENTER)
        self.Controls.AddSpacer((5,5))
        self.Controls.Add (self.First,flag=wx.ALIGN_CENTER)
        self.Controls.Add (self.Back,flag=wx.ALIGN_CENTER)
        self.Controls.Add (self.Next,flag=wx.ALIGN_CENTER)
        self.Controls.Add (self.Last,flag=wx.ALIGN_CENTER)
        self.Controls.AddSpacer((5,5))
        self.Controls.Add (self.Order,flag=wx.ALIGN_CENTER)
        self.Controls.AddSpacer((5,5))
        self.Controls.Add (self.Filter,flag=wx.ALIGN_CENTER)
        self.Controls.AddSpacer((5,5))
        self.layout.Add (self.Controls,flag=wx.EXPAND)
        self.panel.SetSizer(self.layout)
        # Restore last saved settings.
        name = "ImageViewer"
        self.config_file=wx.StandardPaths.Get().GetUserDataDir()+"/"+name+".py"
        self.config = wx.FileConfig (localFilename=self.config_file)
        state = self.config.Read('State')
        if state:
            # HACK: state is stored as a repr() string and restored via eval;
            # only safe because the config file is user-local, not untrusted.
            try: self.State = eval(state)
            except Exception,exception:
                print "Restore failed: %s: %s" % (exception,state)
        # Display images.
        from os.path import exists
        if exists(image_file): self.image_file = image_file
        if exists(mask_file): self.mask_file = mask_file
        # Initialization
        self.Orientation = self.ImageViewer.Orientation
        self.update_title()
        if show: self.Show()
        # One-shot timer; `update` re-arms it, giving a 1 s polling loop.
        self.timer = wx.Timer(self)
        self.Bind (wx.EVT_TIMER,self.update,self.timer)
        self.timer.Start(1000,oneShot=True)
    def OnFirst(self,event):
        """Go to the first image in the current directory"""
        self.live_image = False
        next = newer_file if self.order == "By Time" else next_file
        self.image_file = next(self.image_file,-1e6,self.filter)
    def OnBack(self,event):
        """Go to the previous (older) image in current directory"""
        self.live_image = False
        next = newer_file if self.order == "By Time" else next_file
        self.image_file = next(self.image_file,-1,self.filter)
    def OnNext(self,event):
        """Go to the next (newer) image in current directory"""
        self.live_image = False
        next = newer_file if self.order == "By Time" else next_file
        self.image_file = next(self.image_file,+1,self.filter)
    def OnLast(self,event):
        """Go to the last image in the current directory"""
        self.live_image = False
        next = newer_file if self.order == "By Time" else next_file
        self.image_file = next(self.image_file,+1e6,self.filter)
    def update(self,event=None):
        """Periodically called on timer: in "Live" mode, reload the image
        when a newer one is advertised or the file on disk changed."""
        # NOTE(review): `getmtime` is not imported in this chunk -- presumably
        # provided at module level elsewhere in the file; verify.
        if self.live_image and self.image_to_show and \
            (self.image_to_show != self.image_file\
            or getmtime(self.image_to_show) != self.image_timestamp):
            ##print "loading",self.image_to_show
            self.image_file = self.image_to_show
        # Re-arm the one-shot timer for the next poll in 1 s.
        self.timer = wx.Timer(self)
        self.Bind (wx.EVT_TIMER,self.update,self.timer)
        self.timer.Start(1000,oneShot=True)
    def get_live_image(self):
        """Follow the data collection"""
        return self.LiveImage.Value
    def set_live_image(self,value):
        self.LiveImage.Value = value
    live_image = property(get_live_image,set_live_image)
    def get_order(self):
        """Navigation order for stepping through images: "By Name" or "By Time"."""
        return self.Order.StringSelection
    def set_order(self,value):
        self.Order.StringSelection = value
    order = property(get_order,set_order)
    def get_filter(self):
        """Filename filter pattern for image files, e.g. "*.tif"."""
        return self.Filter.Value
    def set_filter(self,value):
        self.Filter.Value = value
    filter = property(get_filter,set_filter)
    def get_image_file(self):
        return getattr(self,"__image_file__","")
    def set_image_file(self,image_file):
        """Load `image_file`, display it, and remember its timestamp."""
        from os.path import exists
        from numimage import numimage
        try: image = numimage(image_file)
        except Exception,message:
            from sys import stderr
            stderr.write("%s: %s\n" % (image_file,message))
            image = None
        self.ImageViewer.Image = image
        self.image_timestamp = getmtime(image_file)
        self.__image_file__ = image_file
        self.update_title()
        ##print "image file: %r" % image_file
    image_file = property(get_image_file,set_image_file)
    def get_mask_file(self):
        return getattr(self.ImageViewer.Mask,"filename","")
    def set_mask_file(self,mask_file):
        """Load `mask_file` as the overlay mask (None if missing/unreadable)."""
        from os.path import exists
        from numimage import numimage
        if not exists(mask_file): mask = None
        else:
            try: mask = numimage(mask_file)
            except Exception,message:
                from sys import stderr
                stderr.write("%s: %s\n" % (mask_file,message))
                mask = None
        self.ImageViewer.Mask = mask
        self.update_title()
    mask_file = property(get_mask_file,set_mask_file)
    def update_title(self):
        """Displays the file name of the current image in the title bar of the
        window."""
        from os.path import basename
        title = ""
        if self.image_file: title += self.image_file[-80:]+", "
        if self.mask_file: title += "mask "+basename(self.mask_file)
        if len(title) < 40: title = "Image Viever - "+title
        title = title.strip("-, ")
        self.Title = title
    def OpenImage(self,event):
        """Open an image in the current Window"""
        from os.path import dirname,basename
        dlg = wx.FileDialog(self,"Open Image",
            wildcard="Image Files (*.mccd;*.tif;*.tiff;*.rx;*.png;*.jpg)|"\
            "*.mccd;*.tif;*.tiff;*.rx;*.png;*.jpg",
            defaultDir=dirname(self.image_file),
            defaultFile=basename(self.image_file),
            style=wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            filename = str(dlg.Path)
            self.image_file = filename
        dlg.Destroy()
    def NewWindow(self,event):
        """Open an image in a new Window"""
        from os.path import dirname,basename
        dlg = wx.FileDialog(self,"Open Image",
            wildcard="Image Files (*.mccd;*.tif;*.tiff;*.png;*.jpg)|"\
            "*.mccd;*.tif;*.tiff;*.png;*.jpg",
            defaultDir=dirname(self.image_file),
            defaultFile=basename(self.image_file),
            style=wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            filename = str(dlg.Path)
            # `app` is the application object; presumably defined at module
            # level elsewhere in this file -- verify.
            app.OpenFile(filename)
        dlg.Destroy()
    @property
    def image_to_show(self):
        """Latest existing image advertised in the "ImageViewer.images"
        database entry, or "" if none -- used by Live mode."""
        from DB import dbget
        from os.path import exists
        from numpy import array
        filenames = dbget("ImageViewer.images")
        try: filenames = array(eval(filenames))
        except: return ""
        filenames = filenames[array(exist_files(filenames))]
        if len(filenames) == 0: return ""
        return filenames[-1]
    def OpenMask(self,event):
        "Called from menu File/Overlay Mask..."
        from os.path import dirname,basename
        dlg = wx.FileDialog(self,"Open Image",
            wildcard="Image Files (*.png;*.tif;*.tiff;*.jpg)|"\
            "*.png;*.tif;*.tiff;*.jpg",
            defaultDir=dirname(self.mask_file),
            defaultFile=basename(self.mask_file),
            style=wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK: self.mask_file = str(dlg.Path)
        dlg.Destroy()
    def CloseImage(self,event):
        "Called from menu File/Close Image"
        self.ImageViewer.Image = None
        self.image_file = ""
        self.image_timestamp = 0
        self.update_title()
    def CloseMask(self,event):
        "Called from menu File/Close Mask"
        self.mask_file = ""
        self.update_title()
    def SaveImage(self,event):
        "Called from menu File/Save Image As..."
        dlg = wx.FileDialog(self,"Save Image As",wildcard="*.tif;*.png;*.jpg",
            defaultFile=self.image_file,style=wx.SAVE|wx.OVERWRITE_PROMPT)
        if dlg.ShowModal() == wx.ID_OK:
            self.image_file = str(dlg.Path)
            image = self.ImageViewer.Image
            image.save (self.image_file)
        dlg.Destroy()
    def SaveMask(self,event):
        "Called from menu File/Save Mask As..."
        if not self.ImageViewer.Mask: return
        dlg = wx.FileDialog(self,"Save Mask As",wildcard="*.png;*.tif;*.jpg",
            defaultFile=self.mask_file,style=wx.SAVE|wx.OVERWRITE_PROMPT)
        if dlg.ShowModal() == wx.ID_OK:
            self.mask_file = str(dlg.Path)
            mask = self.ImageViewer.Mask
            mask.save (self.mask_file)
        dlg.Destroy()
    def CopyImage(self,event):
        "Called from menu Edit/Copy Image"
        bitmap = wx.BitmapFromImage (self.ImageViewer.Image)
        bmpdo = wx.BitmapDataObject(bitmap)
        if wx.TheClipboard.Open():
            wx.TheClipboard.SetData(bmpdo)
            wx.TheClipboard.Close()
        else: wx.MessageBox("Unexpected clipboard problem","Error")
    def OnOrientation(self,event):
        """Menu handler: map the selected Orientation menu item to degrees."""
        id = event.GetId()
        if id == 301: orientation = 0 # As image
        if id == 302: orientation = -90 # Rotated Clockwise
        if id == 303: orientation = +90 # Rotated Counter-clockwise
        if id == 304: orientation = 180 # Upside down
        self.Orientation = orientation
        self.ImageViewer.Orientation = orientation
    def GetOrientation(self):
        """Reads the image rotation as selected by the 'Orientation' menu.
        Returns either 0,-90,90 or 180"""
        if self.OrientationMenu.IsChecked(301): return 0
        if self.OrientationMenu.IsChecked(302): return -90
        if self.OrientationMenu.IsChecked(303): return 90
        if self.OrientationMenu.IsChecked(304): return 180
    def SetOrientation(self,value):
        """Updates the 'Orientation' menu and the displayed image"""
        if value == 0: self.OrientationMenu.Check(301,True)
        if value == -90: self.OrientationMenu.Check(302,True)
        if value == +90: self.OrientationMenu.Check(303,True)
        if value == 180: self.OrientationMenu.Check(304,True)
    Orientation = property (GetOrientation,SetOrientation,doc=
        "Image rotation as defined by the 'Orientation' menu")
    def GetState(self):
        "This is to save the current settings of the window"
        state = {}
        state["Size"] = self.Size
        state["Position"] = self.Position
        state["ImageViewer.State"] = self.ImageViewer.State
        state["mask_file"] = self.mask_file
        state["order"] = self.order
        state["filter"] = self.filter
        return state
    def SetState(self,state):
        "This is to restore the current state of the window"
        ##print "MainWindow: restoring %r" % state
        # HACK: assigns each saved key via exec; keys come from our own
        # config file written by GetState, not from untrusted input.
        for key in state: exec("self."+key+"="+repr(state[key]))
    State = property(GetState,SetState,doc="settings of the window")
    def OnClose(self,event):
        "Called on File/Exit or when the window's close button is clicked"
        # Save settings for next time.
        from os.path import exists,dirname
        from os import makedirs
        directory = dirname(self.config_file)
        if not exists(directory): makedirs(directory)
        self.config.Write ('State',repr(self.State))
        self.config.Flush()
        app.CloseWindow(self)
    def OnExit(self,event):
        "Called on File/Exit or when the window's close button is clicked"
        # Save settings for next time.
        self.config.Write ('State',repr(self.State))
        self.config.Flush()
        app.ExitApp()
class ImageViewer (wx.Panel):
"""Grapical User Interface for inspecting images."""
def __init__(self,parent):
"""Parent: top level window"""
wx.Panel.__init__(self,parent)
# Controls
self.ImageWindow = ImageWindow(self)
choices = ["200%","100%","50%","33%","25%","Fit Width"]
self.ScaleFactorControl = wx.ComboBox(self,value="100%",
choices=choices,style=wx.CB_DROPDOWN|wx.TE_PROCESS_ENTER)
self.SaturationLevelText = wx.TextCtrl (self,size=(50,-1),
style=wx.TE_PROCESS_ENTER)
self.SaturationValue_modified = False
| |
import warnings

import numpy as np
import scipy as sp
import scipy.ndimage as spim
import scipy.sparse as sprs
from scipy.sparse import csgraph

import porespy as ps

from openpnm.utils import PrintableDict, logging, Workspace
ws = Workspace()
logger = logging.getLogger(__name__)
def find_neighbor_sites(sites, am, flatten=True, include_input=False,
                        logic='or'):
    r"""
    Given a symmetric adjacency matrix, finds all sites that are connected
    to the input sites.

    Parameters
    ----------
    sites : array_like
        Indices of the sites whose neighbors are sought.
    am : scipy.sparse matrix
        The adjacency matrix of the network. Must be symmetrical such that if
        sites *i* and *j* are connected, the matrix contains non-zero values
        at locations (i, j) and (j, i).
    flatten : boolean
        If ``True`` (default) the returned result is a compressed array of all
        neighbors, or a list of lists with each sub-list containing the
        neighbors for each input site. Note that an *unflattened* list might
        be slow to generate since it is a Python ``list`` rather than a Numpy
        array.
    include_input : boolean
        If ``False`` (default) the input sites will be removed from the result.
    logic : string
        Specifies logic to filter the resulting list. Options are:

        **'or'** : (default) All neighbors of the input sites. Also known as
        the 'union' in set theory or 'any' in boolean logic. 'union' and
        'any' are accepted and treated as 'or'.

        **'xor'** : Only neighbors of one and only one input site. Useful for
        finding sites not shared by any of the input sites. 'exclusive_or'
        is also accepted.

        **'xnor'** : Neighbors shared by two or more input sites. Equivalent
        to 'or' minus 'xor'. 'nxor' is also accepted.

        **'and'** : Only neighbors shared by all input sites. Also known as
        'intersection' in set theory and (sometimes) 'all' in boolean logic.
        'intersection' and 'all' are accepted and treated as 'and'.

    Returns
    -------
    An array containing the neighboring sites filtered by the given logic. If
    ``flatten`` is ``False`` then the result is a list of lists containing the
    neighbors of each input site.

    See Also
    --------
    find_complement

    Notes
    -----
    The ``logic`` options are applied to neighboring sites only, thus it is
    not possible to obtain sites that are part of the global set but not
    neighbors.

    This function previously used the deprecated NumPy aliases exposed on the
    top-level ``scipy`` namespace (``sp.array``, ``sp.unique``, ...); these
    were removed from SciPy, so the canonical ``numpy`` functions are used
    instead.
    """
    if am.format != 'lil':
        am = am.tolil(copy=False)
    n_sites = am.shape[0]
    rows = [am.rows[i] for i in np.array(sites, ndmin=1)]
    if len(rows) == 0:
        return []
    neighbors = np.hstack(rows).astype(np.int64)  # Flatten list to apply logic
    if logic in ['or', 'union', 'any']:
        neighbors = np.unique(neighbors)
    elif logic in ['xor', 'exclusive_or']:
        # Sites appearing exactly once are neighbors of exactly one input
        neighbors = np.unique(np.where(np.bincount(neighbors) == 1)[0])
    elif logic in ['xnor', 'nxor']:
        # Sites appearing more than once are shared by 2+ inputs
        neighbors = np.unique(np.where(np.bincount(neighbors) > 1)[0])
    elif logic in ['and', 'all', 'intersection']:
        neighbors = set(neighbors)
        for row in rows:
            neighbors.intersection_update(row)
        neighbors = np.array(list(neighbors), dtype=np.int64, ndmin=1)
    else:
        raise Exception('Specified logic is not implemented')
    # Deal with removing inputs or not
    mask = np.zeros(shape=n_sites, dtype=bool)
    mask[neighbors] = True
    if not include_input:
        mask[sites] = False
    # Finally flatten or not
    if flatten:
        neighbors = np.where(mask)[0]
    else:
        if neighbors.size > 0:
            # Filter each input site's row down to the surviving neighbors
            for i in range(len(rows)):
                vals = np.array(rows[i], dtype=np.int64)
                rows[i] = vals[mask[vals]]
            neighbors = rows
        else:
            neighbors = [np.array([], dtype=np.int64) for i in range(len(sites))]
    return neighbors
def find_neighbor_bonds(sites, im, flatten=True, logic='or'):
    r"""
    Given an incidence matrix, finds all bonds that are connected to the
    input sites.

    Parameters
    ----------
    sites : array_like
        Indices of the sites whose neighboring bonds are sought.
    im : scipy.sparse matrix
        The incidence matrix of the network. Must be shaped as (N-sites,
        N-bonds), with non-zeros indicating which sites are connected.
    flatten : boolean (default is ``True``)
        Indicates whether the returned result is a compressed array of all
        neighbors, or a list of lists with each sub-list containing the
        neighbors for each input site. Note that an *unflattened* list might
        be slow to generate since it is a Python ``list`` rather than a Numpy
        array.
    logic : string
        Specifies logic to filter the resulting list. Options are:

        **'or'** : (default) All bonds attached to the input sites. Also
        known as the 'union' in set theory or 'any' in boolean logic.

        **'xor'** : Only bonds attached to one and only one input site.
        'exclusive_or' is also accepted.

        **'xnor'** : Bonds shared by two or more input sites. 'nxor' and
        'shared' are also accepted.

        **'and'** : Only bonds shared by all input sites. 'intersection' and
        'all' are accepted and treated as 'and'.

    Returns
    -------
    An array containing the neighboring bonds filtered by the given logic. If
    ``flatten`` is ``False`` then the result is a list of lists containing the
    neighbors of each given input site.

    See Also
    --------
    find_complement

    Notes
    -----
    The ``logic`` options are applied to neighboring bonds only, thus it is
    not possible to obtain bonds that are part of the global set but not
    neighbors.

    Fixes relative to the previous revision: ``'nxor'`` is now accepted (the
    docstring always promised it but only ``'xnor'``/``'shared'`` worked), and
    the deprecated top-level ``scipy`` NumPy aliases were replaced by their
    ``numpy`` equivalents.
    """
    if im.format != 'lil':
        im = im.tolil(copy=False)
    rows = [im.rows[i] for i in np.array(sites, ndmin=1, dtype=np.int64)]
    if len(rows) == 0:
        return []
    neighbors = np.hstack(rows).astype(np.int64)
    # Each bond appears twice in im (once per end site), hence nnz/2
    n_bonds = int(im.nnz / 2)
    if logic in ['or', 'union', 'any']:
        neighbors = np.unique(neighbors)
    elif logic in ['xor', 'exclusive_or']:
        # Bonds seen exactly once touch exactly one input site
        neighbors = np.unique(np.where(np.bincount(neighbors) == 1)[0])
    elif logic in ['xnor', 'nxor', 'shared']:
        # Bonds seen more than once are shared by 2+ input sites
        neighbors = np.unique(np.where(np.bincount(neighbors) > 1)[0])
    elif logic in ['and', 'all', 'intersection']:
        neighbors = set(neighbors)
        for row in rows:
            neighbors.intersection_update(row)
        neighbors = np.array(list(neighbors), dtype=np.int64, ndmin=1)
    else:
        raise Exception('Specified logic is not implemented')
    if flatten is False:
        if neighbors.size > 0:
            mask = np.zeros(shape=n_bonds, dtype=bool)
            mask[neighbors] = True
            # Filter each input site's bond list down to surviving bonds
            for i in range(len(rows)):
                vals = np.array(rows[i], dtype=np.int64)
                rows[i] = vals[mask[vals]]
            neighbors = rows
        else:
            neighbors = [np.array([], dtype=np.int64) for i in range(len(sites))]
    return neighbors
def find_connected_sites(bonds, am, flatten=True, logic='or'):
r"""
Given an adjacency matrix, finds which sites are connected to the input
bonds.
Parameters
----------
am : scipy.sparse matrix
The adjacency matrix of the network. Must be symmetrical such that if
sites *i* and *j* are connected, the matrix contains non-zero values
at locations (i, j) and (j, i).
flatten : boolean (default is ``True``)
Indicates whether the returned result is a compressed array of all
neighbors, or a list of lists with each sub-list containing the
neighbors for each input site. Note that an *unflattened* list might
be slow to generate since it is a Python ``list`` rather than a Numpy
array.
logic : string
Specifies logic to filter the resulting list. Options are:
**'or'** : (default) All neighbors of the input bonds. This is also
known as the 'union' in set theory or (sometimes) 'any' in boolean
logic. Both keywords are accepted and treated as 'or'.
**'xor'** : Only neighbors of one and only one input bond. This is
useful for finding the sites that are not shared by any of the input
bonds. 'exclusive_or' is also accepted.
**'xnor'** : Neighbors that are shared by two or more input bonds. This
is equivalent to finding all neighbors with 'or', minus those found
with 'xor', and is useful for finding neighbors that the inputs have
in common. 'nxor' is also accepted.
**'and'** : Only neighbors shared by all input bonds. This is also
known as 'intersection' in set theory and (somtimes) as 'all' in
boolean logic. Both keywords are accepted and treated as | |
<gh_stars>10-100
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Dict, List, Optional, Union
import aws_cdk.aws_dms as dms
from aws_cdk.aws_iam import PolicyStatement, Role, ServicePrincipal
from aws_ddk_core.config import Config
from aws_ddk_core.resources.commons import BaseSchema
from constructs import Construct
from marshmallow import fields
_logger: logging.Logger = logging.getLogger(__name__)
class DMSEndpointConfiguration(BaseSchema):
    """DDK DMS Endpoint Marshmallow schema.

    Validates environment config loaded for ``DMSFactory.endpoint``.
    Declares no endpoint-specific fields of its own; only the common
    fields inherited from ``BaseSchema`` apply.
    """
class DMSReplicationTaskConfiguration(BaseSchema):
    """DDK DMS ReplicationTask Marshmallow schema.

    Validates environment config loaded for ``DMSFactory.replication_task``.
    Declares no task-specific fields of its own; only the common fields
    inherited from ``BaseSchema`` apply.
    """
class DMSReplicationInstanceConfiguration(BaseSchema):
    """DDK DMS ReplicationInstance Marshmallow schema.

    Fields that may be overridden per environment in ddk.json for
    ``DMSFactory.replication_instance``.
    """
    # Compute/memory capacity class of the replication instance.
    replication_instance_class = fields.Str()
    # Initially allocated storage (in gigabytes).
    allocated_storage = fields.Int()
    # Whether the instance spans multiple availability zones.
    multi_az = fields.Bool()
    # Whether the instance is assigned a public IP address.
    publicly_accessible = fields.Bool()
class DMSEndpointS3SettingsConfiguration(BaseSchema):
    """DDK DMS Endpoint S3 Settings Marshmallow schema.

    Fields that may be overridden per environment in ddk.json for
    ``DMSFactory.endpoint_settings_s3``.
    """
    # Enables Parquet page/row-group statistics (NULL, DISTINCT, MAX, MIN).
    enable_statistics = fields.Bool()
    # Maximum size (in KB) of any .csv file created during a full load.
    max_file_size = fields.Int()
class DMSFactory:
"""
Class factory create and configure DMS DDK resources,
including endpoints, replication tasks and replication instances.
"""
@staticmethod
def endpoint_settings_s3(
scope: Construct,
id: str,
environment_id: str,
bucket_name: str,
bucket_folder: Optional[str] = None,
service_access_role_arn: Optional[str] = None,
external_table_definition: Optional[str] = None,
enable_statistics: Optional[bool] = None,
max_file_size: Optional[int] = None,
**endpoint_s3_props: Any,
) -> dms.CfnEndpoint.S3SettingsProperty:
"""
Create and configure DMS endpoint settings for s3.
This construct allows to configure parameters of the dms endpoint using ddk.json
configuration file depending on the `environment_id` in which the function is used.
Supported parameters are: `enable_statistics` and `max_file_size`
Parameters
----------
scope : Construct
Scope within which this construct is defined
id: str
Identifier of the destination
environment_id: str
Identifier of the environment
bucket_name: str
The name of the S3 bucket.
bucket_folder: Optional[str]
An optional parameter to set a folder name in the S3 bucket.
If provided, tables are created in the path *bucketFolder* / *schema_name* / *table_name* / .
If this parameter isn’t specified, the path used is *schema_name* / *table_name* / .
service_access_role_arn: Optional[str]
An IAM role that should be able to access the specified bucket.
If no bucket is specified a role with required permissions will
be created for you.
external_table_definition: Optional[str]
The external table definition.
Conditional: If S3 is used as a source then ExternalTableDefinition is required.
enable_statistics: Optional[bool]
A value that enables statistics for Parquet pages and row groups.
Choose true to enable statistics, false to disable.
Statistics include NULL , DISTINCT , MAX , and MIN values.
This parameter defaults to true .
This value is used for .parquet file format only.
max_file_size: Optional[int]
A value that specifies the maximum size (in KB) of any .csv file to be created
while migrating to an S3 target during full load
**endpoint_settings_s3_props: Any
Additional properties. For complete list of properties refer to CDK Documentation -
DMS Endpoints:
https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_dms/CfnEndpoint.html#s3settingsproperty
Returns
-------
dms.CfnEndpoint.S3SettingsProperty: dms.CfnEndpoint.S3SettingsProperty:
DMS Endpoint Settings for S3
"""
# Load and validate the config
endpoint_s3_config_props: Dict[str, Any] = DMSEndpointS3SettingsConfiguration().load(
Config().get_resource_config(
environment_id=environment_id,
id=id,
),
partial=["removal_policy"],
)
# Logic
if not service_access_role_arn:
service_access_role = Role(
scope, f"{id}-dms-service-role", assumed_by=ServicePrincipal("dms.amazonaws.com")
)
service_access_role.add_to_policy(
PolicyStatement(
resources=[f"arn:aws:s3:::{bucket_name}/*"],
actions=["s3:PutObject", "s3:DeleteObject", "s3:PutObjectTagging", "s3:GetObject"],
)
)
service_access_role.add_to_policy(
PolicyStatement(resources=[f"arn:aws:s3:::{bucket_name}"], actions=["s3:ListBucket"])
)
service_access_role_arn = service_access_role.role_arn
# Collect args
endpoint_s3_props = {
"bucket_name": bucket_name,
"bucket_folder": bucket_folder,
"enable_statistics": enable_statistics,
"external_table_definition": external_table_definition,
"max_file_size": max_file_size,
"service_access_role_arn": service_access_role_arn,
**endpoint_s3_props,
}
# Explicit ("hardcoded") props should always take precedence over config
for key, value in endpoint_s3_props.items():
if value is not None:
endpoint_s3_config_props[key] = value
# create dms endpoint
_logger.debug(f" dms s3 endpoint properties: {endpoint_s3_props}")
settings: dms.CfnEndpoint.S3SettingsProperty = dms.CfnEndpoint.S3SettingsProperty(**endpoint_s3_config_props)
return settings
@staticmethod
def endpoint(
scope: Construct,
id: str,
environment_id: str,
endpoint_type: str,
engine_name: str,
s3_settings: Union[dms.CfnEndpoint.S3SettingsProperty, None],
**endpoint_props: Any,
) -> dms.CfnEndpoint:
"""
Create and configure DMS endpoint.
This construct allows to configure parameters of the dms endpoint using ddk.json
configuration file depending on the `environment_id` in which the function is used.
Supported parameters are: ...
Parameters
----------
scope : Construct
Scope within which this construct is defined
id: str
Identifier of the destination
environment_id: str
Identifier of the environment
endpoint_type: str
The type of endpoint. Valid values are `source` and `target`.
engine_name: str
The type of engine for the endpoint, depending on the EndpointType value.
Valid values : mysql | oracle | postgres | mariadb | aurora | aurora-postgresql
| opensearch | redshift | s3 | db2 | azuredb | sybase | dynamodb | mongodb
| kinesis | kafka | elasticsearch | docdb | sqlserver | neptune
s3_settings: Union[dms.S3SettingsProperty, None]
Settings in JSON format for the source and target Amazon S3 endpoint.
For more information about other available settings, see
https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_dms/CfnEndpoint.html#s3settingsproperty
**endpoint_props: Any
Additional properties. For complete list of properties refer to CDK Documentation -
DMS Endpoints:
https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_dms/CfnEndpoint.html
Returns
-------
dms.CfnEndpoint: dms.CfnEndpoint
A DMS Endpoint
"""
# Load and validate the config
endpoint_config_props: Dict[str, Any] = DMSEndpointConfiguration().load(
Config().get_resource_config(
environment_id=environment_id,
id=id,
),
partial=["removal_policy"],
)
# Collect args
endpoint_props = {
"endpoint_type": endpoint_type,
"engine_name": engine_name,
"s3_settings": s3_settings,
**endpoint_props,
}
# Explicit ("hardcoded") props should always take precedence over config
for key, value in endpoint_props.items():
if value is not None:
endpoint_config_props[key] = value
# create dms endpoint
_logger.debug(f" dms endpoint properties: {endpoint_props}")
endpoint: dms.CfnEndpoint = dms.CfnEndpoint(scope, id, **endpoint_config_props)
return endpoint
@staticmethod
def replication_task(
scope: Construct,
id: str,
environment_id: str,
replication_instance_arn: str,
source_endpoint_arn: str,
target_endpoint_arn: str,
table_mappings: str,
migration_type: str = "full-load",
replication_task_settings: Optional[str] = None,
**replication_task_props: Any,
) -> dms.CfnEndpoint:
"""
Create and configure DMS replication task.
This construct allows to configure parameters of the dms replication task using ddk.json
configuration file depending on the `environment_id` in which the function is used.
Supported parameters are: ...
Parameters
----------
scope : Construct
Scope within which this construct is defined
id: str
Identifier of the destination
environment_id: str
Identifier of the environment
migration_type: str
The migration type. Valid values: full-load | cdc | full-load-and-cdc
Default: 'full-load'
replication_instance_arn: str
The Amazon Resource Name (ARN) of a replication instance.
source_endpoint_arn: str
An Amazon Resource Name (ARN) that uniquely identifies the source endpoint.
target_endpoint_arn: str
An Amazon Resource Name (ARN) that uniquely identifies the target endpoint.
table_mappings: str
The table mappings for the task, in JSON format.
replication_task_settings: Optional[str]
Overall settings for the task, in JSON format. For more information,
see https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html
**replication_task_props: Any
Additional properties. For complete list of properties refer to CDK Documentation -
DMS Endpoints:
https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_dms/CfnReplicationTask.html
Returns
-------
dms.CfnReplicationTask: dms.CfnReplicationTask
A DMS Replication Task
"""
# Load and validate the config
replication_task_config_props: Dict[str, Any] = DMSReplicationTaskConfiguration().load(
Config().get_resource_config(
environment_id=environment_id,
id=id,
),
partial=["removal_policy"],
)
# Collect args
replication_task_props = {
"migration_type": migration_type,
"replication_instance_arn": replication_instance_arn,
"replication_task_settings": replication_task_settings,
"source_endpoint_arn": source_endpoint_arn,
"target_endpoint_arn": target_endpoint_arn,
"table_mappings": table_mappings,
**replication_task_props,
}
# Explicit ("hardcoded") props should always take precedence over config
for key, value in replication_task_props.items():
if value is not None:
replication_task_config_props[key] = value
# create dms endpoint
_logger.debug(f" dms replication task properties: {replication_task_props}")
replication_task: dms.CfnReplicationTask = dms.CfnReplicationTask(scope, id, **replication_task_config_props)
return replication_task
@staticmethod
def replication_instance(
scope: Construct,
id: str,
environment_id: str,
replication_instance_class: str,
allocated_storage: Optional[str] = None,
allow_major_version_upgrade: Optional[bool] = False,
auto_minor_version_upgrade: Optional[bool] = False,
availability_zone: Optional[str] = None,
engine_version: Optional[str] = None,
kms_key_id: Optional[str] = None,
multi_az: Optional[bool] = False,
preferred_maintenance_window: Optional[str] = None,
publicly_accessible: Optional[bool] = False,
replication_instance_identifier: Optional[str] = None,
replication_subnet_group_identifier: Optional[str] = None,
resource_identifier: Optional[str] = None,
vpc_security_group_ids: Optional[List[str]] = None,
**replication_instance_props: Any,
) -> dms.CfnReplicationInstance:
"""
Create and configure DMS replication instance.
This construct allows to configure parameters of the dms replication instance using ddk.json
configuration file depending on the `environment_id` in which the function is used.
Supported parameters are: ...
Parameters
----------
scope : Construct
Scope within which this construct is defined
id: str
Identifier of the destination
environment_id: str
Identifier of the environment
replication_instance_class: str
The compute and memory capacity of the replication instance
as defined for the specified replication instance class.
allocated_storage: Optional[str]
The amount of storage (in gigabytes) to be initially | |
import numpy as np
from dlbeamformer_utilities import compute_mvdr_tf_beamformers, check_distortless_constraint, compute_steering_vectors,\
compute_null_controlling_tf_beamformers, compute_null_controlling_minibatch_tf_beamformers,\
compute_null_controlling_tf_beamformers_2
from tqdm import tnrange, tqdm
from sklearn.linear_model import orthogonal_mp_gram
from omp import omp
class DictionaryLearningBeamformer(object):
    """Dictionary of null-controlling time-frequency beamformer weights.

    ``fit`` computes ``n_atoms_each_config`` weight sets ("atoms") per
    training configuration with a null-controlling beamformer design;
    ``choose_weights`` then selects, for a given look direction, the atom
    with the lowest average output energy on the observed data.
    """

    def __init__(self, array_geometry, sampling_frequency,
                 source_angles, stft_params, angle_grid,
                 diagonal_loading_param=1, bf_type="NC"):
        """
        Parameters
        ----------
        array_geometry : 2-D numpy array describing the microphone array geometry
        sampling_frequency : sampling frequency in Hz
        source_angles : sequence of dicts with "elevation" and "azimuth" keys,
            one per desired look direction
        stft_params : dictionary of STFT transform parameters including
            stft_params["n_samples_per_frame"], stft_params["n_fft_bins"],
            stft_params["hop_size"] and stft_params["window"]
        angle_grid : dict with "elevation" and "azimuth" evaluation grids
        diagonal_loading_param : diagonal loading used in beamformer design
        bf_type : type of the beamformer
        """
        self.array_geometry = array_geometry
        self.sampling_frequency = sampling_frequency
        self.source_angles = source_angles
        self.stft_params = stft_params
        self.angle_grid = angle_grid
        self.diagonal_loading_param = diagonal_loading_param
        self.bf_type = bf_type
        # Learned dictionary, populated by fit(); shape
        # (n_sources, n_fft_bins, n_mics, n_configurations * n_atoms_each_config).
        self.weights_ = None
        self.source_steering_vectors = self._compute_source_steering_vectors()
        self.steering_vectors = self._compute_steering_vectors()

    def _compute_source_steering_vectors(self):
        # One steering-vector array per configured look direction.
        source_steering_vectors = []
        for source_angle in self.source_angles:
            v = compute_steering_vectors(
                self.array_geometry, self.sampling_frequency,
                self.stft_params["n_fft_bins"],
                source_angle["elevation"], source_angle["azimuth"])
            source_steering_vectors.append(v)
        return source_steering_vectors

    def _compute_steering_vectors(self):
        # Steering vectors over the full evaluation angle grid.
        return compute_steering_vectors(
            self.array_geometry, self.sampling_frequency,
            self.stft_params["n_fft_bins"],
            self.angle_grid["elevation"], self.angle_grid["azimuth"])

    def _compute_weights(self, training_data, desired_null_width,
                         null_constraint_threshold,
                         eigenvalue_percentage_threshold=0.99,
                         batch_size=1, n_atoms_each_config=1):
        """Build the weight dictionary from interference training data.

        training_data[i] is a pair (tf_sample_covariances, angles) where
        tf_sample_covariances has shape (n_samples, n_fft_bins, n_mics, n_mics)
        and angles is a dict with "elevation" and "azimuth" of the nulls.

        Returns
        -------
        numpy array of shape
        (n_sources, n_fft_bins, n_mics, n_configurations * n_atoms_each_config).
        """
        n_configurations = len(training_data)
        _, n_fft_bins, n_mics, _ = training_data[0][0].shape
        n_sources = len(self.source_steering_vectors)
        D = np.zeros((n_sources, n_fft_bins, n_mics,
                      n_configurations * n_atoms_each_config), dtype=complex)
        for i_source in range(n_sources):
            for i_configuration in tqdm(range(n_configurations), desc="Training configuration"):
                for i_atom in range(n_atoms_each_config):
                    # Random minibatch of sample covariance matrices.
                    batch_indices = np.random.choice(
                        len(training_data[i_configuration][0]), batch_size, replace=True)
                    tf_sample_covariance_batch = training_data[i_configuration][0][batch_indices]
                    # Widen each null direction into a range of azimuths.
                    null_azimuth_range = self._compute_null_angle_ranges(
                        training_data[i_configuration][1]["azimuth"], desired_null_width)
                    null_steering_vectors = compute_steering_vectors(
                        self.array_geometry, self.sampling_frequency,
                        self.stft_params["n_fft_bins"],
                        np.unique(training_data[i_configuration][1]["elevation"]),
                        np.unique(null_azimuth_range)
                    )
                    null_steering_vectors = np.transpose(
                        null_steering_vectors[:, :, 0, :], (0, 2, 1))
                    # BUG FIX: forward the caller's eigenvalue_percentage_threshold
                    # instead of hardcoding 0.99 (the parameter was ignored).
                    w = compute_null_controlling_tf_beamformers_2(
                        self.source_steering_vectors[i_source][:, 0, 0, :],
                        null_steering_vectors,
                        tf_sample_covariance_batch,
                        null_constraint_threshold,
                        eigenvalue_percentage_threshold=eigenvalue_percentage_threshold,
                        diagonal_loading_param=self.diagonal_loading_param)
                    D[i_source, :, :, i_configuration * n_atoms_each_config + i_atom] = w
        return D

    def _compute_null_angle_ranges(self, null_angles, desired_null_width):
        # Expand each null angle into a +/- desired_null_width/2 range
        # sampled every 0.1 degree.
        angle_ranges = [
            np.arange(null_angle - desired_null_width / 2,
                      null_angle + desired_null_width / 2, 0.1)
            for null_angle in null_angles
        ]
        return np.concatenate(angle_ranges)

    def _choose_weights(self, source_angle_index, x):
        """Pick the dictionary atom with minimum average output energy.

        x : array-like indexed by FFT bin; x[i] has shape (n_mics, n_frames)
            (assumed from the covariance computation below — TODO confirm).
        """
        weights_ = self.weights_[source_angle_index]
        n_fft_bins, n_mics, n_dictionary_atoms = weights_.shape
        min_ave_energy = np.inf
        optimal_weight_index = None
        for i_dictionary_atom in range(n_dictionary_atoms):
            w_frequency = weights_[:, :, i_dictionary_atom]
            energy = 0
            for i_fft_bin in range(n_fft_bins):
                w = w_frequency[i_fft_bin]
                # Per-bin sample covariance of the observation.
                R = x[i_fft_bin].dot(x[i_fft_bin].transpose().conjugate())
                energy += np.real(w.transpose().conjugate().dot(R).dot(w))
            ave_energy = energy / n_fft_bins
            if min_ave_energy > ave_energy:
                min_ave_energy = ave_energy
                optimal_weight_index = i_dictionary_atom
        return weights_[:, :, optimal_weight_index]

    def fit(self, training_data, desired_null_width,
            null_constraint_threshold, eigenvalue_percentage_threshold=0.99,
            batch_size=1, n_atoms_each_config=1):
        """Learn the weight dictionary; returns self for chaining."""
        self.weights_ = self._compute_weights(
            training_data, desired_null_width, null_constraint_threshold,
            eigenvalue_percentage_threshold, batch_size, n_atoms_each_config)
        return self

    def choose_weights(self, source_angle_index, x):
        """Public wrapper around :meth:`_choose_weights`."""
        return self._choose_weights(source_angle_index, x)
class DLBeamformer(object):
    """Dictionary-learning beamformer.

    ``fit`` computes desired null-controlling weights for many training
    configurations, then alternates between a least-squares dictionary
    update and OMP sparse coding to factorize them into ``n_dict_atoms``
    atoms. ``choose_weights`` selects the atom with the lowest average
    output energy for the observed data.
    """

    def __init__(self, array_geometry, sampling_frequency,
                 source_angles, stft_params, angle_grid, diagonal_loading_param=1,
                 n_dict_atoms=None, n_nonzero_coefficients=None,
                 n_train_max_iterations=100, train_error_tolerance=1e-6, bf_type=None):
        """
        Parameters
        ----------
        array_geometry : 2-D numpy array describing the microphone array geometry
        sampling_frequency : sampling frequency in Hz
        source_angles : sequence of dicts with "elevation" and "azimuth" keys
        stft_params : dictionary of STFT transform parameters including
            stft_params["n_samples_per_frame"], stft_params["n_fft_bins"],
            stft_params["hop_size"] and stft_params["window"]
        angle_grid : dict with "elevation" and "azimuth" evaluation grids
        diagonal_loading_param : diagonal loading used in beamformer design
        n_dict_atoms : dictionary size; defaults to the number of training
            configurations when None
        n_nonzero_coefficients : sparsity level passed to OMP
        n_train_max_iterations : number of alternating training iterations
        train_error_tolerance : OMP stopping tolerance
        bf_type : type of the beamformer
        """
        print("Initialize DLBeamformer")
        self.array_geometry = array_geometry
        self.sampling_frequency = sampling_frequency
        self.source_angles = source_angles
        self.stft_params = stft_params
        self.angle_grid = angle_grid
        self.diagonal_loading_param = diagonal_loading_param
        self.bf_type = bf_type
        # Learned dictionary (n_sources, n_fft_bins, n_mics, n_dict_atoms),
        # populated by fit().
        self.weights_ = None
        self.source_steering_vectors = self._compute_source_steering_vectors()
        self.steering_vectors = self._compute_steering_vectors()
        self.n_dict_atoms = n_dict_atoms
        self.n_train_max_iterations = n_train_max_iterations
        self.n_nonzero_coefficients = n_nonzero_coefficients
        self.train_error_tolerance = train_error_tolerance
        # Per-iteration reconstruction loss, appended during fit().
        self.training_loss = []

    def _compute_source_steering_vectors(self):
        # One steering-vector array per configured look direction.
        source_steering_vectors = []
        for source_angle in self.source_angles:
            v = compute_steering_vectors(
                self.array_geometry, self.sampling_frequency,
                self.stft_params["n_fft_bins"],
                source_angle["elevation"], source_angle["azimuth"])
            source_steering_vectors.append(v)
        return source_steering_vectors

    def _compute_steering_vectors(self):
        # Steering vectors over the full evaluation angle grid.
        return compute_steering_vectors(
            self.array_geometry, self.sampling_frequency,
            self.stft_params["n_fft_bins"],
            self.angle_grid["elevation"], self.angle_grid["azimuth"])

    def _compute_weights(self, training_data, desired_null_width,
                         null_constraint_threshold, eigenvalue_percentage_threshold=0.99,
                         batch_size=1, n_train_batches_each_config=1, random_seed=0):
        """Compute desired weights and factorize them into a sparse dictionary.

        Returns
        -------
        (dictionary, coefficients, desired_weights) with shapes
        (n_sources, n_fft_bins, n_mics, n_dict_atoms),
        (n_sources, n_fft_bins, n_dict_atoms, n_samples) and
        (n_sources, n_fft_bins, n_mics, n_samples) respectively.
        """
        np.random.seed(random_seed)
        n_configurations = len(training_data)
        _, n_fft_bins, n_mics, _ = training_data[0][0].shape
        n_sources = len(self.source_steering_vectors)
        if self.n_dict_atoms is None:
            self.n_dict_atoms = n_configurations
        # Random complex initialization of the sparse coefficients; the
        # dictionary is solved from them in the first training iteration.
        coefficients = np.random.randn(n_sources, n_fft_bins, self.n_dict_atoms,
                                       n_configurations * n_train_batches_each_config) \
            + 1j * np.random.randn(n_sources, n_fft_bins, self.n_dict_atoms,
                                   n_configurations * n_train_batches_each_config)
        dictionary = np.zeros((n_sources, n_fft_bins, n_mics, self.n_dict_atoms), dtype=np.complex64)
        # Desired (target) weights, one per configuration/batch.
        desired_weights = np.zeros((n_sources, n_fft_bins, n_mics,
                                    n_configurations * n_train_batches_each_config),
                                   dtype=np.complex64)
        for i_source in range(n_sources):
            for i_configuration in range(n_configurations):
                for i_batch in range(n_train_batches_each_config):
                    # Get a batch of sample covariance matrices.
                    batch_indices = np.random.choice(
                        len(training_data[i_configuration][0]), batch_size, replace=True)
                    tf_sample_covariance_batch = training_data[i_configuration][0][batch_indices]
                    # Null steering vectors for the nulling constraints.
                    null_azimuth_range = self._compute_null_angle_ranges(
                        training_data[i_configuration][1]["azimuth"], desired_null_width)
                    null_steering_vectors = compute_steering_vectors(
                        self.array_geometry, self.sampling_frequency,
                        self.stft_params["n_fft_bins"],
                        np.unique(training_data[i_configuration][1]["elevation"]),
                        np.unique(null_azimuth_range)
                    )
                    null_steering_vectors = np.transpose(null_steering_vectors[:, :, 0, :], (0, 2, 1))
                    # BUG FIX: forward the caller's eigenvalue_percentage_threshold
                    # instead of hardcoding 0.99 (the parameter was ignored).
                    w = compute_null_controlling_tf_beamformers_2(
                        self.source_steering_vectors[i_source][:, 0, 0, :], null_steering_vectors,
                        tf_sample_covariance_batch,
                        null_constraint_threshold,
                        eigenvalue_percentage_threshold=eigenvalue_percentage_threshold,
                        diagonal_loading_param=self.diagonal_loading_param)
                    desired_weights[i_source, :, :, i_configuration * n_train_batches_each_config + i_batch] = w
        n_desired_weights = desired_weights.shape[3]
        # Alternating optimization: least-squares dictionary update, then
        # OMP sparse coding, tracking the reconstruction loss per iteration.
        for i_source in range(n_sources):
            for i_train_iteration in tqdm(range(self.n_train_max_iterations), desc="Training iteration"):
                i_iteration_train_loss = 0
                # Dictionary update given the current sparse coefficients.
                for i_fft_bin in range(n_fft_bins):
                    dictionary[i_source][i_fft_bin] = desired_weights[i_source][i_fft_bin].dot(
                        np.linalg.pinv(coefficients[i_source][i_fft_bin])
                    )
                # Sparse-coefficient update given the current dictionary.
                for i_fft_bin in range(n_fft_bins):
                    for i_sample in range(n_desired_weights):
                        coefficients[i_source, i_fft_bin, :, i_sample] = omp(
                            dictionary[i_source][i_fft_bin],
                            desired_weights[i_source, i_fft_bin, :, i_sample],
                            nonneg=False, ncoef=self.n_nonzero_coefficients,
                            tol=self.train_error_tolerance, verbose=False
                        ).coef
                # Average reconstruction loss over FFT bins.
                for i_fft_bin in range(n_fft_bins):
                    i_iteration_train_loss += 0.5 * (1. / n_desired_weights) \
                        * np.linalg.norm(
                            dictionary[i_source][i_fft_bin].dot(
                                coefficients[i_source][i_fft_bin]) -
                            desired_weights[i_source][i_fft_bin]
                        )**2
                i_iteration_train_loss = i_iteration_train_loss / n_fft_bins
                print("\t\tTrain loss at current iteration {:.9f}".format(i_iteration_train_loss))
                self.training_loss.append(i_iteration_train_loss)
        return dictionary, coefficients, desired_weights

    def _compute_null_angle_ranges(self, null_angles, desired_null_width):
        # Expand each null angle into a +/- desired_null_width/2 range
        # sampled every 0.1 degree.
        angle_ranges = [
            np.arange(null_angle - desired_null_width / 2,
                      null_angle + desired_null_width / 2, 0.1)
            for null_angle in null_angles
        ]
        return np.concatenate(angle_ranges)

    def _choose_weights(self, source_angle_index, x):
        """Pick the dictionary atom with minimum average output energy.

        x : array-like indexed by FFT bin; x[i] has shape (n_mics, n_frames)
            (assumed from the covariance computation below — TODO confirm).
        """
        weights_ = self.weights_[source_angle_index]
        n_fft_bins, n_mics, n_dictionary_atoms = weights_.shape
        min_ave_energy = np.inf
        optimal_weight_index = None
        for i_dictionary_atom in range(n_dictionary_atoms):
            w_frequency = weights_[:, :, i_dictionary_atom]
            energy = 0
            for i_fft_bin in range(n_fft_bins):
                w = w_frequency[i_fft_bin]
                # Per-bin sample covariance of the observation.
                R = x[i_fft_bin].dot(x[i_fft_bin].transpose().conjugate())
                energy += np.real(w.transpose().conjugate().dot(R).dot(w))
            ave_energy = energy / n_fft_bins
            if min_ave_energy > ave_energy:
                min_ave_energy = ave_energy
                optimal_weight_index = i_dictionary_atom
        return weights_[:, :, optimal_weight_index]

    def fit(self, training_data, desired_null_width,
            null_constraint_threshold, eigenvalue_percentage_threshold=0.99,
            batch_size=1, n_train_batches_each_config=1, random_seed=0):
        """Learn the dictionary; returns self for chaining.

        BUG FIX: ``_compute_weights`` returns the tuple
        (dictionary, coefficients, desired_weights). Previously the whole
        tuple was stored in ``self.weights_``, which broke
        ``choose_weights`` (it expects the 4-D dictionary array). The
        parts are now unpacked into separate attributes.
        """
        dictionary, coefficients, desired_weights = self._compute_weights(
            training_data, desired_null_width, null_constraint_threshold,
            eigenvalue_percentage_threshold, batch_size,
            n_train_batches_each_config, random_seed)
        self.weights_ = dictionary
        self.coefficients_ = coefficients
        self.desired_weights_ = desired_weights
        return self

    def choose_weights(self, source_angle_index, x):
        """Public wrapper around :meth:`_choose_weights`."""
        return self._choose_weights(source_angle_index, x)
class DLBatchBeamformer(object):
def __init__(self, array_geometry, sampling_frequency,
source_angles, stft_params, angle_grid, bf_type="NC"):
"""
Parameters
----------
array_geometry: 2-D numpy array describing the geometry of the microphone array
sampling_frequency
stft_params: Dictionary of STFT transform parameters including
stft_params["n_samples_per_frame"]
stft_params["n_fft_bins"]
stft_params["hop_size"]
stft_params["window"]
bf_type: Type of the beamformer
"""
print("Initialize DL Batch Beamformer")
self.array_geometry = array_geometry
self.sampling_frequency = sampling_frequency
self.source_angles = source_angles
self.stft_params = stft_params
self.angle_grid = angle_grid
self.bf_type = bf_type
self.weights_ = None
self.source_steering_vectors = self._compute_source_steering_vectors()
self.steering_vectors = self._compute_steering_vectors()
def _compute_source_steering_vectors(self):
source_steering_vectors = []
for i_source_angle, source_angle in enumerate(self.source_angles):
v = compute_steering_vectors(self.array_geometry,
self.sampling_frequency, self.stft_params["n_fft_bins"],
source_angle["theta"], source_angle["phi"])
source_steering_vectors.append(v)
return source_steering_vectors
def _compute_steering_vectors(self):
return compute_steering_vectors(self.array_geometry,
self.sampling_frequency, self.stft_params["n_fft_bins"],
self.angle_grid["theta"], self.angle_grid["phi"])
def _compute_weights(self, training_data, desired_null_width,
null_constraint_threshold, eigenvalue_percentage_threshold=0.99,
batch_size=1, n_atoms_each_config=1):
n_configurations = len(training_data)
n_fft_bins, n_mics, _ = training_data[0][0][0].shape
n_sources = len(self.source_steering_vectors)
D = np.zeros((n_sources, n_fft_bins, n_mics, n_configurations*n_atoms_each_config), dtype=complex)
for i_source in range(n_sources):
for i_configuration in tqdm(range(n_configurations), desc="Configuration"):
for i_atom in range(n_atoms_each_config):
train_data_indices = np.random.choice(len(training_data[i_configuration][0]), batch_size)
tf_sample_covariance_matrices = training_data[i_configuration][0][train_data_indices]
null_angle_range = self._compute_null_angle_ranges(
training_data[i_configuration][1]["theta"], desired_null_width)
null_steering_vectors = compute_steering_vectors(
self.array_geometry, self.sampling_frequency,
self.stft_params["n_fft_bins"],
np.unique(null_angle_range), np.unique(training_data[i_configuration][1]["phi"])
)
null_steering_vectors = np.transpose(null_steering_vectors[:, :, 0, :], (0, 2, 1))
w = compute_null_controlling_minibatch_tf_beamformers(
self.source_steering_vectors[i_source][:, 0, 0, :], null_steering_vectors,
tf_sample_covariance_matrices,
null_constraint_threshold,
eigenvalue_percentage_threshold=0.99)
D[i_source, :, :, i_configuration*n_atoms_each_config + i_atom] = w
return D
def _compute_null_angle_ranges(self, null_thetas, desired_null_width):
theta_ranges = []
for null_theta in null_thetas:
theta_ranges.append(
np.arange(null_theta - desired_null_width/2,
null_theta + desired_null_width/2, 0.1))
return np.concatenate(theta_ranges)
# def _initialize(self, X):
# pass
def _choose_weights(self, source_angle_index, x):
weights_ = self.weights_[source_angle_index]
n_fft_bins, n_mics, n_dictionary_atoms = weights_.shape
# min_ave_energy = np.inf
# optimal_weight_index = None
# for i_dictionary_atom in range(n_dictionary_atoms):
# w_frequency = weights_[:, :, i_dictionary_atom]
# energy = 0
# n_fft_bins = w_frequency.shape[0]
# for i_fft_bin in range(n_fft_bins):
# w = w_frequency[i_fft_bin]
# R = x[i_fft_bin].dot(x[i_fft_bin].transpose().conjugate())
# energy += np.real(w.transpose().conjugate().dot(R).dot(w))
# ave_energy = energy / n_fft_bins
# if min_ave_energy > ave_energy:
# min_ave_energy = ave_energy
# optimal_weight_index = i_dictionary_atom
# optimal_weight | |
Constraint(expr=m.x126*m.x2512 + m.x751*m.x2518 + m.x1376*m.x2524 + m.x2001*m.x2530 <= 8)
# Bilinear capacity constraints c145..c271. Each constraint i pairs one
# variable from four consecutive index blocks (offsets -18, +607, +1232
# and +1857 relative to the constraint number) with the four selector
# variables x2512, x2518, x2524 and x2530, bounded above by 8.
# setattr(m, name, Constraint(...)) is semantically identical to the
# generated "m.cN = Constraint(...)" statements and preserves the
# ascending declaration order.
for _i in range(145, 272):
    setattr(
        m,
        "c%d" % _i,
        Constraint(
            expr=getattr(m, "x%d" % (_i - 18)) * m.x2512
            + getattr(m, "x%d" % (_i + 607)) * m.x2518
            + getattr(m, "x%d" % (_i + 1232)) * m.x2524
            + getattr(m, "x%d" % (_i + 1857)) * m.x2530
            <= 8
        ),
    )
m.c272 = | |
accelerator_types=[
cloud_tpu.AcceleratorType(),
cloud_tpu.AcceleratorType(),
cloud_tpu.AcceleratorType(),
],
next_page_token="abc",
),
cloud_tpu.ListAcceleratorTypesResponse(
accelerator_types=[], next_page_token="def",
),
cloud_tpu.ListAcceleratorTypesResponse(
accelerator_types=[cloud_tpu.AcceleratorType(),], next_page_token="ghi",
),
cloud_tpu.ListAcceleratorTypesResponse(
accelerator_types=[
cloud_tpu.AcceleratorType(),
cloud_tpu.AcceleratorType(),
],
),
RuntimeError,
)
async_pager = await client.list_accelerator_types(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, cloud_tpu.AcceleratorType) for i in responses)
@pytest.mark.asyncio
async def test_list_accelerator_types_async_pages():
    """Verify the async pager yields one page per mocked response, each
    carrying the expected ``next_page_token`` (empty on the last page)."""
    # BUG FIX: instantiate AnonymousCredentials() rather than passing the
    # class object, matching the client construction used elsewhere in
    # this suite (see the sync tests below).
    client = TpuAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_accelerator_types),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager requesting more pages than provided.
        call.side_effect = (
            cloud_tpu.ListAcceleratorTypesResponse(
                accelerator_types=[
                    cloud_tpu.AcceleratorType(),
                    cloud_tpu.AcceleratorType(),
                    cloud_tpu.AcceleratorType(),
                ],
                next_page_token="abc",
            ),
            cloud_tpu.ListAcceleratorTypesResponse(
                accelerator_types=[], next_page_token="def",
            ),
            cloud_tpu.ListAcceleratorTypesResponse(
                accelerator_types=[cloud_tpu.AcceleratorType(),], next_page_token="ghi",
            ),
            cloud_tpu.ListAcceleratorTypesResponse(
                accelerator_types=[
                    cloud_tpu.AcceleratorType(),
                    cloud_tpu.AcceleratorType(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_accelerator_types(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [cloud_tpu.GetAcceleratorTypeRequest, dict,])
def test_get_accelerator_type(request_type, transport: str = "grpc"):
    """GetAcceleratorType (sync gRPC): the request reaches the stub once and
    the mocked response fields round-trip; parametrized over proto and dict
    request forms."""
    client = TpuClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_accelerator_type), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_tpu.AcceleratorType(
            name="name_value", type_="type__value",
        )
        response = client.get_accelerator_type(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tpu.GetAcceleratorTypeRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_tpu.AcceleratorType)
    assert response.name == "name_value"
    assert response.type_ == "type__value"
def test_get_accelerator_type_empty_call():
    """Coverage failsafe: invoking the method with no request object and no
    flattened fields must still send a default request over the wire."""
    client = TpuClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    stub_type = type(client.transport.get_accelerator_type)
    with mock.patch.object(stub_type, "__call__") as rpc:
        client.get_accelerator_type()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        # A bare call is equivalent to sending an empty default request.
        assert call_args[0] == cloud_tpu.GetAcceleratorTypeRequest()
@pytest.mark.asyncio
async def test_get_accelerator_type_async(
    transport: str = "grpc_asyncio", request_type=cloud_tpu.GetAcceleratorTypeRequest
):
    """GetAcceleratorType (async gRPC): the request reaches the stub and the
    mocked response fields round-trip through the awaited call."""
    client = TpuAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_accelerator_type), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tpu.AcceleratorType(name="name_value", type_="type__value",)
        )
        response = await client.get_accelerator_type(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tpu.GetAcceleratorTypeRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_tpu.AcceleratorType)
    assert response.name == "name_value"
    assert response.type_ == "type__value"
@pytest.mark.asyncio
async def test_get_accelerator_type_async_from_dict():
    """Exercise the async path with a plain ``dict`` as the request type."""
    await test_get_accelerator_type_async(request_type=dict)
def test_get_accelerator_type_field_headers():
    """The resource ``name`` from the request must be forwarded in the
    x-goog-request-params routing metadata header."""
    client = TpuClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_tpu.GetAcceleratorTypeRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_accelerator_type), "__call__"
    ) as call:
        call.return_value = cloud_tpu.AcceleratorType()
        client.get_accelerator_type(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_accelerator_type_field_headers_async():
    """Async variant: the resource ``name`` must be forwarded in the
    x-goog-request-params routing metadata header."""
    client = TpuAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_tpu.GetAcceleratorTypeRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_accelerator_type), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tpu.AcceleratorType()
        )
        await client.get_accelerator_type(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_accelerator_type_flattened():
    """Flattened keyword arguments are packed into the request proto."""
    client = TpuClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_accelerator_type), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_tpu.AcceleratorType()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_accelerator_type(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_accelerator_type_flattened_error():
    """Supplying both a request object and flattened fields must raise."""
    client = TpuClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mixing a request proto with the flattened ``name`` argument is
    # ambiguous, so the client rejects it.
    with pytest.raises(ValueError):
        client.get_accelerator_type(
            cloud_tpu.GetAcceleratorTypeRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_accelerator_type_flattened_async():
    """Async variant: flattened keyword arguments are packed into the
    request proto.

    Fix: removed a dead ``call.return_value = cloud_tpu.AcceleratorType()``
    assignment that was immediately overwritten by the FakeUnaryUnaryCall
    wrapper on the next line.
    """
    client = TpuAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_accelerator_type), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tpu.AcceleratorType()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_accelerator_type(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_accelerator_type_flattened_error_async():
    """Async variant: a request object plus flattened fields must raise."""
    client = TpuAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mixing a request proto with the flattened ``name`` argument is
    # ambiguous, so the client rejects it.
    with pytest.raises(ValueError):
        await client.get_accelerator_type(
            cloud_tpu.GetAcceleratorTypeRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [cloud_tpu.ListRuntimeVersionsRequest, dict,])
def test_list_runtime_versions(request_type, transport: str = "grpc"):
    """ListRuntimeVersions (sync gRPC): the request reaches the stub once and
    the response is wrapped in a pager that exposes the mocked fields."""
    client = TpuClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_runtime_versions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_tpu.ListRuntimeVersionsResponse(
            next_page_token="next_page_token_value", unreachable=["unreachable_value"],
        )
        response = client.list_runtime_versions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tpu.ListRuntimeVersionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListRuntimeVersionsPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
def test_list_runtime_versions_empty_call():
    """Coverage failsafe: invoking the method with no request object and no
    flattened fields must still send a default request over the wire."""
    client = TpuClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    stub_type = type(client.transport.list_runtime_versions)
    with mock.patch.object(stub_type, "__call__") as rpc:
        client.list_runtime_versions()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        # A bare call is equivalent to sending an empty default request.
        assert call_args[0] == cloud_tpu.ListRuntimeVersionsRequest()
@pytest.mark.asyncio
async def test_list_runtime_versions_async(
    transport: str = "grpc_asyncio", request_type=cloud_tpu.ListRuntimeVersionsRequest
):
    """ListRuntimeVersions (async gRPC): the request reaches the stub and the
    awaited response is wrapped in an async pager with the mocked fields."""
    client = TpuAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_runtime_versions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tpu.ListRuntimeVersionsResponse(
                next_page_token="next_page_token_value",
                unreachable=["unreachable_value"],
            )
        )
        response = await client.list_runtime_versions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tpu.ListRuntimeVersionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListRuntimeVersionsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_runtime_versions_async_from_dict():
    """Exercise the async path with a plain ``dict`` as the request type."""
    await test_list_runtime_versions_async(request_type=dict)
def test_list_runtime_versions_field_headers():
client = TpuClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_tpu.ListRuntimeVersionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_runtime_versions), "__call__"
) as call:
call.return_value | |
import math
import gym
import numpy as np
from PIL import Image
from gym.utils import seeding
from guidance_flight_env.pid.pid_controller import PidController
from guidance_flight_env.services.map_plotter_old import MapPlotter
from guidance_flight_env.simulation_old import Simulation
from guidance_flight_env.utils import utils_old as utils
from typing import Tuple
import guidance_flight_env.properties as prp
class CartesianPosition():
    """A point in a local Cartesian frame, carrying an optional heading and a
    heading ``offset`` that is subtracted when converting a vector from the
    origin into a compass bearing."""

    def __init__(self, x, y, z=0, heading=0, offset=0):
        self.x = x
        self.y = y
        self.z = z
        self.heading = heading
        self.offset = offset

    def distance_to_target(self, target: 'CartesianPosition'):
        """Euclidean distance from this point to *target*."""
        deltas = (target.x - self.x, target.y - self.y, target.z - self.z)
        return np.sqrt(sum(np.square(d) for d in deltas))

    def vector_direction_deg(self):
        """ Calculate heading in degrees of vector from origin """
        raw_deg = math.degrees(math.atan2(self.x, self.y))
        # Apply the offset and normalise into [0, 360).
        return (raw_deg - self.offset + 360) % 360

    def direction_to_target_deg(self, target: 'CartesianPosition'):
        """Compass bearing from this point towards *target*."""
        return (target - self).vector_direction_deg()

    def __sub__(self, other) -> 'CartesianPosition':
        """ Returns difference between two coords as (delta_lat, delta_long) """
        return CartesianPosition(
            self.x - other.x,
            self.y - other.y,
            self.z - other.z,
            heading=other.vector_direction_deg(),
            offset=self.offset,
        )

    def __str__(self):
        return f'x: {self.x}, y: {self.y}, z: {self.z}'
class GuidanceEnv(gym.Env):
MIN_DISTANCE_TO_TARGET_KM = 100 / 1000
MIN_HEIGHT_FOR_FLARE_FT = 20
MIN_HEIGHT_FOR_FLARE_M = MIN_HEIGHT_FOR_FLARE_FT / 3.281
MAX_TARGET_DISTANCE_KM = 1.5 # ca. 30sec of flight for Cessna
MAX_HEIGHT_FT = 3500
HEIGHT_THRESHOLD_FT = 30
HEIGHT_THRESHOLD_M = HEIGHT_THRESHOLD_FT / 3.281
CRASH_HEIGHT_FT = 6
JSBSIM_DT_HZ: int = 60 # JSBSim integration frequency
pid_controller: PidController = PidController()
metadata = {
"render.modes": ["rgb-array"]
}
episode_counter = 0
continuous = False
observation_space: gym.Space = gym.spaces.Box(-np.inf, np.inf, shape=(15,), dtype=np.float32)
action_space: gym.Space = gym.spaces.Discrete(360)
    def __init__(self, config):
        """Build the guidance environment from a config mapping.

        NOTE(review): ``config`` is read with ``[]`` for most keys but with
        attribute access (``worker_index`` etc.) in the seeding branch, which
        implies an RLlib ``EnvContext`` rather than a plain dict - confirm.
        """
        # Runway alignment tolerances (degrees).
        self.min_runway_angle_threshold_deg = 5
        self.runway_angle_threshold_deg = 10
        self.render_progress_image = config["render_progress_image"]
        self.render_progress_image_path = config["render_progress_image_path"]
        self.target_radius_km = config["target_radius"]
        # Per-episode bookkeeping.
        self.infos = []
        self.rewards = []
        self.done = False
        self.map_plotter = None
        self.last_track_error = 0
        self.last_track_error_perpendicular = 0
        if config["agent_interaction_freq"] > self.JSBSIM_DT_HZ:
            raise ValueError('agent interaction frequency must be less than '
                             'or equal to JSBSim integration frequency of '
                             f'{self.JSBSIM_DT_HZ} Hz.')
        self.sim: Simulation = None
        # JSBSim integration steps executed per agent action.
        self.sim_steps_per_agent_step: int = self.JSBSIM_DT_HZ // config["agent_interaction_freq"]
        self.sim_steps = self.sim_steps_per_agent_step
        self.max_episode_time_s = config["max_episode_time_s"]
        # math.ceil: round up to next largest integer
        self.episode_steps = math.ceil(self.max_episode_time_s * config["agent_interaction_freq"])
        self.steps_left = self.episode_steps
        self.target_position = None
        self.localizer_position: CartesianPosition = None
        self.heading_at_localizer_deg = 0
        # set visualisation objects
        self.step_delay = None
        self.jsbsim_path = config["jsbsim_path"]
        self.aircraft = config["aircraft"]
        self.max_distance_km = config["max_distance_km"]
        self.max_target_distance_km = config["max_target_distance_km"]
        self.spawn_target_distance_km = 0.5
        self.last_state = None
        # Set the seed.
        self.np_random = None
        self.to_low_height = None
        if "evaluation" in config and config["evaluation"]:
            seed = config["seed"]
        else:
            # Derive a distinct seed per rollout worker so parallel workers
            # do not replay identical episodes.
            seed = config["seed"] + config.worker_index + config.num_workers + config.vector_index
        self.seed(seed)
        self.runway_angle_deg = None
        self.glide_angle_deg = 4
        self.phase = config["phase"]
        self.last_distance_km = []
        self.last_runway_heading_error_deg = []
        self.last_distance_to_perpendicular_localizer_km = 0
        self.offset = config["offset"]
    def step(self, action: np.ndarray):
        """Advance the environment by one agent step.

        Args:
            action: in continuous mode, a 2-vector (x, y) whose angle gives
                the commanded heading.  NOTE(review): in the discrete branch
                ``action`` is never read and the commanded heading stays 0 -
                confirm this is intended.

        Returns:
            Tuple of (observation, reward, done, info).
        """
        if not (action.shape == self.action_space.shape):
            raise ValueError('mismatch between action and action space size')
        heading_deg = 0
        _delta_ft = 0
        if self.continuous:
            # for continuous action space: invert normalizaation and unpack action
            # action = utils.invert_normalization(x_normalized=action[0], min_x=0.0, max_x=360.0, a=-1, b=1)
            x = action[0]
            y = action[1]
            heading_deg = math.degrees(math.atan2(y, x))
            # altitude_delta_ft = np.interp(abs(action[2]), [-1, 1], [-100, 100])
            # print("altitude_delta_ft:", altitude_delta_ft)
        action_target_heading_deg = heading_deg % 360
        # Hold pitch level via the PID elevator loop (altitude / flight-path
        # holds below are deliberately commented out).
        self.sim[prp.elevator_cmd] = self.pid_controller.elevator_hold(pitch_angle_reference=math.radians(0),
                                                                       pitch_angle_current=self.sim[prp.pitch_rad],
                                                                       pitch_angle_rate_current=self.sim[prp.q_radps])
        ground_speed = np.sqrt(np.square(self.sim[prp.v_north_fps]) + np.square(self.sim[prp.v_east_fps]))
        # replace with flight_path_angle_hold
        # self.sim[prp.elevator_cmd] = self.pid_controller.altitude_hold(altitude_reference_ft=self.sim[prp.altitude_sl_ft] + altitude_delta_ft,
        #                                                                altitude_ft=self.sim[prp.altitude_sl_ft],
        #                                                                ground_speed=ground_speed,
        #                                                                pitch_rad=self.sim[prp.pitch_rad],
        #                                                                alpha_rad=self.sim[prp.alpha_rad],
        #                                                                roll_rad=self.sim[prp.roll_rad],
        #                                                                q_radps=self.sim[prp.q_radps],
        #                                                                r_radps=self.sim[prp.r_radps])
        # self.sim[prp.elevator_cmd] = self.pid_controller.flight_path_angle_hold(gamma_reference_rad=math.radians(0),
        #                                                                         pitch_rad=self.sim[prp.pitch_rad],
        #                                                                         alpha_rad=self.sim[prp.alpha_rad],
        #                                                                         q_radps=self.sim[prp.q_radps],
        #                                                                         roll_rad=self.sim[prp.roll_rad],
        #                                                                         r_radps=self.sim[prp.r_radps])
        # Track the commanded heading via the PID aileron loop.
        self.sim[prp.aileron_cmd] = self.pid_controller.heading_hold(
            heading_reference_deg=action_target_heading_deg,
            heading_current_deg=self.sim.get_heading_true_deg(),
            roll_angle_current_rad=self.sim[prp.roll_rad],
            roll_angle_rate=self.sim[prp.p_radps],
            true_air_speed=self.sim.get_true_air_speed()
        )
        # Integrate the flight dynamics for one agent step.
        for step in range(self.sim_steps):
            self.sim.run()
        reward = self._reward()
        state = self._get_observation()
        self.rewards.append(reward)
        self.last_state = state
        self.steps_left -= 1
        self.done = self._is_done()
        info = self.get_info(reward=reward)
        self.infos.append(info)
        if self.render_progress_image and self._is_done():
            rgb_array = self.render(mode="rgb_array")
            image: Image = Image.fromarray(rgb_array)
            image.save(f'{self.render_progress_image_path}/episode_{self.episode_counter}_{info["terminal_state"]}.png')
            print("done with episode: ", self.episode_counter)
            # NOTE(review): the source's indentation was ambiguous here; the
            # counter is advanced with the end-of-episode handling, i.e. only
            # when progress rendering is enabled - confirm against history.
            self.episode_counter += 1
        return state, reward, self.done, info
    def reset(self):
        """Reset the episode: reinitialise the simulation, sample the target
        and runway layout for the current curriculum phase, and return the
        first observation."""
        initial_conditions = self._get_initial_conditions()
        if self.sim:
            self.sim.reinitialise(init_conditions=initial_conditions)
        else:
            self.sim = self._init_new_sim(self.JSBSIM_DT_HZ, self.aircraft, initial_conditions)
        self.steps_left = self.episode_steps
        # TODO: find more elegant solution...
        # Curriculum: later phases spawn the target farther out and widen the
        # sampled runway-angle range.
        if self.phase == 0:
            self.spawn_target_distance_km = 0.5
            self.runway_angle_deg = self.np_random.uniform(-20, 20) % 360
        elif self.phase == 1:
            self.spawn_target_distance_km = 1
            self.runway_angle_deg = self.np_random.uniform(-45, 45) % 360
        elif self.phase == 2:
            self.spawn_target_distance_km = 1.5
            self.runway_angle_deg = self.np_random.uniform(-90, 90) % 360
        elif self.phase == 3:
            self.spawn_target_distance_km = 2
            self.runway_angle_deg = self.np_random.uniform(-120, 120) % 360
        elif self.phase == 4:
            self.spawn_target_distance_km = self.max_target_distance_km
            self.runway_angle_deg = self.np_random.uniform(0, 360)
        if self.max_distance_km is None:
            self.max_distance_km = self.sim.calculate_max_distance_km(self.max_episode_time_s)
        # self.sim.start_engines() # start engines for testing the algorithm in simplest form
        # Important for heading hold and altitude control when motor on:
        # Mixture control - Sets the amount of fuel added to the intake airflow. At higher altitudes, the air pressure (and therefore the oxygen level) declines so the fuel volume must also be reduced to give the correct air–fuel mixture. This process is known as "leaning".
        # self.sim.set_throttle_mixture_controls(0.8, 0.7)
        self.sim.raise_landing_gear()
        self.sim.stop_engines()
        # if self.episode_counter > 100:
        #     self.runway_angle_deg = self.np_random.uniform(-90, 90) % 360
        # NOTE(review): this overrides the phase-sampled runway angle above,
        # so the curriculum sampling currently only affects the RNG stream.
        self.runway_angle_deg = 0  # keep on 0! move offset after training...
        print(f"episode: {self.episode_counter}, runway anlge:", self.runway_angle_deg)
        self.target_position = self._generate_random_target_position()
        self.localizer_position = self._create_localizer()
        self.localizer_glide_position = self._create_localizer_glide()
        self.localizer_perpendicular_position = self._create_perpendicular_localizer()
        self.last_distance_to_perpendicular_localizer_km = self.max_distance_km
        relative_bearing_deg = utils.reduce_reflex_angle_deg(self.target_position.direction_to_target_deg(self.localizer_position) - self.runway_angle_deg)
        relative_bearing_to_perpendicular_deg = utils.reduce_reflex_angle_deg(self.target_position.direction_to_target_deg(self.localizer_perpendicular_position) - self.runway_angle_deg)
        # Debug probe: track errors for a known example point.
        self.example_point_position = self._create_example_point()
        cross_track_error = self._calc_cross_track_error(self.example_point_position, self.target_position)
        vertical_track_error = self._calc_vertical_track_error(self.example_point_position, self.target_position)
        cross_track_error_perpendicular = self._calc_cross_track_error(self.example_point_position,
                                                                       self.localizer_perpendicular_position)
        print("cross_track_error example", f"{cross_track_error:.20f}")
        print("vertical_track_error example", f"{vertical_track_error:.20f}")
        print("cross_track_error_perpendicular example", f"{cross_track_error_perpendicular:.20f}")
        if abs(cross_track_error) < 0.1:
            print("smaller 0","-111")
        # Approach direction: opposite of the runway heading.
        self.heading_at_localizer_deg = (self.runway_angle_deg - 180) % 360
        self.map_plotter = MapPlotter(target=self.target_position,
                                      glide_angle_deg=self.glide_angle_deg,
                                      aircraft_initial_position=CartesianPosition(0,0,0, offset=self.offset),
                                      target_radius_km=self.target_radius_km,
                                      localizer_position=self.localizer_position,
                                      localizer_glide_position=self.localizer_glide_position,
                                      localizer_perpendicular_position=self.localizer_perpendicular_position,
                                      target_spawn_area_radius_km=self.spawn_target_distance_km,
                                      example_position=self.example_point_position,
                                      bounds_radius_km=self.max_distance_km,
                                      runway_angle=self.runway_angle_deg,
                                      offset=self.offset)
        aircraft_position = self.aircraft_cartesian_position()
        self.last_distance_km.append(aircraft_position.distance_to_target(self.target_position))
        self.done = False
        runway_heading_error_deg = utils.reduce_reflex_angle_deg(self.sim.get_heading_true_deg() - self.runway_angle_deg)
        self.last_runway_heading_error_deg.append(runway_heading_error_deg)
        # Altitude (ft) below which the episode counts as too low.
        self.to_low_height = (self.target_position.z - 10 / 1000) * 3281
        self.infos = []
        info = self.get_info(0)
        self.infos.append(info)
        return self._get_observation()
def render_html(self, path):
self.map_plotter.plot_html(infos=self.infos, path=path)
    def _get_initial_conditions(self):
        """Randomised JSBSim initial conditions for a new episode.

        The np_random draw order must stay unchanged - it defines the
        reproducible RNG stream for a given seed.
        """
        return {
            # Spawn in the upper third of the allowed altitude band.
            prp.initial_altitude_ft: self.np_random.uniform(GuidanceEnv.MAX_HEIGHT_FT - GuidanceEnv.MAX_HEIGHT_FT / 3, GuidanceEnv.MAX_HEIGHT_FT),
            prp.initial_terrain_altitude_ft: 0.00000001,
            prp.initial_longitude_geoc_deg: self.np_random.uniform(-160, 160),  # decreased range to avoid problems close to equator at -180 / 180
            prp.initial_latitude_geod_deg: self.np_random.uniform(-70, 70),  # decreased range to avoid problems close to poles at -90 / 90
            prp.initial_u_fps: self.aircraft.get_cruise_speed_fps(),
            prp.initial_v_fps: 0,
            prp.initial_w_fps: 0,
            prp.initial_p_radps: 0,
            prp.initial_q_radps: 0,
            prp.initial_r_radps: 0,
            prp.initial_roc_fpm: 0,
            prp.initial_heading_deg: self.np_random.uniform(0, 360),
        }
def _create_localizer(self):
# 500 meters before runway...
distance_km = -1
heading = self.runway_angle_deg + self.offset
# rotate from N(90°);E(0°) to N(0°);E(90°)
x = self.target_position.x + distance_km * math.cos(math.radians((heading - 90) % 360))
y = self.target_position.y + distance_km * math.sin(math.radians((heading + 90) % 360))
z = self.target_position.z
localizer = CartesianPosition(x, y, z, heading=self.runway_angle_deg, offset=self.offset)
return localizer
def _create_localizer_glide(self):
# 500 meters before runway...
distance_km = -1
heading = self.runway_angle_deg + self.offset
# rotate from N(90°);E(0°) to N(0°);E(90°)
x = self.target_position.x + distance_km * math.cos(math.radians((heading - 90) % 360))
y = self.target_position.y + distance_km * math.sin(math.radians((heading + 90) % 360))
z = self.target_position.z + distance_km * math.sin(math.radians(-self.glide_angle_deg % 360))
localizer = CartesianPosition(x, y, z, heading=self.runway_angle_deg, offset=self.offset)
return localizer
def _create_perpendicular_localizer(self):
heading = self.runway_angle_deg + self.offset + 90
# 500 meters before runway...
distance_km = -1
# rotate from N(90°);E(0°) to N(0°);E(90°)
x = self.localizer_position.x + distance_km * math.cos(math.radians((heading - 90) % 360))
y = self.localizer_position.y + distance_km * math.sin(math.radians((heading + 90) % 360))
localizer = CartesianPosition(x, y, heading=heading, offset=self.offset)
return localizer
    def _create_example_point(self):
        """Return a debug probe point 0.5 km behind the target along the
        runway direction, displaced below the glide slope, used by ``reset``
        to sanity-check the track-error calculations."""
        distance = -0.5
        # 500 meters before runway...
        # rotate from N(90°);E(0°) to N(0°);E(90°)
        # NOTE(review): x adds ``self.offset`` while y subtracts it; the other
        # localizer helpers apply the offset with the same sign to both axes -
        # confirm this asymmetry is intentional.
        x = self.target_position.x + distance * math.cos(math.radians((self.runway_angle_deg - 90 + self.offset) % 360))
        y = self.target_position.y + distance * math.sin(math.radians((self.runway_angle_deg + 90 - self.offset) % 360))
        z = self.target_position.z + distance * math.sin(math.radians(-self.glide_angle_deg % 360))
        return CartesianPosition(x, y, z, heading=self.runway_angle_deg, offset=self.offset)
    def _generate_random_target_position(self) -> 'CartesianPosition':
        """Sample a random target position for the episode.

        Draws x within the spawn radius, y between 600 m and the spawn
        radius (each with a random sign), and z above the flare height.
        The np_random draw order must not change (RNG stream).

        Note: the return annotation was corrected - a single
        ``CartesianPosition`` is returned, not a tuple.
        """
        start_distance = 600 / 1000  # minimum |y| distance, km
        def random_sign():
            # 50/50 sign flip to mirror spawn positions around the origin.
            if self.np_random.random() < 0.5:
                return 1
            return -1
        x = self.np_random.uniform(0, self.spawn_target_distance_km) * random_sign()
        print("max_target_distance_km", self.spawn_target_distance_km)
        y = self.np_random.uniform(start_distance, self.spawn_target_distance_km) * random_sign()
        # Altitude: up to half the initial altitude (ft -> km), plus a margin
        # above the flare height.
        z = self.np_random.uniform(0.2, (self.sim[prp.initial_altitude_ft] / 3281) / 2) + GuidanceEnv.MIN_HEIGHT_FOR_FLARE_M / 1000
        return CartesianPosition(x, y, z, heading=self.runway_angle_deg, offset=self.offset)
def _init_new_sim(self, dt, aircraft, initial_conditions):
return Simulation(sim_frequency_hz=dt,
aircraft=aircraft,
init_conditions=initial_conditions,
jsbsim_path=self.jsbsim_path,
offset=self.offset)
def render(self, mode='rgb_array') -> np.array:
print_props: Tuple = (prp.u_fps, prp.altitude_sl_ft, prp.roll_rad, prp.sideslip_deg)
if mode == 'html':
self.map_plotter.plot_html(self.infos, path="./htmls/test.html")
elif mode == 'rgb_array':
'''
rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
'''
return self.map_plotter.render(infos=self.infos)
def close(self):
if self.sim:
self.sim.close()
def | |
If :attr:``df`` is available, it will be used to sum values
from labels in ``label_ids`` found in the data frame
rather than re-measuring values from images.
Args:
label_ids: Integer of the label or sequence of multiple labels
in :attr:``labels_img_np`` for which to measure variation.
Returns:
Tuple of the given label ID and a dictionary of metrics.
The metrics are NaN if the label size is 0.
"""
metrics = dict.fromkeys(cls._OVERLAP_METRICS, np.nan)
nuclei = np.nan
nuc_dsc = np.nan
nuc_out = np.nan
if cls.df is None:
# find DSC between original and updated versions of the
# collective region
label_masks = [np.isin(l, label_ids) for l in cls.labels_imgs]
label_vol = np.sum(label_masks[0])
label_vol_alt = np.sum(label_masks[1])
vol_dsc = atlas_stats.meas_dice(label_masks[0], label_masks[1])
# sum up volume and nuclei count in the new version outside of
# the original version; assume that remaining original volume
# will be accounted for by the other labels that reoccupy it
mask_out = np.logical_and(label_masks[1], ~label_masks[0])
vol_out = np.sum(mask_out)
if cls.heat_map is not None:
nuclei = np.sum(cls.heat_map[label_masks[0]])
nuc_dsc = atlas_stats.meas_dice(
label_masks[0], label_masks[1], cls.heat_map)
nuc_out = np.sum(cls.heat_map[mask_out])
else:
# get weighted average of DSCs from all rows in a super-region,
# assuming all rows are at the lowest hierarchical level
labels = cls.df.loc[
cls.df[LabelMetrics.Region.name].isin(label_ids)]
label_vols = labels[LabelMetrics.Volume.name]
label_vol = np.nansum(label_vols)
label_vol_alt = np.nansum(labels[LabelMetrics.VolAlt.name])
vol_dscs = labels[LabelMetrics.VolDSC.name]
vol_dsc = df_io.weight_mean(vol_dscs, label_vols)
# sum up volume and nuclei outside of original regions
vol_out = np.nansum(labels[LabelMetrics.VolOut.name])
if LabelMetrics.Nuclei.name in labels:
nucs = labels[LabelMetrics.Nuclei.name]
nuclei = np.nansum(nucs)
nuc_dscs = labels[LabelMetrics.NucDSC.name]
nuc_dsc = df_io.weight_mean(nuc_dscs, nucs)
nuc_out = np.nansum(labels[LabelMetrics.NucOut.name])
if label_vol > 0:
# update dict with metric values; px vals will not get converted
# to physical units
metrics[LabelMetrics.Volume] = label_vol
metrics[LabelMetrics.VolPx] = label_vol
metrics[LabelMetrics.VolAlt] = label_vol_alt
metrics[LabelMetrics.VolAltPx] = label_vol_alt
metrics[LabelMetrics.Nuclei] = nuclei
metrics[LabelMetrics.VolDSC] = vol_dsc
metrics[LabelMetrics.NucDSC] = nuc_dsc
metrics[LabelMetrics.VolOut] = vol_out
metrics[LabelMetrics.NucOut] = nuc_out
disp_id = get_single_label(label_ids)
print("overlaps within label {}: {}"
.format(disp_id, libmag.enum_dict_aslist(metrics)))
return label_ids, metrics
def measure_labels_overlap(labels_imgs, heat_map=None, spacing=None,
                           unit_factor=None, combine_sides=True,
                           label_ids=None, grouping=None, df=None):
    """Compute metrics comparing two versions of atlas labels.

    Fixes: the signature previously used a mutable default ``grouping={}``
    and then wrote into it, leaking the SIDE key across calls (and mutating
    caller-supplied dicts); and the documented ``label_ids=None`` fallback
    was never implemented, which would have raised on iteration.

    Args:
        labels_imgs: Sequence of integer labels image as Numpy arrays.
        heat_map: Numpy array as a density map; defaults to None to ignore
            density measurements.
        spacing: Sequence of image spacing for each pixel in the images.
        unit_factor: Unit factor conversion; defaults to None. Eg use
            1000 to convert from um to mm.
        combine_sides: True to combine corresponding labels from opposite
            sides of the sample; defaults to True. Corresponding labels
            are assumed to have the same absolute numerical number and
            differ only in signage. May be False if combining by passing
            both pos/neg labels in ``label_ids``.
        label_ids: Sequence of label IDs to include. Defaults to None,
            in which case the labels will be taken from unique values
            in the first image of ``labels_imgs``.
        grouping: Dictionary of sample grouping metadata, where each
            entry will be added as a separate column. Defaults to None,
            treated as an empty dictionary; the caller's dict is copied,
            never mutated.
        df: Data frame with rows for all drawn labels to pool into
            parent labels instead of re-measuring stats for all
            children of each parent; defaults to None.

    Returns:
        :obj:`pd.DataFrame`: Pandas data frame of the regions and weighted
        means for the metrics.
    """
    start_time = time()
    if df is None:
        vol_args = {"spacing": spacing, "unit_factor": unit_factor}
    else:
        _update_df_side(df)
        vol_args = {}
    # Copy grouping so neither a shared default nor the caller's dict is
    # mutated below.
    grouping = dict(grouping) if grouping else {}
    if label_ids is None:
        # Fall back to the unique label values of the first image.
        label_ids = np.unique(labels_imgs[0])
    # use a class to set and process the label without having to
    # reference the labels image as a global variable
    MeasureLabelOverlap.set_data(labels_imgs, heat_map, df)
    metrics = {}
    grouping[config.AtlasMetrics.SIDE.value] = None
    pool = chunking.get_mp_pool()
    pool_results = []
    for label_id in label_ids:
        # include corresponding labels from opposite sides while skipping
        # background
        if label_id == 0: continue
        if combine_sides: label_id = [label_id, -1 * label_id]
        pool_results.append(
            pool.apply_async(
                MeasureLabelOverlap.measure_overlap, args=(label_id,)))
    for result in pool_results:
        # get metrics by label
        label_id, label_metrics = result.get()
        # parse for unit conversion side effects; returned values unused here
        label_size, nuc, _ = _parse_vol_metrics(
            label_metrics, extra_keys=(LabelMetrics.VolOut,), **vol_args)
        # transfer all found metrics to master dictionary
        _update_vol_dicts(label_id, label_metrics, grouping, metrics)
    pool.close()
    pool.join()
    # make data frame of raw metrics, dropping columns of all NaNs
    df = pd.DataFrame(metrics)
    df = df.dropna(axis=1, how="all")
    df_io.print_data_frame(df)
    print("time elapsed to measure variation:", time() - start_time)
    return df
def map_meas_to_labels(labels_img, df, meas, fn_avg, skip_nans=False,
reverse=False, col_wt=None):
"""Generate a map of a given measurement on a labels image.
The intensity values of labels will be replaced by the given metric
of the chosen measurement, such as the mean of the densities. If
multiple conditions exist, the difference of metrics for the first
two conditions will be taken under the assumption that the values for
each condition are in matching order.
Args:
labels_img: Labels image as a Numpy array in x,y,z.
df: Pandas data frame with measurements by regions corresponding
to that of ``labels_img``.
meas: Name of column in ``df`` from which to extract measurements.
fn_avg: Function to apply to the column for each region. If None,
``df`` is assumed to already contain statistics generated from
the ``clrstats`` R package, which will be extracted directly.
skip_nans: True to skip any region with NaNs, leaving 0 instead;
defaults to False to allow NaNs in resulting image. Some
applications may not be able to read NaNs, so this parameter
allows giving a neutral value instead.
reverse: Reverse the order of sorted conditions when generating
stats by ``fn_avg`` to compare conditions; defaults to False.
col_wt (str): Name of column to use for weighting, where the
magnitude of ``meas`` will be adjusted as fractions of the max
value in this weighting column for labels found in ``labels_img``;
defaults to None.
    Returns:
A map of averages for the given measurement as an image of the
same shape as ``labels_img`` of float data type, or None if no
values for ``meas`` are found.
"""
if meas not in df or np.all(np.isnan(df[meas])):
# ensure that measurement column is present with non-NaNs
print("{} not in data frame or all NaNs, no image to generate"
.format(meas))
return None
# make image array to map differences for each label and filter data
# frame to get only these regions
labels_diff = np.zeros_like(labels_img, dtype=np.float)
labels_img_abs = np.abs(labels_img)
regions = np.unique(labels_img_abs)
df = df.loc[df["Region"].isin(regions)].copy()
df_cond = None
conds = None
if "Condition" in df:
# get and sort conditions
df_cond = df["Condition"]
conds = sorted(np.unique(df_cond), reverse=reverse)
if col_wt is not None:
# weight given column for the first condition and normalizing it to
# its maximum value, or use the whole column if no conditions exist
print("weighting stats by", col_wt)
wts = df.loc[df_cond == conds[0], col_wt] if conds else df[col_wt]
wts /= max(wts)
if conds:
for cond in conds:
# use raw values to avoid multiplying by index; assumes
# matching order of values between conditions
df.loc[df_cond == cond, meas] = np.multiply(
df.loc[df_cond == cond, meas].values, wts.values)
else:
df.loc[:, meas] *= wts
for region in regions:
# get difference for each region, either from a single column
# that already has the difference of effect size of by taking
# the difference from two columns
df_region = df[df[LabelMetrics.Region.name] == region]
labels_region = labels_img_abs == region
diff = np.nan
if fn_avg is None:
# assume that df was output by R clrstats package
if df_region.shape[0] > 0:
diff = df_region[meas]
else:
if len(conds) >= 2:
# compare the metrics for the first two conditions
avgs = []
for cond in conds:
# gather separate metrics for each condition
df_region_cond = df_region[df_region["Condition"] == cond]
# print(df_region_cond)
reg_avg = fn_avg(df_region_cond[meas])
# print(region, cond, reg_avg)
avgs.append(reg_avg)
# TODO: consider making order customizable
diff = avgs[1] - avgs[0]
else:
# take the metric for the single condition
diff = fn_avg(df_region[meas])
if skip_nans and np.isnan(diff):
| |
})
))
class TestITrueDivide(
        math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
    """In-place true division (``a /= b``) checked against NumPy."""

    skip_backward_test = True
    skip_double_backward_test = True

    def generate_inputs(self):
        lhs, rhs = super().generate_inputs()
        if self.input_lhs == 'random':
            # Keep divisors away from zero so quotients stay bounded.
            with math_utils.IgnoreNumpyFloatingPointError():
                near_zero = (-0.3 < rhs) & (rhs < 0.3)
                rhs[near_zero] = 1
        return lhs, rhs

    def func(self, xp, a, b):
        a /= b
# TODO(hvy): Support and test zero division and mixed dtypes (dtype kinds).
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    # Special shapes
    chainer.testing.product({
        'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
        'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_truediv_scalar,
        'input': ['random'],
        'scalar_value': [1],
        'is_module': [False],
        'is_scalar_rhs': [True, False],
    })
    # Dtype combinations
    + chainer.testing.product({
        'shape': [(2, 3)],
        'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_truediv_scalar,
        'input': ['random'],
        'scalar_value': [1],
        'is_module': [False],
        'is_scalar_rhs': [True, False],
    })
    # Special values
    + chainer.testing.product({
        'shape': [(2, 3)],
        'in_dtypes,out_dtype': (
            dtype_utils.make_same_in_out_dtypes(
                1, chainerx.testing.float_dtypes)),
        'scalar_type': [float],
        'input': [float('inf'), -float('inf'), float('nan')],
        'scalar_value': [-1, 1, 2, float('inf'), -float('inf'), float('nan')],
        'is_module': [False],
        'is_scalar_rhs': [True, False],
        'skip_backward_test': [True],
        'skip_double_backward_test': [True],
    })
))
class TestTrueDivideScalar(
        math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    """Tests ``array / scalar`` and ``scalar / array`` true division."""

    check_numpy_strides_compliance = False

    def generate_inputs(self):
        # Do not divide by small number to avoid ridiculously large outputs.
        if not self.is_scalar_rhs and self.input == 'random':
            # ``scalar / array``: force every |element| into [1, 5) so the
            # quotient stays moderate. Unsigned dtypes start at 2 so the two
            # masking assignments below are no-ops for them.
            in_dtype, = self.in_dtypes
            low = -5 if numpy.dtype(in_dtype).kind != 'u' else 2
            high = 5
            x = array_utils.uniform(self.shape, in_dtype, low=low, high=high)
            x[(-1 < x) & (x < 0)] = -2
            x[(0 <= x) & (x < 1)] = 2
            return x,
        return super().generate_inputs()

    def func_scalar(self, xp, a, scalar):
        if self.is_module:
            if self.is_scalar_rhs:
                return xp.divide(a, scalar)
            else:
                return xp.divide(scalar, a)
        else:
            if self.is_scalar_rhs:
                return a / scalar
            else:
                return scalar / a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    # Special shapes
    chainer.testing.product({
        'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
        'in_dtypes,out_dtype': (
            dtype_utils.make_same_in_out_dtypes(
                1, chainerx.testing.float_dtypes)),
        'scalar_type': [float],
        'input': ['random'],
        'scalar_value': [1],
    })
    # Special values
    + chainer.testing.product({
        'shape': [(2, 3)],
        'in_dtypes,out_dtype': (
            dtype_utils.make_same_in_out_dtypes(
                1, chainerx.testing.float_dtypes)),
        'scalar_type': [float],
        'input': [float('inf'), -float('inf'), float('nan')],
        'scalar_value': [-1, 1, 2, float('inf'), -float('inf'), float('nan')],
    })
))
class TestITrueDivideScalar(
        math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
    """In-place scalar true division (``a /= scalar``), float dtypes only."""

    def func_scalar(self, xp, a, scalar):
        # In-place op: mutate ``a``; the base class checks the mutated array.
        a /= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('in_dtypes,out_dtype', [
    (('bool_',), 'int64'),
    (('int8',), 'int64'),
    (('int16',), 'int64'),
    (('int32',), 'int64'),
    (('int64',), 'int64'),
    (('float16',), 'float16'),
    (('float32',), 'float32'),
    (('float64',), 'float64'),
    # TODO(niboshi): Unsigned integer dtypes should result in uint64.
    # Currently chainerx returns int64.
    (('uint8',), 'int64'),
])
@chainer.testing.parameterize_pytest('shape,axis', [
    ((), None),
    ((), ()),
    ((2,), None),
    ((2,), ()),
    ((2,), 0),
    ((2,), (0,)),
    ((2,), (-1,)),
    ((2, 3), None),
    ((2, 3), ()),
    ((2, 3), 0),
    ((2, 3), (0,)),
    ((2, 3), (1,)),
    ((2, 3), (-1,)),
    ((2, 3), (-2,)),
    ((2, 3), (0, 1)),
    ((2, 3), (-2, -1)),
    ((1, 3), None),  # sum over 1-dim axis
    ((0, 3), None),  # sum over 0-dim axis
    # Sum over axes that are in the middle or apart
    ((2, 3, 4), (1,)),
    ((2, 3, 4), (0, 2)),
    # Sum over axes that are apart and/or unsorted
    ((2, 3), (1, 0)),
    ((2, 3, 4), (2, 0)),
    ((2, 3, 4), (2, 0, 1)),
    ((2, 3, 4), (-2, 2, 0)),
])
@chainer.testing.parameterize_pytest('keepdims', [True, False])
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestSum(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
    """Tests ``chainerx.sum`` / ``ndarray.sum`` against NumPy."""

    input = 'random'

    def setup(self):
        super().setup()
        # ``in_dtypes`` is a 1-tuple such as ('float16',); unpack it.
        in_dtype, = self.in_dtypes
        if in_dtype == 'float16':
            # float16 accumulates rounding error quickly; loosen tolerances.
            self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
            self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
            self.check_double_backward_options.update(
                {'rtol': 1e-2, 'atol': 1e-2})

    def func(self, xp, a):
        # Exercise both the module-level function and the method form.
        if self.is_module:
            return xp.sum(a, axis=self.axis, keepdims=self.keepdims)
        else:
            return a.sum(axis=self.axis, keepdims=self.keepdims)
@op_utils.op_test(['native:0'])
class TestSumStability(op_utils.NumpyOpTest):
    """Sum 2**20 float32 0.1's and compare against an error-free reference."""

    skip_backward_test = True
    skip_double_backward_test = True

    def generate_inputs(self):
        element = numpy.float32(0.1)
        return numpy.full(2 ** 20, element, dtype=numpy.float32),

    def forward_xp(self, inputs, xp):
        x, = inputs
        if xp is not chainerx:
            # Reference value computed without accumulated rounding error.
            return (x[0] * x.size).astype(x.dtype),
        return x.sum(),
@op_utils.op_test(['native:0'])
@chainer.testing.parameterize_pytest('size', list(range(1024)))
class TestSumEachSize(op_utils.NumpyOpTest):
    """Sum arrays of every length in [0, 1024) to exercise reduction kernels."""

    skip_backward_test = True
    skip_double_backward_test = True

    def generate_inputs(self):
        # Integer values 1..size; int32 keeps the expected sum exact.
        values = numpy.arange(1, self.size + 1, dtype=numpy.int32)
        return values,

    def forward_xp(self, inputs, xp):
        arr, = inputs
        return arr.sum(),
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('keepdims', [False, True])
@pytest.mark.parametrize('shape,axis', [
    # ((), 0), # TODO(sonots): Fix compatibility
    ((), 1),
    ((), (1,)),
    ((2,), 2),
    ((2,), (2,)),
    ((2,), (-2,)),
    ((2, 3,), (-3,)),
    ((2, 3,), (-3, -4)),
    ((2, 3,), (0, 0)),
    ((2, 3,), (-1, -1)),
    ((2, 3,), (0, 1, 1)),
    ((2, 3,), (0, -2)),
])
def test_sum_invalid(is_module, xp, shape, axis, keepdims, dtype):
    """Out-of-bounds or duplicate axes must raise in both chainerx and numpy."""
    a = array_utils.create_dummy_ndarray(xp, shape, dtype)
    if is_module:
        xp.sum(a, axis=axis, keepdims=keepdims)
    else:
        a.sum(axis=axis, keepdims=keepdims)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    # Special shapes
    chainer.testing.product({
        'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
        'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
        'input': ['random'],
        'scalar_value': [1],
        'is_scalar_rhs': [False],
    })
    # Differentiable cases
    + chainer.testing.product({
        'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
        'input': [numpy.array([1, 3, 3, 4])],
        'scalar_value': [0, 2, 5],
        'is_scalar_rhs': [False, True],
    })
    # Non-differentiable cases
    + chainer.testing.product({
        'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
        # scalar equal to an element makes minimum non-differentiable there
        'input': [numpy.array([1, 3, 3, 4])],
        'scalar_value': [1, 3, 4],
        'is_scalar_rhs': [False, True],
        'skip_backward_test': [True],
        'skip_double_backward_test': [True],
    })
    # Special float values
    + chainer.testing.product({
        'in_dtypes,scalar_type,out_dtype': (
            _in_out_dtypes_float_arithmetic_scalar),
        # TODO(imanishi): Add test for NaN.
        'input': [numpy.array([0, float('inf'), -float('inf')])],
        'scalar_value': [-1, 0, 1, float('inf'), -float('inf')],
        'is_scalar_rhs': [False],
        'skip_backward_test': [True],
        'skip_double_backward_test': [True],
    })
))
class TestMinimumScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    """Tests ``xp.minimum`` with one array operand and one scalar operand."""

    dodge_nondifferentiable = True

    def func_scalar(self, xp, a, scalar):
        if self.is_scalar_rhs:
            return xp.minimum(a, scalar)
        else:
            return xp.minimum(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    # Special shapes
    chainer.testing.product({
        'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
        'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
        'input': ['random'],
        'scalar_value': [0, 1],
        'is_scalar_rhs': [False],
    })
    # Differentiable cases
    + chainer.testing.product({
        'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
        'input': [numpy.array([1, 3, 3, 4])],
        'scalar_value': [0, 2, 5],
        'is_scalar_rhs': [False, True],
    })
    # Non-differentiable cases
    + chainer.testing.product({
        'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
        # scalar equal to an element makes maximum non-differentiable there
        'input': [numpy.array([1, 3, 3, 4])],
        'scalar_value': [1, 3, 4],
        'is_scalar_rhs': [False, True],
        'skip_backward_test': [True],
        'skip_double_backward_test': [True],
    })
    # Special float values
    + chainer.testing.product({
        'in_dtypes,scalar_type,out_dtype': (
            _in_out_dtypes_float_arithmetic_scalar),
        # TODO(imanishi): Add test for NaN.
        'input': [numpy.array([0, float('inf'), -float('inf')])],
        'scalar_value': [-1, 0, 1, float('inf'), -float('inf')],
        'is_scalar_rhs': [False],
        'skip_backward_test': [True],
        'skip_double_backward_test': [True],
    })
))
class TestMaximumScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    """Tests ``xp.maximum`` with one array operand and one scalar operand."""

    dodge_nondifferentiable = True

    def func_scalar(self, xp, a, scalar):
        if self.is_scalar_rhs:
            return xp.maximum(a, scalar)
        else:
            return xp.maximum(scalar, a)
def _create_dummy_array_for_dot(xp, shape, dtype):
x = numpy.arange(numpy.prod(shape)).reshape(shape)
if dtype == 'bool_':
x = numpy.asarray(x % 2 == 0)
else:
x = x.astype(dtype)
return xp.array(x)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    # Special shapes
    chainer.testing.product({
        'shape': [(), (1,), (1, 1, 1), (2, 3)],
        'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
        'input': [0, 2, -2],
    })
    # Special shapes (array.size = 0)
    + chainer.testing.product({
        # NOTE: this was previously written ``(0)``, which is the integer 0,
        # not the intended 1-element shape tuple (cf. TestLog below).
        'shape': [(0,), (2, 0, 3)],
        'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
        'input': [0, 2, -2],
        'check_numpy_strides_compliance': [False],
    })
    # Special values
    + chainer.testing.product({
        'shape': [(2, 3)],
        'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
        'input': [float('inf'), -float('inf'), float('nan')],
        'skip_backward_test': [True],
        'skip_double_backward_test': [True],
    })
))
class TestExp(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
    """Tests ``xp.exp`` against NumPy, including empty and non-finite inputs."""

    def func(self, xp, a):
        return xp.exp(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    # Special shapes
    chainer.testing.product({
        'shape': [(), (1,), (1, 1, 1), (2, 3)],
        'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
        'input': [1, 3],
    })
    # Special shapes (array.size = 0)
    + chainer.testing.product({
        'shape': [(0,), (2, 0, 3)],
        'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
        'input': [1, 3],
        'check_numpy_strides_compliance': [False],
    })
    # Special values
    + chainer.testing.product({
        'shape': [(2, 3)],
        'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
        'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
        'skip_backward_test': [True],
        'skip_double_backward_test': [True],
    })
))
class TestLog(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
    """Tests ``xp.log``; includes -1 and 0 inputs (nan/-inf results)."""

    def func(self, xp, a):
        return xp.log(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    # Special shapes
    chainer.testing.product({
        'shape': [(), (1,), (1, 1, 1), (2, 3)],
        'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
        'input': [1, 3],
    })
    # Special shapes (array.size = 0)
    + chainer.testing.product({
        'shape': [(0,), (2, 0, 3)],
        'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
        'input': [1, 3],
        'check_numpy_strides_compliance': [False],
    })
    # Special values
    + chainer.testing.product({
        'shape': [(2, 3)],
        'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
        'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
        'skip_backward_test': [True],
        'skip_double_backward_test': [True],
    })
))
class TestLog10(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
    """Tests ``xp.log10``; includes -1 and 0 inputs (nan/-inf results)."""

    def func(self, xp, a):
        return xp.log10(a)
_logsumexp_params = [
((2,), 0),
((2,), -1),
((2, 3), None),
((2, 3), 0),
((2, 3), 1),
((2, 3), -2),
((2, 3), (0, 1)),
((2, 3), (-2, 1)),
((1, 2, 3), None),
((1, 2, 3), (1)),
((1, 2, 3), (1, 0)),
((1, 2, 3), (0, 1, 2)),
]
_invalid_logsumexp_params = [
# Axis out of bounds
((2,), 1),
((2,), -2),
((2,), (0, 1)),
((2, 3), (0, 1, 2)),
# Duplicate axes
((2,), (0, 0)),
((2, 3), (0, 0)),
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest(
    'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest('keepdims', [True, False])
class TestLogSumExp(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
    """``chainerx.logsumexp`` vs. ``log(sum(exp(x)))`` computed with NumPy."""

    input = 'random'

    def setup(self):
        super().setup()
        # BUG FIX: ``self.in_dtypes`` is a 1-tuple such as ('float16',);
        # the old ``self.in_dtypes == 'float16'`` compared the tuple itself
        # to a string and was always False, so the relaxed float16
        # tolerances were never applied (cf. TestSum.setup above).
        in_dtype, = self.in_dtypes
        if in_dtype == 'float16':
            # TODO(imanishi): Support device implementation and remove this.
            self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})

    def forward_xp(self, inputs, xp):
        x, = inputs
        axis = self.axis
        keepdims = self.keepdims
        if xp is chainerx:
            return chainerx.logsumexp(x, axis=axis, keepdims=keepdims),
        # Reference path: compute in the promoted output dtype for accuracy.
        x = x.astype(self.out_dtype)
        return numpy.log(numpy.exp(x).sum(axis=axis, keepdims=keepdims)),
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params)
@pytest.mark.parametrize('keepdims', [True, False])
# TODO(hvy): Should not overflow for large numbers, add tests
def test_logsumexp_invalid(device, a_shape, axis, keepdims, dtype):
    """Invalid axes (out-of-bounds/duplicate) must raise DimensionError."""
    a = array_utils.create_dummy_ndarray(chainerx, a_shape, dtype)
    with pytest.raises(chainerx.DimensionError):
        chainerx.logsumexp(a, axis=axis, keepdims=keepdims)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
class TestLogSoftmax(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})
self.check_backward_options.update({'rtol': 3e-3, 'atol': 3e-3})
def forward_xp(self, inputs, xp):
x, = inputs
axis = self.axis
if xp is chainerx:
| |
self._typed_ops[type_]
del self.types[type_]
def __contains__(self, type_):
type_ = _normalize_type(type_)
return type_ in self._typed_ops
    @classmethod
    def _remove_nesting(cls, funcname, *, module=None, modname=None, strict=True):
        """Resolve a possibly dotted *funcname* to its parent namespace.

        Returns ``(module, bare_funcname)`` where ``module`` is the object
        the op should be attached to. Intermediate path components are
        created as ``OpPath`` namespaces when missing. With ``strict=True``
        an ``AttributeError`` is raised if the final name is already taken.
        """
        if module is None:
            module = cls._module
        if modname is None:
            modname = cls._modname
        if "." not in funcname:
            if strict and hasattr(module, funcname):
                raise AttributeError(f"{modname}.{funcname} is already defined")
        else:
            # Walk (and create) each intermediate namespace component.
            path, funcname = funcname.rsplit(".", 1)
            for folder in path.split("."):
                if not hasattr(module, folder):
                    setattr(module, folder, OpPath(module, folder))
                module = getattr(module, folder)
                modname = f"{modname}.{folder}"
                if not isinstance(module, (OpPath, ModuleType)):
                    raise AttributeError(
                        f"{modname} is already defined. Cannot use as a nested path."
                    )
            # Can't use `hasattr` here, b/c we use `__getattr__` in numpy namespaces
            if strict and funcname in module.__dict__:
                raise AttributeError(f"{path}.{funcname} is already defined")
        return module, funcname
@classmethod
def _find(cls, funcname):
rv = cls._module
for attr in funcname.split("."):
rv = getattr(rv, attr, None)
if rv is None:
break
return rv
    @classmethod
    def _initialize(cls):
        """Scan the GraphBLAS C library and register every builtin op.

        Matches ``lib`` symbol names against the class's ``_parse_config``
        regexes, derives (name, input type, return type) for each match,
        and attaches a typed builtin op object to the op module. Idempotent.
        """
        if cls._initialized:
            return
        # Read in the parse configs
        trim_from_front = cls._parse_config.get("trim_from_front", 0)
        delete_exact = cls._parse_config.get("delete_exact", None)
        num_underscores = cls._parse_config["num_underscores"]
        varnames = tuple(x for x in dir(lib) if x[0] != "_")
        # Each regex family encodes how the return type is derived:
        # None -> same as input type; "BOOL"/"FP"/"FC" -> fixed-kind return.
        for re_str, return_prefix in (
            ("re_exprs", None),
            ("re_exprs_return_bool", "BOOL"),
            ("re_exprs_return_float", "FP"),
            ("re_exprs_return_complex", "FC"),
        ):
            if re_str not in cls._parse_config:
                continue
            if "complex" in re_str and not _supports_complex:  # pragma: no cover
                continue
            for r in reversed(cls._parse_config[re_str]):
                for varname in varnames:
                    m = r.match(varname)
                    if m:
                        # Parse function into name and datatype
                        gb_name = m.string
                        splitname = gb_name[trim_from_front:].split("_")
                        if delete_exact and delete_exact in splitname:
                            splitname.remove(delete_exact)
                        if len(splitname) == num_underscores + 1:
                            *splitname, type_ = splitname
                        else:
                            type_ = None
                        name = "_".join(splitname).lower()
                        # Create object for name unless it already exists
                        if not hasattr(cls._module, name):
                            obj = cls(name)
                            setattr(cls._module, name, obj)
                            if not hasattr(op, name):
                                setattr(op, name, obj)
                        else:
                            obj = getattr(cls._module, name)
                        gb_obj = getattr(lib, varname)
                        # Determine return type
                        if return_prefix == "BOOL":
                            return_type = "BOOL"
                            if type_ is None:
                                type_ = "BOOL"
                        else:
                            if type_ is None:  # pragma: no cover
                                raise TypeError(f"Unable to determine return type for {varname}")
                            if return_prefix is None:
                                return_type = type_
                            else:
                                # Grab the number of bits from type_
                                num_bits = type_[-2:]
                                if num_bits not in {"32", "64"}:  # pragma: no cover
                                    raise TypeError(f"Unexpected number of bits: {num_bits}")
                                return_type = f"{return_prefix}{num_bits}"
                        builtin_op = cls._typed_class(
                            obj, name, type_, return_type, gb_obj, gb_name
                        )
                        obj._add(builtin_op)
        cls._initialized = True
class UnaryOp(OpBase):
    """Container for GraphBLAS unary operators (builtin and user-defined).

    Builtin ops are discovered from the C library via the ``_parse_config``
    regexes below; user-defined ops are JIT-compiled with numba in ``_build``.
    """

    __slots__ = ()
    _module = unary
    _modname = "unary"
    _typed_class = TypedBuiltinUnaryOp
    _parse_config = {
        "trim_from_front": 4,
        "num_underscores": 1,
        "re_exprs": [
            re.compile(
                "^GrB_(IDENTITY|AINV|MINV|ABS|BNOT)"
                "_(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64|FC32|FC64)$"
            ),
            re.compile(
                "^GxB_(LNOT|ONE|POSITIONI1|POSITIONI|POSITIONJ1|POSITIONJ)"
                "_(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)$"
            ),
            re.compile(
                "^GxB_(SQRT|LOG|EXP|LOG2|SIN|COS|TAN|ACOS|ASIN|ATAN|SINH|COSH|TANH|ACOSH"
                "|ASINH|ATANH|SIGNUM|CEIL|FLOOR|ROUND|TRUNC|EXP2|EXPM1|LOG10|LOG1P)"
                "_(FP32|FP64|FC32|FC64)$"
            ),
            re.compile("^GxB_(LGAMMA|TGAMMA|ERF|ERFC|FREXPX|FREXPE)_(FP32|FP64)$"),
            re.compile("^GxB_(IDENTITY|AINV|MINV|ONE|CONJ)_(FC32|FC64)$"),
        ],
        "re_exprs_return_bool": [
            re.compile("^GrB_LNOT$"),
            re.compile("^GxB_(ISINF|ISNAN|ISFINITE)_(FP32|FP64|FC32|FC64)$"),
        ],
        "re_exprs_return_float": [re.compile("^GxB_(CREAL|CIMAG|CARG|ABS)_(FC32|FC64)$")],
    }

    @classmethod
    def _build(cls, name, func, *, anonymous=False):
        """JIT-compile *func* for every sample dtype it accepts.

        Each dtype that *func* handles without raising yields a typed
        GrB_UnaryOp; dtypes where compilation or the sample call fails are
        silently skipped. Raises UdfParseError if no dtype succeeds.
        """
        if type(func) is not FunctionType:
            raise TypeError(f"UDF argument must be a function, not {type(func)}")
        if name is None:
            name = getattr(func, "__name__", "<anonymous_unary>")
        success = False
        new_type_obj = cls(name, anonymous=anonymous)
        return_types = {}
        nt = numba.types
        for type_, sample_val in _sample_values.items():
            type_ = lookup_dtype(type_)
            # Check if func can handle this data type
            try:
                with np.errstate(divide="ignore", over="ignore", under="ignore", invalid="ignore"):
                    ret = func(sample_val)
                ret_type = lookup_dtype(type(ret))
                if ret_type != type_ and (
                    ("INT" in ret_type.name and "INT" in type_.name)
                    or ("FP" in ret_type.name and "FP" in type_.name)
                    or ("FC" in ret_type.name and "FC" in type_.name)
                    or (
                        type_ == "UINT64"
                        and ret_type == "FP64"
                        and return_types.get("INT64") == "INT64"
                    )
                ):
                    # Downcast `ret_type` to `type_`.
                    # This is what users want most of the time, but we can't make a perfect rule.
                    # There should be a way for users to be explicit.
                    ret_type = type_
                elif type_ == "BOOL" and ret_type == "INT64" and return_types.get("INT8") == "INT8":
                    ret_type = INT8
                # Numba is unable to handle BOOL correctly right now, but we have a workaround
                # See: https://github.com/numba/numba/issues/5395
                # We're relying on coercion behaving correctly here
                input_type = INT8 if type_ == "BOOL" else type_
                return_type = INT8 if ret_type == "BOOL" else ret_type
                # JIT the func so it can be used from a cfunc
                unary_udf = numba.njit(func)
                # Build wrapper because GraphBLAS wants pointers and void return
                wrapper_sig = nt.void(
                    nt.CPointer(return_type.numba_type),
                    nt.CPointer(input_type.numba_type),
                )
                if type_ == "BOOL":
                    if ret_type == "BOOL":
                        def unary_wrapper(z, x):
                            z[0] = bool(unary_udf(bool(x[0])))  # pragma: no cover
                    else:
                        def unary_wrapper(z, x):
                            z[0] = unary_udf(bool(x[0]))  # pragma: no cover
                elif ret_type == "BOOL":
                    def unary_wrapper(z, x):
                        z[0] = bool(unary_udf(x[0]))  # pragma: no cover
                else:
                    def unary_wrapper(z, x):
                        z[0] = unary_udf(x[0])  # pragma: no cover
                unary_wrapper = numba.cfunc(wrapper_sig, nopython=True)(unary_wrapper)
                new_unary = ffi_new("GrB_UnaryOp*")
                check_status_carg(
                    lib.GrB_UnaryOp_new(
                        new_unary, unary_wrapper.cffi, ret_type.gb_obj, type_.gb_obj
                    ),
                    "UnaryOp",
                    new_unary,
                )
                op = TypedUserUnaryOp(
                    new_type_obj, name, type_.name, ret_type.name, new_unary[0], func, unary_udf
                )
                new_type_obj._add(op)
                success = True
                return_types[type_.name] = ret_type.name
            except Exception:
                continue
        if success:
            return new_type_obj
        else:
            raise UdfParseError("Unable to parse function using Numba")

    @classmethod
    def register_anonymous(cls, func, name=None, *, parameterized=False):
        """Create a UnaryOp from *func* without registering it in a module."""
        if parameterized:
            return ParameterizedUnaryOp(name, func, anonymous=True)
        return cls._build(name, func, anonymous=True)

    @classmethod
    def register_new(cls, name, func, *, parameterized=False):
        """Build a UnaryOp from *func* and attach it under ``unary.<name>``."""
        module, funcname = cls._remove_nesting(name)
        if parameterized:
            unary_op = ParameterizedUnaryOp(name, func)
        else:
            unary_op = cls._build(name, func)
        setattr(module, funcname, unary_op)
        # Also save it to `grblas.op` if not yet defined
        module, funcname = cls._remove_nesting(name, module=op, modname="op", strict=False)
        if not hasattr(module, funcname):
            setattr(module, funcname, unary_op)

    __call__ = TypedBuiltinUnaryOp.__call__
class BinaryOp(OpBase):
__slots__ = "_monoid"
_module = binary
_modname = "binary"
_typed_class = TypedBuiltinBinaryOp
_parse_config = {
"trim_from_front": 4,
"num_underscores": 1,
"re_exprs": [
re.compile(
"^GrB_(FIRST|SECOND|PLUS|MINUS|TIMES|DIV|MIN|MAX)"
"_(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64|FC32|FC64)$"
),
re.compile(
"GrB_(BOR|BAND|BXOR|BXNOR)" "_(INT8|INT16|INT32|INT64|UINT8|UINT16|UINT32|UINT64)$"
),
re.compile(
"^GxB_(POW|RMINUS|RDIV|PAIR|ANY|ISEQ|ISNE|ISGT|ISLT|ISGE|ISLE|LOR|LAND|LXOR)"
"_(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64|FC32|FC64)$"
),
re.compile("^GxB_(FIRST|SECOND|PLUS|MINUS|TIMES|DIV)_(FC32|FC64)$"),
re.compile("^GxB_(ATAN2|HYPOT|FMOD|REMAINDER|LDEXP|COPYSIGN)_(FP32|FP64)$"),
re.compile(
"GxB_(BGET|BSET|BCLR|BSHIFT|FIRSTI1|FIRSTI|FIRSTJ1|FIRSTJ"
"|SECONDI1|SECONDI|SECONDJ1|SECONDJ)"
"_(INT8|INT16|INT32|INT64|UINT8|UINT16|UINT32|UINT64)$"
),
],
"re_exprs_return_bool": [
re.compile("^GrB_(LOR|LAND|LXOR|LXNOR)$"),
re.compile(
"^GrB_(EQ|NE|GT|LT|GE|LE)_"
"(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)$"
),
re.compile(
"^GxB_(LOR|LAND|LXOR)_"
"(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)$"
),
re.compile("^GxB_(EQ|NE)_(FC32|FC64)$"),
],
"re_exprs_return_complex": [re.compile("^GxB_(CMPLX)_(FP32|FP64)$")],
}
    @classmethod
    def _build(cls, name, func, *, anonymous=False):
        """JIT-compile binary *func* for every sample dtype it accepts.

        Mirrors ``UnaryOp._build``: for each dtype where ``func(sample,
        sample)`` succeeds, a numba cfunc wrapper is registered as a
        GrB_BinaryOp; failing dtypes are skipped. Raises UdfParseError if
        no dtype succeeds.
        """
        if not isinstance(func, FunctionType):
            raise TypeError(f"UDF argument must be a function, not {type(func)}")
        if name is None:
            name = getattr(func, "__name__", "<anonymous_binary>")
        success = False
        new_type_obj = cls(name, anonymous=anonymous)
        return_types = {}
        nt = numba.types
        for type_, sample_val in _sample_values.items():
            type_ = lookup_dtype(type_)
            # Check if func can handle this data type
            try:
                with np.errstate(divide="ignore", over="ignore", under="ignore", invalid="ignore"):
                    ret = func(sample_val, sample_val)
                ret_type = lookup_dtype(type(ret))
                if ret_type != type_ and (
                    ("INT" in ret_type.name and "INT" in type_.name)
                    or ("FP" in ret_type.name and "FP" in type_.name)
                    or ("FC" in ret_type.name and "FC" in type_.name)
                    or (
                        type_ == "UINT64"
                        and ret_type == "FP64"
                        and return_types.get("INT64") == "INT64"
                    )
                ):
                    # Downcast `ret_type` to `type_`.
                    # This is what users want most of the time, but we can't make a perfect rule.
                    # There should be a way for users to be explicit.
                    ret_type = type_
                elif type_ == "BOOL" and ret_type == "INT64" and return_types.get("INT8") == "INT8":
                    ret_type = INT8
                # Numba is unable to handle BOOL correctly right now, but we have a workaround
                # See: https://github.com/numba/numba/issues/5395
                # We're relying on coercion behaving correctly here
                input_type = INT8 if type_ == "BOOL" else type_
                return_type = INT8 if ret_type == "BOOL" else ret_type
                # JIT the func so it can be used from a cfunc
                binary_udf = numba.njit(func)
                # Build wrapper because GraphBLAS wants pointers and void return
                wrapper_sig = nt.void(
                    nt.CPointer(return_type.numba_type),
                    nt.CPointer(input_type.numba_type),
                    nt.CPointer(input_type.numba_type),
                )
                if type_ == "BOOL":
                    if ret_type == "BOOL":
                        def binary_wrapper(z, x, y):
                            z[0] = bool(binary_udf(bool(x[0]), bool(y[0])))  # pragma: no cover
                    else:
                        def binary_wrapper(z, x, y):
                            z[0] = binary_udf(bool(x[0]), bool(y[0]))  # pragma: no cover
                elif ret_type == "BOOL":
                    def binary_wrapper(z, x, y):
                        z[0] = bool(binary_udf(x[0], y[0]))  # pragma: no cover
                else:
                    def binary_wrapper(z, x, y):
                        z[0] = binary_udf(x[0], y[0])  # pragma: no cover
                binary_wrapper = numba.cfunc(wrapper_sig, nopython=True)(binary_wrapper)
                new_binary = ffi_new("GrB_BinaryOp*")
                check_status_carg(
                    lib.GrB_BinaryOp_new(
                        new_binary,
                        binary_wrapper.cffi,
                        ret_type.gb_obj,
                        type_.gb_obj,
                        type_.gb_obj,
                    ),
                    "BinaryOp",
                    new_binary,
                )
                op = TypedUserBinaryOp(
                    new_type_obj, name, type_.name, ret_type.name, new_binary[0], func, binary_udf
                )
                new_type_obj._add(op)
                success = True
                return_types[type_.name] = ret_type.name
            except Exception:
                continue
        if success:
            return new_type_obj
        else:
            raise UdfParseError("Unable to parse function using Numba")
@classmethod
def register_anonymous(cls, func, name=None, *, parameterized=False):
if parameterized:
return ParameterizedBinaryOp(name, func, anonymous=True)
return cls._build(name, func, anonymous=True)
@classmethod
def register_new(cls, name, func, *, parameterized=False):
module, | |
<filename>acsm/utils/bird_vis.py
"""
Code borrowed from
https://github.com/akanazawa/cmr/blob/master/utils/bird_vis.py
Visualization helpers specific to birds.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torch.autograd import Variable
import numpy as np
import os.path as osp
import cv2
import pdb
from . import cub_parse
from ..nnutils.nmr import NeuralRenderer
from ..utils import transformations
from . import visutil
import pdb
class VisRenderer(object):
"""
Utility to render meshes using pytorch NMR
faces are F x 3 or 1 x F x 3 numpy
"""
def __init__(self, img_size, faces, t_size=3):
self.renderer = NeuralRenderer(img_size)
self.faces = Variable(
torch.IntTensor(faces).cuda(), requires_grad=False)
if self.faces.dim() == 2:
self.faces = torch.unsqueeze(self.faces, 0)
default_tex = np.ones((1, self.faces.shape[1], t_size, t_size, t_size,
3))
blue = np.array([156, 199, 234.]) / 255.
default_tex = default_tex * blue
# Could make each triangle different color
self.default_tex = Variable(
torch.FloatTensor(default_tex).cuda(), requires_grad=False)
# rot = transformations.quaternion_about_axis(np.pi/8, [1, 0, 0])
# This is median quaternion from sfm_pose
# rot = np.array([ 0.66553962, 0.31033762, -0.02249813, 0.01267084])
# This is the side view:
import cv2
R0 = cv2.Rodrigues(np.array([np.pi / 3, 0, 0]))[0]
R1 = cv2.Rodrigues(np.array([0, np.pi / 2, 0]))[0]
R = R1.dot(R0)
R = np.vstack((np.hstack((R, np.zeros((3, 1)))), np.array([0, 0, 0,
1])))
rot = transformations.quaternion_from_matrix(R, isprecise=True)
cam = np.hstack([0.75, 0, 0, rot])
self.default_cam = Variable(
torch.FloatTensor(cam).cuda(), requires_grad=False)
self.default_cam = torch.unsqueeze(self.default_cam, 0)
    def __call__(self, verts, cams=None, texture=None, rend_mask=False):
        """Render the mesh and return an image as a uint8 numpy array.

        verts is |V| x 3 cuda torch Variable
        cams is 7, cuda torch Variable
        Returns N x N x 3 numpy (values in [0, 255]). With ``rend_mask``
        True, the silhouette is rendered and replicated across 3 channels.
        """
        if texture is None:
            texture = self.default_tex
        elif texture.dim() == 5:
            # Here input it F x T x T x T x 3 (instead of F x T x T x 3)
            # So add batch dim.
            texture = torch.unsqueeze(texture, 0)
        if cams is None:
            cams = self.default_cam
        elif cams.dim() == 1:
            cams = torch.unsqueeze(cams, 0)
        if verts.dim() == 2:
            # Add batch dim for the renderer.
            verts = torch.unsqueeze(verts, 0)
        verts = asVariable(verts)
        cams = asVariable(cams)
        texture = asVariable(texture)
        if rend_mask:
            # Mask render has no texture; tile the single channel to RGB.
            rend = self.renderer.forward(verts, self.faces, cams)
            rend = rend.repeat(3, 1, 1)
            rend = rend.unsqueeze(0)
        else:
            rend = self.renderer.forward(verts, self.faces, cams, texture)
        # CHW -> HWC, clamp to [0, 1], scale to 8-bit.
        rend = rend.data.cpu().numpy()[0].transpose((1, 2, 0))
        rend = np.clip(rend, 0, 1) * 255.0
        return rend.astype(np.uint8)
def rotated(self, vert, deg, axis=[0, 1, 0], cam=None, texture=None):
    """Render the mesh after rotating it ``deg`` degrees about ``axis``.

    vert is N x 3, torch FloatTensor (or Variable); the rotation is applied
    about the vertex centroid so the mesh stays in place.
    """
    import cv2
    # Build the rotation matrix from an axis-angle (Rodrigues) vector.
    rodrigues_vec = np.deg2rad(deg) * np.array(axis)
    rot_mat = convert_as(torch.FloatTensor(cv2.Rodrigues(rodrigues_vec)[0]), vert)
    centroid = vert.mean(0)
    centered = vert - centroid
    rotated_verts = torch.t(torch.matmul(rot_mat, torch.t(centered))) + centroid
    return self.__call__(rotated_verts, cams=cam, texture=texture)
def diff_vp(self,
            verts,
            cam=None,
            angle=90,
            axis=[1, 0, 0],
            texture=None,
            kp_verts=None,
            new_ext=None,
            extra_elev=False):
    """Render ``verts`` from a different viewpoint.

    The camera rotation is left-multiplied by an ``angle``-degree rotation
    about ``axis`` (optionally plus an extra elevation), and the camera
    extrinsics [s, tx, ty] are replaced by ``new_ext``.

    verts: vertices to render.
    cam: 7D camera [s, tx, ty, quaternion]; defaults to the default camera.
    kp_verts: if given, these keypoint vertices are projected with the new
        camera and drawn on top of the rendering.
    """
    if cam is None:
        cam = self.default_cam[0]
    if new_ext is None:
        new_ext = [0.6, 0, 0]
    # Cam is 7D: [s, tx, ty, rot]
    import cv2
    cam = asVariable(cam)
    quat = cam[-4:].view(1, 1, -1)
    R = transformations.quaternion_matrix(
        quat.squeeze().data.cpu().numpy())[:3, :3]
    rad_angle = np.deg2rad(angle)
    rotate_by = cv2.Rodrigues(rad_angle * np.array(axis))[0]
    # Left-multiply: rotate in the world frame, not the camera frame.
    # new_R = R.dot(rotate_by)
    new_R = rotate_by.dot(R)
    if extra_elev:
        # Left multiply the camera by an extra elevation on X.
        # (np.pi / 9 is 20 degrees; the original comment said 30.)
        R_elev = cv2.Rodrigues(np.array([np.pi / 9, 0, 0]))[0]
        new_R = R_elev.dot(new_R)
    # Make homogeneous so quaternion_from_matrix accepts it.
    new_R = np.vstack(
        [np.hstack((new_R, np.zeros((3, 1)))),
         np.array([0, 0, 0, 1])])
    new_quat = transformations.quaternion_from_matrix(
        new_R, isprecise=True)
    new_quat = Variable(torch.Tensor(new_quat).cuda(), requires_grad=False)
    # new_cam = torch.cat([cam[:-4], new_quat], 0)
    new_ext = Variable(torch.Tensor(new_ext).cuda(), requires_grad=False)
    new_cam = torch.cat([new_ext, new_quat], 0)
    rend_img = self.__call__(verts, cams=new_cam, texture=texture)
    if kp_verts is None:
        return rend_img
    else:
        # Project the keypoints with the new camera and draw them on top.
        kps = self.renderer.project_points(
            kp_verts.unsqueeze(0), new_cam.unsqueeze(0))
        kps = kps[0].data.cpu().numpy()
        return kp2im(kps, rend_img, radius=1)
def set_bgcolor(self, color):
    """Forward the background-color setting to the wrapped renderer."""
    renderer = self.renderer
    renderer.set_bgcolor(color)
def set_light_dir(self, direction, int_dir=0.8, int_amb=0.8):
    """Configure the innermost renderer's directional and ambient lighting.

    direction: light direction vector.
    int_dir: directional light intensity.
    int_amb: ambient light intensity.
    """
    core = self.renderer.renderer.renderer
    core.light_direction = direction
    core.light_intensity_directional = int_dir
    core.light_intensity_ambient = int_amb
def set_light_status(self, use_lights):
    """Enable or disable lighting on the innermost renderer."""
    self.renderer.renderer.renderer.use_lights = use_lights
    return
def contour_alphas(H, W, real_img=False):
    """Build a binary H x W alpha mask containing a grid of contour lines.

    The mask has ``n_lines_H`` horizontal and ``n_lines_W`` vertical bands,
    each ``2 * line_width + 1`` pixels thick.

    Args:
        H, W: output height and width in pixels.
        real_img: kept for interface compatibility; both branches currently
            use the same line widths.

    Returns:
        1 x H x W torch.Tensor with 1.0 on the contour lines, 0.0 elsewhere.
    """
    alphas = np.zeros((H, W))
    n_lines_H = 20
    n_lines_W = 10
    # The two branches are identical today but are kept separate so the
    # real-image widths can be tuned independently.
    if real_img:
        line_width_x = 10
        line_width_y = 10
    else:
        line_width_x = 10
        line_width_y = 10
    # Fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24,
    # which made this raise AttributeError; the builtin int is equivalent.
    line_pos_x = (np.linspace(0, W - 1, n_lines_W, endpoint=False) + 0.5 * W / n_lines_W).astype(int)
    line_pos_y = (np.linspace(0, H - 1, n_lines_H, endpoint=False) + 0.5 * H / n_lines_H).astype(int)
    # NOTE(review): when a line position is closer to the border than the
    # line width, the negative slice start makes the band empty and that
    # line is silently skipped; preserved as-is to keep the output stable.
    for x in line_pos_x:
        alphas[:, x - line_width_x: x + line_width_x + 1] = 1
    for y in line_pos_y:
        alphas[y - line_width_y: y + line_width_y + 1, :] = 1
    return torch.Tensor(alphas).unsqueeze(0)
def sample_UV_contour(img, uv_map, uv_img, mask, real_img=False):
    """Visualize a predicted UV mapping by sampling ``uv_img`` through it.

    Args:
        img: 3 x H x W image tensor.
        uv_map: H x W x 2 tensor of UV coordinates in [0, 1].
        uv_img: 3 x H' x W' texture image to be sampled via the UV map.
        mask: 1 x H x W foreground mask.
        real_img: if True, blend more of the original image into the output.

    Returns:
        3 x H x W tensor visualizing the UV mapping.
    """
    img = img.unsqueeze(0)
    uv_map = uv_map.unsqueeze(0)
    uv_img = uv_img.unsqueeze(0)
    # grid_sample expects sampling coordinates in [-1, 1].
    uv_sample = torch.nn.functional.grid_sample(uv_img, 2 * uv_map - 1).squeeze(0)
    uv_sample = uv_sample * mask + (1 - mask)
    # NOTE: the "* 0 + 1" neutralizes the contour pattern, i.e. ``alphas``
    # is a constant all-ones mask; kept as-is to preserve the output.
    alphas = contour_alphas(uv_img.shape[2], uv_img.shape[3], real_img).unsqueeze(0) * 0 + 1
    alpha_sample = torch.nn.functional.grid_sample(alphas, 2 * uv_map - 1).squeeze(0)
    alpha_sample = (alpha_sample > 0.0).float() * 0.7
    alpha_sample = alpha_sample * mask
    if real_img:
        # Fix: the original assigned ``uv_rendering`` twice here; the first
        # assignment was dead code (immediately overwritten) and is removed.
        # Blend: sampled texture where alpha, dimmed image inside the mask,
        # full image outside the mask.
        uv_rendering = (uv_sample * alpha_sample) * 1.0 + img.squeeze(0) * (1 - alpha_sample) * 0.4 * (mask) + img.squeeze(0) * (1 - alpha_sample) * (1 - mask)
    else:
        uv_rendering = (uv_sample * alpha_sample) + (img.squeeze(0) * (1 - alpha_sample))
    return uv_rendering
def draw_points_on_image(img, points,):
    """Return a copy of ``img`` with a filled black dot at every point."""
    canvas = img.copy()
    black = (0, 0, 0)
    for point in points:
        canvas = cv2.circle(canvas, (point[0], point[1]), 5,
                            (black[0] * 255, black[1] * 255, black[2] * 255), -1)
    return canvas
def draw_keypoint_on_image(img, keypoints, keypoints_vis, color_map=None):
    """Return a copy of ``img`` with visible keypoints drawn as filled circles.

    keypoints_vis: visibility flags; a keypoint is drawn when its flag > 0.
    color_map: optional per-keypoint RGB colors in [0, 1]; defaults to red
        in BGR convention.
    """
    canvas = img.copy()
    for idx, (point, visible) in enumerate(zip(keypoints, keypoints_vis)):
        if visible <= 0:
            continue
        color = (0, 0, 255) if color_map is None else color_map[idx]
        canvas = cv2.circle(canvas, (point[0], point[1]), 10,
                            (color[0] * 255, color[1] * 255, color[2] * 255), -1)
    return canvas
def write_on_image(img, text, location):  ## location is x,y
    """Return a copy of ``img`` with ``text`` drawn at ``location`` (x, y)."""
    canvas = img.copy()
    red = (0, 0, 255)
    canvas = cv2.putText(canvas, "{}".format(text), (location[0], location[1]),
                         cv2.FONT_HERSHEY_SIMPLEX, 0.5, red, thickness=2)
    return canvas
def draw_keypoint_and_text_on_image(img, keypoints, keypoints_vis, color_map=None, text=None, text_col=None):
    """Return a copy of ``img`` with visible keypoints drawn as circles and
    (optionally) per-keypoint text labels written next to them.

    keypoints_vis: visibility flags; a keypoint is drawn when its flag > 0.
    color_map / text_col: optional per-keypoint circle / text colors.
    text: optional per-keypoint label strings.
    """
    img = img.copy()
    for kpx, (keypoint, vis) in enumerate(zip(keypoints, keypoints_vis)):
        if vis > 0:
            color = (0, 0, 255)
            if color_map is not None:
                color = color_map[kpx]
            img = cv2.circle(img, (keypoint[0], keypoint[1]), 5, (color[0]*255, color[1]*255, color[2]*255), -1)
            # NOTE(review): indentation reconstructed from a whitespace-
            # stripped source; the label below is assumed to be drawn only
            # for visible keypoints -- confirm against upstream.
            color = (0, 0, 255)
            if text_col is not None:
                color = text_col[kpx]
            if text is not None:
                img = cv2.putText(img, "{}".format(text[kpx]), (keypoint[0], keypoint[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness=1)
    return img
def save_obj_with_texture(name, results_path, texture_img, mean_shape):
    """Write a textured mesh as a Wavefront ``<name>.obj`` / ``.mtl`` / ``.png``.

    Args:
        name: base filename (without extension) for the obj/mtl/png triple.
        results_path: output directory (created if missing).
        texture_img: texture image, saved alongside as ``<name>.png``.
        mean_shape: dict with 'verts' (N x 3 positions), 'uv_verts'
            (N x 2 UV coordinates) and 'faces' (F x 3 zero-based indices).
    """
    visutil.mkdir(results_path)
    verts = np.round(mean_shape['verts'], 5)
    uv_verts = np.round(mean_shape['uv_verts'], 5)
    faces = mean_shape['faces']
    obj_file = osp.join(results_path, '{}.obj'.format(name))
    with open(obj_file, 'w') as f:
        f.write('mtllib {}.mtl\n'.format(name))
        ## write vertices
        for v in verts:
            f.write("v {} {} {}\n".format(v[0], v[1], v[2]))
        ## write texture vertices (V is flipped: obj uses a bottom-left origin)
        for v in uv_verts:
            f.write("vt {} {}\n".format(v[0], 1 - v[1]))
        f.write('usemtl bird\n')
        f.write('s 1\n')
        ## write faces (obj face indices are 1-based)
        faces = faces + 1
        for fv in faces:
            f.write('f {}/{}/ {}/{}/ {}/{}/ \n'.format(fv[0], fv[0], fv[1], fv[1], fv[2], fv[2]))
        # for fv in faces:
        #     f.write('f {}// {}// {}// \n'.format(fv[0]+1, fv[1]+1, fv[2]+1))
    ## mtl file referencing the texture image
    mtl_file = osp.join(results_path, '{}.mtl'.format(name))
    with open(mtl_file, 'w') as f:
        f.write('# Material Count: 1\n')
        f.write('newmtl bird\n')
        f.write('Ns 96.078431\n')
        f.write('Ka 1.000000 1.000000 1.000000\n')
        f.write('Kd 0.640000 0.640000 0.640000\n')
        f.write('Ks 0.500000 0.500000 0.500000\n')
        f.write('Ke 0.000000 0.000000 0.000000\n')
        f.write('Ni 1.00000\n')
        f.write('d 1.000000\n')
        f.write('illum 2\n')
        f.write('map_Kd {}.png\n'.format(name,))
    ## Dump the texture image
    # texture_img[:,:,0], texture_img[:,:,2] = texture_img[:,:,2], texture_img[:,:,0]
    # texture_img = texture_img[:,:,[2,1,0]]
    # pdb.set_trace()
    visutil.save_image(texture_img, osp.join(results_path, '{}.png'.format(name)))
    return
def merge_textures(foreground, background,):
    '''Composite two (4, H, W) textures using the foreground alpha channel.

    Channel 3 of ``foreground`` acts as a hard alpha: pixels whose alpha is
    above 0.5 take the foreground value, pixels below 0.5 take the
    background value.
    '''
    fg_alpha = foreground[3, None, ...]
    composed = foreground * (fg_alpha > 0.5) + background * (fg_alpha < 0.5)
    return composed
def render_transfer_kps_imgs(keypoint_cmap, img1, img2, kps1, kps2, transfer_kps12, transfer_kps21, common_kps):
    """Render keypoint-transfer visualizations for an image pair.

    Only the keypoints listed in ``common_kps`` are made visible.  Returns a
    dict with the two annotated source images and both transfer directions.
    """
    common_vis = kps1[:, 0] * 0
    common_vis[common_kps] = 1
    visuals = {
        'tfs_a_img1': draw_keypoint_on_image(img1, kps1, common_vis, keypoint_cmap),
        'tfs_d_img2': draw_keypoint_on_image(img2, kps2, common_vis, keypoint_cmap),
        'tfs_b_1to2': draw_keypoint_on_image(img2, transfer_kps12, common_vis, keypoint_cmap),
        'tfs_c_2to1': draw_keypoint_on_image(img1, transfer_kps21, common_vis, keypoint_cmap),
    }
    return visuals
def create_monocolor_texture(uvimg_H, uvimg_W, color=None):
    """Create a constant-color RGBA texture of shape (4, H, W).

    ``color`` is an RGBA list: the RGB entries are interpreted on a 0-255
    scale (divided by 255), while the alpha entry is used as-is.
    """
    if color is None:
        color = [156, 199, 234., 0]
    rgba = np.array(color) / 255.
    rgba[3] = color[3]  # alpha is deliberately not rescaled
    texture = np.ones((uvimg_H, uvimg_W, 4)) * rgba.reshape(1, 1, -1)
    return texture.transpose(2, 0, 1)
def create_kp_heat_map_texture(uvimg_H, uvimg_W, u_cord=None, v_cord=None, color=None, transprancy=1):
    """Create a (4, H, W) texture with a colored box centred on (u, v).

    When both coordinates are given, the texture is zeroed out and a box of
    half-width 3 texels around (v_cord, u_cord) is filled with ``color``
    (default red) and ``transprancy`` in the alpha channel.  Otherwise the
    plain mono-color texture is returned unchanged.
    """
    texture = create_monocolor_texture(uvimg_H, uvimg_W)
    if color is None:
        color = (1, 0, 0)
    half = 3  # half-width of the drawn box, in texels
    if v_cord is not None and u_cord is not None:
        texture = texture * 0
        rows = slice(v_cord - half, v_cord + half)
        cols = slice(u_cord - half, u_cord + half)
        for channel, value in enumerate((color[0], color[1], color[2], transprancy)):
            texture[channel, rows, cols] = value
    return texture
def upsample_img_mask_uv_map(img, mask, uv_map):
    """Upsample an image, mask and UV map by a factor of 4.

    Args:
        img: (3, H, W) tensor, nearest-neighbour upsampled.
        mask: (1, H, W) tensor, nearest-neighbour upsampled.
        uv_map: (H, W, 2) tensor, bilinearly upsampled.

    Returns:
        (img, mask, uv_map) at 4x resolution, uv_map back in (4H, 4W, 2)
        layout.
    """
    # Fix: torch.nn.functional.upsample is deprecated; interpolate is the
    # drop-in replacement (align_corners=False matches the old effective
    # default for bilinear mode).
    uv_map = torch.nn.functional.interpolate(
        uv_map.permute(2, 0, 1).unsqueeze(0), scale_factor=4, mode='bilinear',
        align_corners=False)
    mask = torch.nn.functional.interpolate(
        mask.unsqueeze(0), scale_factor=4, mode='nearest').squeeze(0)
    img = torch.nn.functional.interpolate(
        img.unsqueeze(0), scale_factor=4, mode='nearest').squeeze(0)
    uv_map = uv_map.squeeze().permute(1, 2, 0)
    return img, mask, uv_map
def create_texture_image_from_uv_map(uvimg_H, uvimg_W, uv_map, img, mask):
default_tex = np.ones((uvimg_H, uvimg_W,3))
blue = np.array([156, 199, 234.]) / 255.
default_tex = default_tex * blue.reshape(1,1,-1)
count_tex = np.zeros((uvimg_H, uvimg_W,1))
uv_map_inds = uv_map.copy()
uv_map_inds[:,:,0] = np.clip((uv_map[:,:,0] * uvimg_W).round(), 0, uvimg_W-1)
uv_map_inds[:,:,1] = np.clip((uv_map[:,:,1] * uvimg_H).round(), 0, uvimg_H-1)
uv_map_inds = uv_map_inds.astype(np.int32)
non_zero_inds = | |
out["energy_total"]
scc.energy_total = out["energy_total"]
forces = out["forces"]
if forces is not None:
scc.atom_forces = forces[:, 2:].astype(float) * ureg.hartree / ureg.bohr
scc.single_configuration_calculation_to_system_ref = system
scc.single_configuration_to_calculation_method_ref = method
# Band structure
band_structure = out["band_structure"]
if band_structure is not None:
section_band = section_k_band()
section_band.band_structure_kind = "electronic"
section_band.reciprocal_cell = atomutils.reciprocal_cell(system.lattice_vectors.magnitude) * 1 / ureg.meter
segments = band_structure["segments"]
k_points = to_k_points(segments)
for i_seg, segment in enumerate(segments):
section_segment = section_k_band_segment()
start_end = segment["start_end"]
section_segment.band_k_points = k_points[i_seg]
section_segment.band_segm_start_end = start_end
section_segment.number_of_k_points_per_segment = k_points[i_seg].shape[0]
section_band.m_add_sub_section(section_k_band.section_k_band_segment, section_segment)
# Read energies from the f25-file. If the file is not found, the
# band structure is not written in the archive. The meaning of the
# values is given in an appendix of the Crystal manual.
if f25 is not None:
segments = f25["segments"]
prev_energy = None
prev_k_point = None
first_row = segments[0]["first_row"]
fermi_energy = first_row[4]
scc.energy_reference_fermi = np.array([fermi_energy]) * ureg.hartree
for i_seg, segment in enumerate(segments):
first_row = segment["first_row"]
cols = int(first_row[0])
rows = int(first_row[1])
energies = segment["energies"]
energies = to_array(cols, rows, energies)
# If a segment starts from the previous point, then
# re-report the energy. This way segments get the same
# treatment in the metainfo whether they are continuous
# or not.
start_k_point = section_band.section_k_band_segment[i_seg].band_k_points[0]
end_k_point = section_band.section_k_band_segment[i_seg].band_k_points[-1]
if prev_k_point is not None and np.allclose(prev_k_point, start_k_point):
energies = np.concatenate(([prev_energy], energies), axis=0)
section_band.section_k_band_segment[i_seg].band_energies = energies[None, :] * ureg.hartree
prev_energy = energies[-1]
prev_k_point = end_k_point
scc.m_add_sub_section(section_single_configuration_calculation.section_k_band, section_band)
# DOS
dos = out["dos"]
if dos is not None:
# Read values and energies from the f25-file. If the file is not
# found, the dos is not written in the archive. The meaning of the
# values is given in an appendix of the Crystal manual.
if f25 is not None:
dos_f25 = f25["dos"]
if dos_f25 is not None:
scc_dos = section_single_configuration_calculation()
scc_dos.single_configuration_calculation_to_system_ref = system
scc_dos.single_configuration_to_calculation_method_ref = method
sec_dos = section_dos()
first_row = dos_f25["first_row"]
cols = int(first_row[0])
rows = int(first_row[1])
de = first_row[3]
fermi_energy = first_row[4]
scc_dos.energy_reference_fermi = np.array([fermi_energy]) * ureg.hartree
second_row = dos_f25["second_row"]
start_energy = second_row[1]
sec_dos.dos_energies = (start_energy + np.arange(rows) * de) * ureg.hartree
dos_values = dos_f25["values"]
dos_values = to_array(cols, rows, dos_values)
sec_dos.dos_values = dos_values.T
sec_dos.dos_kind = "electronical"
sec_dos.number_of_dos_values = sec_dos.dos_values.shape[1]
scc_dos.m_add_sub_section(section_single_configuration_calculation.section_dos, sec_dos)
run.m_add_sub_section(section_run.section_single_configuration_calculation, scc_dos)
# Sampling
geo_opt = out["geo_opt"]
if geo_opt is not None:
steps = geo_opt["geo_opt_step"]
if steps is not None:
sampling_method = section_sampling_method()
sampling_method.sampling_method = "geometry_optimization"
sampling_method.geometry_optimization_energy_change = out["energy_change"]
sampling_method.geometry_optimization_geometry_change = out["geometry_change"]
run.m_add_sub_section(section_run.section_sampling_method, sampling_method)
fs = section_frame_sequence()
run.m_add_sub_section(section_run.section_frame_sequence, fs)
# First step is special: it refers to the initial system which
# was printed before entering the geometry optimization loop.
i_system = system
i_energy = steps[0]["energy"]
scc.energy_total = i_energy
frames = []
for step in steps[1:]:
i_scc = section_single_configuration_calculation()
i_system = section_system()
i_energy = step["energy"]
if step["labels_positions_nanotube"] is not None:
i_labels_positions = step["labels_positions_nanotube"]
else:
i_labels_positions = step["labels_positions"]
i_atomic_numbers = i_labels_positions[:, 2]
i_atom_labels = i_labels_positions[:, 3]
i_atom_pos = i_labels_positions[:, 4:7]
i_lattice_parameters = step["lattice_parameters"]
i_cart_pos, i_atomic_numbers, i_atom_labels, i_lattice_vectors = to_system(
i_atomic_numbers,
i_atom_labels,
i_atom_pos,
i_lattice_parameters,
pos_type,
)
i_system.atom_species = i_atomic_numbers
i_system.atom_labels = i_atom_labels
i_system.atom_positions = i_cart_pos
i_system.lattice_vectors = i_lattice_vectors
i_system.configuration_periodic_dimensions = pbc
i_scc.energy_total = i_energy
i_scc.single_configuration_calculation_to_system_ref = i_system
i_scc.single_configuration_to_calculation_method_ref = method
run.m_add_sub_section(section_run.section_system, i_system)
run.m_add_sub_section(section_run.section_single_configuration_calculation, i_scc)
frames.append(i_scc)
fs.frame_sequence_local_frames_ref = frames
fs.number_of_frames_in_sequence = len(fs.frame_sequence_local_frames_ref)
fs.frame_sequence_to_sampling_ref = sampling_method
fs.geometry_optimization_converged = geo_opt["converged"] == "CONVERGED"
# Remove ghost atom information. The metainfo does not provide a very
# good way to deal with them currently so they are simply removed.
remove_ghosts(run)
def to_k_points(segments):
    """Converts the given start and end points, the shrinking factor and the
    number of steps into a list of concrete sampling points in k-space.

    The shrinking factor tells into how many portions one reciprocal basis
    vector is divided. This needs to be done manually as sometimes the
    k-points are not reported in the output.

    Args:
        segments: list of dicts with "start_end" (2 x 3 array),
            "shrinking_factor" and "n_steps" entries.

    Returns:
        List of (n, 3) numpy arrays of k-points, one per segment.
    """
    all_k_points = []
    prev_point = None
    for segment in segments:
        start = segment["start_end"][0, :]
        end = segment["start_end"][1, :]
        shrinking_factor = segment["shrinking_factor"]
        n_steps = segment["n_steps"]
        # Segments that do not continue from the previous segment's end
        # point get special treatment: they omit the final point instead
        # and shorten the step count accordingly.
        end_idx = n_steps + 1
        if prev_point is None or not np.allclose(prev_point, start):
            end_idx = n_steps
            n_steps = n_steps - 1
        delta = end - start
        # Fix: np.int was removed in NumPy 1.24; the builtin int dtype is
        # equivalent for astype.
        start_step = (shrinking_factor * start).astype(int)
        step_size = (shrinking_factor * delta / n_steps).astype(int)
        steps = (start_step + step_size * np.arange(0, end_idx)[:, None])
        k_points = steps / shrinking_factor
        all_k_points.append(k_points)
        prev_point = end
    return all_k_points
def to_system(atomic_numbers, labels, positions, lattice, pos_type="scaled", wrap=False):
    """Converts a Crystal-specific structure format into cartesian positions
    and lattice vectors (if present). The conversion depends on the material
    type.

    Args:
        atomic_numbers: NAT-convention atomic numbers.
        labels: Crystal species names.
        positions: (n_atoms, 3) array; interpretation depends on pos_type:
            "cartesian" (all axes cartesian), "slab" (x, y scaled, z
            cartesian), "nanotube" (x scaled, y, z cartesian) or "scaled".
        lattice: None, a 6-vector of cell parameters, or a 3 x 3 cell matrix.
        wrap: whether to wrap scaled coordinates back into the cell.

    Returns:
        Tuple of (cartesian positions [angstrom], standardized atomic
        numbers, standardized labels, lattice vectors [angstrom] or None).
    """
    # Fix: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    atomic_numbers = std_atomic_number(atomic_numbers.astype(int))
    atom_labels = std_label(labels)
    positions = positions.astype(np.float64)
    # Get the lattice vectors. Fix: initialized to None up front so the name
    # is always bound -- previously it was only assigned inside the
    # "lattice is not None" branch, raising NameError for systems without
    # lattice information.
    lattice_vectors = None
    if lattice is not None:
        if lattice.shape == (6,):
            lattice_vectors = atomutils.cellpar_to_cell(lattice, degrees=True)
        elif lattice.shape == (3, 3):
            lattice_vectors = lattice
    # Convert positions based on the given type
    if pos_type == "cartesian":
        if lattice_vectors is not None and wrap:
            cart_pos = atomutils.wrap_positions(positions, lattice_vectors)
        else:
            cart_pos = positions
    elif pos_type == "slab":
        # x and y are scaled; z is already cartesian.
        n_atoms = atomic_numbers.shape[0]
        scaled_pos = np.zeros((n_atoms, 3), dtype=np.float64)
        scaled_pos[:, 0:2] = positions[:, 0:2]
        if wrap:
            wrapped_pos = atomutils.wrap_positions(scaled_pos)
        else:
            wrapped_pos = scaled_pos
        cart_pos = atomutils.to_cartesian(wrapped_pos, lattice_vectors)
        cart_pos[:, 2:3] = positions[:, 2:3]
    elif pos_type == "nanotube":
        # Only x is scaled; y and z are already cartesian.
        n_atoms = atomic_numbers.shape[0]
        scaled_pos = np.zeros((n_atoms, 3), dtype=np.float64)
        scaled_pos[:, 0:1] = positions[:, 0:1]
        if wrap:
            wrapped_pos = atomutils.wrap_positions(scaled_pos)
        else:
            wrapped_pos = scaled_pos
        cart_pos = atomutils.to_cartesian(wrapped_pos, lattice_vectors)
        cart_pos[:, 1:3] = positions[:, 1:3]
    elif pos_type == "scaled":
        scaled_pos = atomutils.wrap_positions(positions) if wrap else positions
        cart_pos = atomutils.to_cartesian(scaled_pos, lattice_vectors)
    if lattice_vectors is not None:
        lattice_vectors *= ureg.angstrom
    return cart_pos * ureg.angstrom, atomic_numbers, atom_labels, lattice_vectors
def to_float(value):
    """Transforms the Crystal-specific float notation (e.g. "10** -2") into
    a floating point number.
    """
    base_str, _, exp_str = value.partition("**")
    # The exponent may contain embedded whitespace (e.g. "- 2"): strip it.
    exponent = int("".join(exp_str.split()))
    return pow(int(base_str), exponent)
def to_array(cols, rows, values):
    """Transforms the Crystal-specific f25 array syntax into a numpy array.

    The input is a block of fixed-width (12-character) float fields,
    possibly broken over several lines.

    Args:
        cols: number of columns in the result.
        rows: number of rows in the result.
        values: raw string of concatenated 12-character float fields.

    Returns:
        (rows, cols) numpy float64 array.
    """
    # Fix: str.replace returns a new string; the original call discarded the
    # result, so the newlines were never stripped and the code silently
    # relied on textwrap's whitespace handling to keep fields aligned.
    values = values.replace("\n", "")
    values = textwrap.wrap(values, 12)
    values = np.array(values, dtype=np.float64)
    values = values.reshape((rows, cols))
    return values
def std_atomic_number(value):
    """Return the real atomic number for a NAT-convention atomic number.

    In Crystal's NAT convention the real atomic number is the remainder
    when the conventional number is divided by 100.  Works on scalars and
    numpy arrays alike.
    """
    real_number = value % 100
    return real_number
def remove_ghosts(run):
    """Removes ghost atoms from every section_system in the given run.

    Crystal marks ghost atoms with the atomic number 0; the metainfo has no
    good way to represent them, so they are simply dropped from the species,
    labels and positions arrays.
    """
    for system in run.section_system:
        is_ghost = system.atom_species == 0
        if not np.any(is_ghost):
            continue
        system.atom_species = np.delete(system.atom_species, is_ghost)
        system.atom_labels = np.delete(system.atom_labels, is_ghost)
        system.atom_positions = np.delete(system.atom_positions.magnitude, is_ghost, axis=0)
def label_to_atomic_number(value):
    """Given a Crystal-specific uppercase species name, returns the
    corresponding atomic number.
    """
    # Crystal reports species in upper case; ASE's table uses "Fe"-style.
    return ase.data.atomic_numbers[value.lower().capitalize()]
def atomic_numbers_to_labels(value):
    """Given NAT-convention atomic numbers, returns the corresponding
    chemical symbols.
    """
    symbols = np.array(ase.data.chemical_symbols)
    return symbols[std_atomic_number(value)]
def std_label(value):
    """Given Crystal-specific uppercase species names, returns the
    capitalized versions.
    """
    return [name.lower().capitalize() for name in value]
def to_unix_time(value):
    """Parse a Crystal-style timestamp string into a POSIX timestamp.

    Returns None when no value is given.  The parsed datetime is naive, so
    the timestamp is interpreted in the local timezone.
    """
    if value is None:
        return None
    parsed = datetime.datetime.strptime(value.strip(), '%d %m %Y TIME %H:%M:%S.%f')
    return parsed.timestamp()
def to_libxc(exchange, correlation, exchange_correlation):
    """Transforms the Crystal-specific XC naming into a list of
    section_XC_functionals.

    Combined shortcuts are expanded first, then the standalone exchange and
    correlation parts are appended; unknown names are silently skipped.
    """
    shortcut_map = {
        "PBEXC": ["GGA_C_PBE", "GGA_X_PBE"],
        "PBE0": ["HYB_GGA_XC_PBEH"],
        "B3LYP": ["HYB_GGA_XC_B3LYP"],
        "HSE06": ["HYB_GGA_XC_HSE06"],
        "M06": ["HYB_MGGA_XC_M06"],
        "M05-2X": ["HYB_MGGA_XC_M05_2X"],
        "LC-WPBE": ["HYB_GGA_XC_LRC_WPBE"],
    }
    exchange_map = {
        "PBE": "GGA_X_PBE",
        "PBESOL": "GGA_X_PBE_SOL",
        "BECKE": "GGA_X_B88",
        "LDA": "LDA_X",
        "PWGGA": "GGA_X_PW91",
    }
    correlation_map = {
        "PBE": "GGA_C_PBE",
        "PBESOL": "GGA_C_PBE_SOL",
        "PZ": "LDA_C_PZ",
        "WFN": "LDA_C_VWN",
        "PWGGA": "GGA_C_PW91",
    }
    xc_list = []
    # XC's defined with a single combined shortcut.
    if exchange_correlation:
        combined = shortcut_map.get(exchange_correlation.upper())
        if combined:
            xc_list.extend(combined)
    # Standalone exchange part.
    if exchange:
        norm_x = exchange_map.get(exchange.upper())
        if norm_x:
            xc_list.append(norm_x)
    # Standalone correlation part.
    if correlation:
        norm_c = correlation_map.get(correlation.upper())
        if norm_c:
            xc_list.append(norm_c)
    # Build one metainfo section per functional, all with unit weight.
    functionals = []
    for xc in xc_list:
        section = section_XC_functionals()
        section.XC_functional_name = xc
        section.XC_functional_weight = 1.0
        functionals.append(section)
    return functionals
def to_libxc_out(xc, hybridization):
"""Transforms the Crystal-specific XC naming in the | |
return val == 'no', None
Restriction = namedtuple('Restriction', 'kind parse')
restrictions = {
'maxwidth': Restriction('width', _metres),
'maxlength': Restriction('length', _metres),
'maxheight': Restriction('height', _metres),
'maxweight': Restriction('weight', _tonnes),
'maxaxleload': Restriction('wpa', _tonnes),
'hazmat': Restriction('hazmat', _false),
}
hgv_restriction = None
hgv_restriction_shield_text = None
for osm_key, restriction in restrictions.items():
osm_val = props.pop(osm_key, None)
if osm_val is None:
continue
restricted, shield_text = restriction.parse(osm_val)
if not restricted:
continue
if hgv_restriction is None:
hgv_restriction = restriction.kind
hgv_restriction_shield_text = shield_text
else:
hgv_restriction = 'multiple'
hgv_restriction_shield_text = None
if hgv_restriction:
props['hgv_restriction'] = hgv_restriction
if hgv_restriction_shield_text:
props['hgv_restriction_shield_text'] = hgv_restriction_shield_text
if 0 < len(hgv_restriction_shield_text) < 7:
props['hgv_restriction_shield_text_length'] = str(len(hgv_restriction_shield_text))
return shape, props, fid
def load_collision_ranker(fh):
    """Load a CollisionRanker from a YAML file handle.

    The YAML document must be a list of filter / "_reserved" entries.
    """
    import yaml
    from vectordatasource.collision import CollisionRanker
    # Fix: yaml.load without an explicit Loader is unsafe on untrusted input
    # and has required a Loader argument since PyYAML 6. The ranking config
    # is plain data, so the safe loader is sufficient.
    data = yaml.load(fh, Loader=yaml.SafeLoader)
    assert isinstance(data, list)
    return CollisionRanker(data)
def add_collision_rank(ctx):
    """
    Add or update a collision_rank property on features in the given layers.

    The collision rank is looked up from a YAML file consisting of a list of
    filters (same syntax as in kind/min_zoom YAML) and "_reserved" blocks.
    Collision rank indices are automatically assigned based on where in the
    list a matching filter is found.
    """
    zoom = ctx.nominal_zoom
    start_zoom = ctx.params.get('start_zoom', 0)
    end_zoom = ctx.params.get('end_zoom')
    ranker = ctx.resources.get('ranker')
    where = ctx.params.get('where')
    assert ranker, 'add_collision_rank: missing ranker resource'

    # Outside the configured zoom window, do nothing.
    if zoom < start_zoom:
        return None
    if end_zoom is not None and zoom >= end_zoom:
        return None

    if where:
        where = compile(where, 'queries.yaml', 'eval')

    for layer in ctx.feature_layers:
        layer_name = layer['layer_datum']['name']
        for shape, props, fid in layer['features']:
            # the "where" clause limits which features get a collision_rank.
            if where:
                bindings = defaultdict(lambda: None)
                bindings.update(props)
                bindings['layer_name'] = layer_name
                bindings['_has_name'] = _has_name(props)
                if not eval(where, {}, bindings):
                    continue
            props_with_layer = props.copy()
            props_with_layer['$layer'] = layer_name
            rank = ranker((shape, props_with_layer, fid))
            if rank is not None:
                props['collision_rank'] = rank

    return None
# mappings from the fclass_XXX values in the Natural Earth disputed areas data
# to the matching Tilezen kind. Keys are the verbatim Natural Earth feature
# class strings; values are the Tilezen boundary kinds they normalize to.
_REMAP_VIEWPOINT_KIND = {
    'Disputed (please verify)': 'disputed',
    'Indefinite (please verify)': 'indefinite',
    'Indeterminant frontier': 'indeterminate',
    'International boundary (verify)': 'country',
    'Lease limit': 'lease_limit',
    'Line of control (please verify)': 'line_of_control',
    'Overlay limit': 'overlay_limit',
    'Unrecognized': 'unrecognized_country',
    'Map unit boundary': 'map_unit',
    'Breakaway': 'disputed_breakaway',
    'Claim boundary': 'disputed_claim',
    'Elusive frontier': 'disputed_elusive',
    'Reference line': 'disputed_reference_line',
    'Admin-1 region boundary': 'macroregion',
    'Admin-1 boundary': 'region',
    'Admin-1 statistical boundary': 'region',
    'Admin-1 statistical meta bounds': 'region',
    '1st Order Admin Lines': 'region',
    'Unrecognized Admin-1 region boundary': 'unrecognized_macroregion',
    'Unrecognized Admin-1 boundary': 'unrecognized_region',
    'Unrecognized Admin-1 statistical boundary': 'unrecognized_region',
    'Unrecognized Admin-1 statistical meta bounds': 'unrecognized_region',
}
def remap_viewpoint_kinds(shape, props, fid, zoom):
    """
    Remap Natural Earth kinds in kind:* country viewpoints into the standard
    Tilezen nomenclature.
    """
    for key in props.keys():
        if not key.startswith('kind:'):
            continue
        replacement = _REMAP_VIEWPOINT_KIND.get(props[key])
        if replacement is not None:
            props[key] = replacement
    return (shape, props, fid)
def _list_of_countries(value):
"""
Parses a comma or semicolon delimited list of ISO 3166-1 alpha-2 codes,
discarding those which don't match our expected format. We also allow a
special pseudo-country code "iso".
Returns a list of lower-case, stripped country codes (plus "iso").
"""
from re import match
from re import split
countries = []
candidates = split('[,;]', value)
for candidate in candidates:
# should have an ISO 3166-1 alpha-2 code, so should be 2 ASCII
# latin characters.
candidate = candidate.strip().lower()
if candidate == 'iso' or match('[a-z][a-z]', candidate):
countries.append(candidate)
return countries
def unpack_viewpoint_claims(shape, props, fid, zoom):
    """
    Unpack OSM "claimed_by" and "disputed_by" lists into viewpoint kinds.

    For example; "claimed_by=AA;BB;CC" should become "kind:aa=country,
    kind:bb=country, kind:cc=country" (or region, etc... as appropriate for
    the main kind, which should be "unrecognized_TYPE".

    Additionally, "recognized_by=XX;YY;ZZ" indicates that these viewpoints,
    although they don't claim the territory, recognize the claim and should
    see it in their viewpoint as a country/region/county.
    """
    claimed_by = props.get('claimed_by', '')
    recognized_by = props.get('recognized_by', '')
    disputed_by = props.get('disputed_by', '')
    # tz_admin_level selects the base kind (country/region/...) this claim
    # would appear as in the claimant's viewpoint.
    admin_level = str(props.get('tz_admin_level', ''))
    if admin_level:
        base_kind = _ADMIN_LEVEL_TO_KIND.get(admin_level, '')
        if not base_kind:
            # unknown admin level: make it visible for debugging rather
            # than dropping the claim silently.
            base_kind = 'debug'
        # claimants and recognizers see the boundary as the real kind.
        for viewpoint in _list_of_countries(claimed_by):
            props['kind:' + viewpoint] = base_kind
        for viewpoint in _list_of_countries(recognized_by):
            props['kind:' + viewpoint] = base_kind
        # disputants see it as unrecognized.
        # NOTE(review): nesting reconstructed from a whitespace-stripped
        # source; confirm against upstream whether this disputed_by loop
        # sits inside the admin_level guard.
        for viewpoint in _list_of_countries(disputed_by):
            props['kind:' + viewpoint] = 'unrecognized'
    return shape, props, fid
class _DisputeMasks(object):
    """
    Creates a "mask" of polygons by buffering disputed border lines and
    provides an interface through cut() to intersect other border lines and
    apply kind:xx=unrecognized_* to them.

    This allows us to handle disputed borders - we effectively clip them out
    of the disputant's viewpoint by setting a property that will hide them.
    """

    def __init__(self, buffer_distance):
        # distance by which each disputed line is buffered into a polygon
        self.buffer_distance = buffer_distance
        # list of (buffered_shape, disputants, recognizants, claimants)
        self.masks = []

    def add(self, shape, props):
        """Buffer a disputed line into a mask polygon, remembering which
        viewpoints dispute, recognize and claim it."""
        from shapely.geometry import CAP_STYLE
        from shapely.geometry import JOIN_STYLE
        disputed_by = props.get('disputed_by', '')
        disputants = _list_of_countries(disputed_by)
        recognizants = _list_of_countries(props.get('recognized_by', ''))
        claimants = _list_of_countries(props.get('claimed_by', ''))
        if disputants:
            # we use a flat cap to avoid straying too much into nearby lines
            # and a mitred join to avoid creating extra geometry points to
            # represent the curve, as this slows down intersection checks.
            buffered_shape = shape.buffer(
                self.buffer_distance, CAP_STYLE.flat, JOIN_STYLE.mitre)
            self.masks.append((buffered_shape, disputants, recognizants, claimants))

    def empty(self):
        # True when no disputed masks were collected.
        return not self.masks

    def cut(self, shape, props, fid):
        """
        Cut the (shape, props, fid) feature against the masks to apply the
        dispute to the boundary by setting 'kind:xx' to unrecognized.
        """
        updated_features = []
        for mask_shape, disputants, recognizants, claimants in self.masks:
            # we don't want to override a kind:xx if it has already been set
            # (e.g: by a claim), so we filter out disputant viewpoints where
            # a kind override has already been set.
            #
            # this is necessary for dealing with the case where a border is
            # both claimed and disputed in the same viewpoint.
            non_claim_disputants = []
            for disputant in disputants:
                key = 'kind:' + disputant
                if key not in props:
                    non_claim_disputants.append(disputant)
            if shape.intersects(mask_shape):
                # the part inside the mask gets the dispute applied; the
                # remainder (reassigned to ``shape``) continues on to be
                # tested against the next mask.
                cut_shape = shape.intersection(mask_shape)
                cut_shape = _filter_geom_types(cut_shape, _LINE_DIMENSION)
                shape = shape.difference(mask_shape)
                shape = _filter_geom_types(shape, _LINE_DIMENSION)
                if not cut_shape.is_empty:
                    new_props = props.copy()
                    for disputant in non_claim_disputants:
                        new_props['kind:' + disputant] = 'unrecognized'
                    for recognizant in recognizants:
                        new_props['kind:' + recognizant] = 'country'
                    for claimant in claimants:
                        new_props['kind:' + claimant] = 'country'
                    new_props['kind'] = 'disputed_reference_line'
                    updated_features.append((cut_shape, new_props, None))
        # whatever survived all the masks keeps the original properties.
        if not shape.is_empty:
            updated_features.append((shape, props, fid))
        return updated_features
# tuple of boundary kind values on which we should set alternate viewpoints
# from disputed_by ways. Boundaries of any other kind are passed through
# untouched by the dispute handling.
_BOUNDARY_KINDS = ('country', 'region', 'county', 'locality',
                   'aboriginal_lands')
def apply_disputed_boundary_viewpoints(ctx):
"""
Use the dispute features to apply viewpoints to the admin boundaries.
We take the 'mz_internal_dispute_mask' features and build a mask from them.
The mask is used to move the information from 'disputed_by' lists on the
mask features to 'kind:xx' overrides on the boundary features. The mask
features are discarded afterwards.
"""
params = _Params(ctx, 'apply_disputed_boundary_viewpoints')
layer_name = params.required('base_layer')
start_zoom = params.optional('start_zoom', typ=int, default=0)
end_zoom = params.optional('end_zoom', typ=int)
layer = _find_layer(ctx.feature_layers, layer_name)
zoom = ctx.nominal_zoom
if zoom < start_zoom or \
(end_zoom is not None and zoom >= end_zoom):
return None
# we tried intersecting lines against lines, but this often led to a sort
# of "dashed pattern" in the output where numerical imprecision meant two
# lines don't quite intersect.
#
# we solve this by buffering out the shape by a small amount so that we're
# more likely to get a clean cut against the boundary line.
#
# tolerance for zoom is the length of 1px at 256px per tile, so we can take
# a fraction of that to get sub-pixel alignment.
buffer_distance = 0.1 * tolerance_for_zoom(zoom)
# first, separate out the dispute mask geometries
masks = _DisputeMasks(buffer_distance)
# features that we're going to return
new_features = []
# boundaries, which we pull out separately to apply the disputes to
boundaries = []
for shape, props, fid in layer['features']:
kind = props.get('kind')
if kind == 'mz_internal_dispute_mask':
masks.add(shape, props)
elif kind in _BOUNDARY_KINDS:
boundaries.append((shape, props, fid))
# we want to apply disputes to already generally-unrecognised borders
# too, as this allows for multi-level fallback from one viewpoint
# possibly through several others before reaching the default.
elif kind.startswith('unrecognized_'):
if kind[len('unrecognized_'):] in _BOUNDARY_KINDS:
boundaries.append((shape, props, fid))
else:
# pass through this feature - we just ignore it.
new_features.append((shape, props, fid))
# | |
<filename>ansys/dpf/core/server.py
"""
Server
======
Contains the directives necessary to start the DPF server.
"""
from threading import Thread
import io
import platform
import logging
import time
import os
import socket
import subprocess
import grpc
import psutil
import weakref
import atexit
import copy
from ansys import dpf
from ansys.dpf.core.misc import find_ansys, is_ubuntu
from ansys.dpf.core import errors
from ansys.dpf.core._version import __ansys_version__
MAX_PORT = 65535
LOG = logging.getLogger(__name__)
LOG.setLevel("DEBUG")
# default DPF server port
DPF_DEFAULT_PORT = int(os.environ.get("DPF_PORT", 50054))
LOCALHOST = os.environ.get("DPF_IP", "127.0.0.1")
def shutdown_global_server():
    """atexit hook: release the global DPF server so it shuts down cleanly."""
    try:
        if dpf.core.SERVER is not None:  # identity check, not `!= None`
            del dpf.core.SERVER
    except Exception:  # best effort at interpreter exit; never raise here
        pass


atexit.register(shutdown_global_server)
def has_local_server():
    """Check if a local DPF gRPC server has been created.

    Returns
    -------
    bool
        ``True`` when a local DPF gRPC server has been created.
    """
    server = dpf.core.SERVER
    return server is not None
def _global_server():
    """Retrieve the global server if it exists.

    If the global server has not been specified, check if the user
    has specified the "DPF_START_SERVER" environment variable. If
    ``True``, start the server locally. If ``False``, connect to the
    existing server.
    """
    if dpf.core.SERVER is None:
        start_flag = os.environ.get("DPF_START_SERVER", "").lower()
        if start_flag == "false":
            connect_to_server(
                os.environ.get("DPF_IP", LOCALHOST),
                int(os.environ.get("DPF_PORT", DPF_DEFAULT_PORT)),
            )
        else:
            start_local_server()
    return dpf.core.SERVER
def port_in_use(port, host=LOCALHOST):
    """Check if a port is in use at the given host.

    The port must actually "bind" the address. Just checking to see if a
    socket can be created is insufficient because it's possible to run into
    permission errors like: ``An attempt was made to access a socket in a way
    forbidden by its access permissions.``

    Returns
    -------
    bool
        ``True`` when the port is in use, ``False`` when free.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        try:
            sock.bind((host, port))
            return False
        except OSError:
            # all bind failures (address in use, permission denied) derive
            # from OSError; the original bare except also swallowed
            # KeyboardInterrupt/SystemExit
            return True
def check_valid_ip(ip):
    """Check if a valid IP address is entered.

    Parameters
    ----------
    ip : str
        Candidate IPv4 address in dotted-quad notation.

    Raises
    ------
    ValueError
        When ``ip`` is not a valid IPv4 address.
    """
    try:
        socket.inet_aton(ip)
    except OSError:
        # suppress the low-level socket error; the ValueError is the contract
        raise ValueError(f'Invalid IP address "{ip}"') from None
def shutdown_all_session_servers():
    """Shut down all active servers created by this module (best effort)."""
    from ansys.dpf.core import _server_instances

    # iterate over a copy so shutdown() can mutate the registry safely
    for ref in copy.deepcopy(_server_instances):
        try:
            ref().shutdown()
        except Exception as exc:  # dead weakref or already-stopped server
            print(exc.args)
def start_local_server(
    ip=LOCALHOST,
    port=DPF_DEFAULT_PORT,
    ansys_path=None,
    as_global=True,
    load_operators=True,
):
    """Start a new local DPF server at a given port and IP address.

    This method requires Windows and ANSYS 2021 R1 or later. If
    ``as_global=True`` (the default), the server is stored globally,
    replacing the one stored previously. Otherwise, a user must keep a
    handle on their server.

    Parameters
    ----------
    ip : str, optional
        IP address of the remote or local instance to connect to. The
        default is ``"LOCALHOST"``.
    port : int
        Port to connect to the remote instance on. The default is
        ``"DPF_DEFAULT_PORT"``, which is 50054.
    ansys_path : str, optional
        Root path for the Ansys installation directory. For example, ``"/ansys_inc/v212/"``.
        The default is the latest Ansys installation.
    as_global : bool, optional
        Global variable that stores the IP address and port for the DPF
        module. All DPF objects created in this Python session will
        use this IP and port. The default is ``True``.
    load_operators : bool, optional
        Whether to automatically load the math operators. The default is ``True``.

    Returns
    -------
    server : server.DpfServer
    """
    if ansys_path is None:
        # prefer the explicit AWP_ROOT<version> variable, else auto-discover
        ansys_path = os.environ.get("AWP_ROOT" + __ansys_version__, find_ansys())
    if ansys_path is None:
        raise ValueError(
            "Unable to automatically locate the Ansys path. "
            "Manually enter one when starting the server or set it "
            'as the environment variable "ANSYS_PATH"'
        )
    # verify path exists
    if not os.path.isdir(ansys_path):
        raise NotADirectoryError(f'Invalid Ansys path "{ansys_path}"')
    # parse the version to an int and check for supported
    try:
        ver = int(ansys_path[-3:])
        if ver < 211:
            raise errors.InvalidANSYSVersionError(f"Ansys v{ver} does not support DPF")
        if ver == 211 and is_ubuntu():
            raise OSError("DPF on v211 does not support Ubuntu")
    except ValueError:
        # path does not end in a 3-digit version; skip the version check
        pass
    # avoid using any ports in use from existing servers
    used_ports = []
    if dpf.core._server_instances:
        for srv in dpf.core._server_instances:
            if srv():
                used_ports.append(srv().port)
    while port in used_ports:
        port += 1
    # verify port is free
    while port_in_use(port):
        port += 1
    server = None
    n_attempts = 10
    for _ in range(n_attempts):
        try:
            server = DpfServer(
                ansys_path, ip, port, as_global=as_global, load_operators=load_operators
            )
            break
        except errors.InvalidPortError:  # allow socket in use errors
            port += 1
    if server is None:
        # BUG FIX: the middle line was a plain string concatenated to an
        # f-string, so "{ansys_path}" was printed literally instead of
        # being interpolated.
        raise OSError(
            f"Unable to launch the server after {n_attempts} attempts. "
            f"Check the following path:\n{ansys_path}\n\n"
            "or attempt to use a different port"
        )
    dpf.core._server_instances.append(weakref.ref(server))
    return server
def connect_to_server(ip=LOCALHOST, port=DPF_DEFAULT_PORT, as_global=True, timeout=5):
    """Connect to an existing DPF server.

    This method sets the global default channel that is then used for the
    duration of the DPF session.

    Parameters
    ----------
    ip : str
        IP address of the remote or local instance to connect to. The
        default is ``"LOCALHOST"``.
    port : int
        Port to connect to the remote instance on. The default is
        ``"DPF_DEFAULT_PORT"``, which is 50054.
    as_global : bool, optional
        Global variable that stores the IP address and port for the DPF
        module. All DPF objects created in this Python session will
        use this IP and port. The default is ``True``.
    timeout : float, optional
        Maximum number of seconds for the initialization attempt.
        The default is ``5``. Once the specified number of seconds
        passes, the connection fails.

    Examples
    --------
    >>> from ansys.dpf import core as dpf

    Create a server.

    >>> #server = dpf.start_local_server(ip = '127.0.0.1')
    >>> #port = server.port

    Connect to a remote server at a non-default port.

    >>> #specified_server = dpf.connect_to_server('127.0.0.1', port, as_global=False)

    Connect to the localhost at the default port.

    >>> #unspecified_server = dpf.connect_to_server(as_global=False)
    """
    # BUG FIX: `timeout` was accepted but never forwarded, so DpfServer
    # always used its own 10 s default; pass it through explicitly.
    server = DpfServer(
        ip=ip, port=port, as_global=as_global, launch_server=False, timeout=timeout
    )
    dpf.core._server_instances.append(weakref.ref(server))
    return server
class DpfServer:
"""Provides an instance of the DPF server.
Parameters
-----------
server_bin : str
Path for the DPF executable.
ip : str
IP address of the remote or local instance to connect to. The
default is ``"LOCALHOST"``.
port : int
Port to connect to the remote instance on. The default is
``"DPF_DEFAULT_PORT"``, which is 50054.
timeout : float, optional
        Maximum number of seconds for the initialization attempt.
The default is ``10``. Once the specified number of seconds
passes, the connection fails.
as_global : bool, optional
Global variable that stores the IP address and port for the DPF
module. All DPF objects created in this Python session will
use this IP and port. The default is ``True``.
load_operators : bool, optional
Whether to automatically load the math operators. The default
is ``True``.
launch_server : bool, optional
Whether to launch the server on Windows.
"""
    def __init__(
        self,
        ansys_path="",
        ip=LOCALHOST,
        port=DPF_DEFAULT_PORT,
        timeout=10,
        as_global=True,
        load_operators=True,
        launch_server=True,
    ):
        """Start (or attach to) the DPF server and open its gRPC channel."""
        # check valid ip and port
        check_valid_ip(ip)
        if not isinstance(port, int):
            raise ValueError("Port must be an integer")
        if os.name == "posix" and "ubuntu" in platform.platform().lower():
            raise OSError("DPF does not support Ubuntu")
        elif launch_server:
            # launch_dpf is defined elsewhere in this module (not visible here)
            launch_dpf(ansys_path, ip, port)
        self.channel = grpc.insecure_channel("%s:%d" % (ip, port))
        if launch_server is False:
            # only poll for readiness when attaching to an external server
            state = grpc.channel_ready_future(self.channel)
            # verify connection has matured
            # NOTE(review): `_matured` is a private grpc attribute -- confirm
            # it survives grpc upgrades before relying on it
            tstart = time.time()
            while ((time.time() - tstart) < timeout) and not state._matured:
                time.sleep(0.01)
            if not state._matured:
                raise TimeoutError(
                    f"Failed to connect to {ip}:{port} in {timeout} seconds"
                )
        LOG.debug("Established connection to DPF gRPC")
        # assign to global channel when requested
        if as_global:
            dpf.core.SERVER = self
        # TODO: add to PIDs ...
        # store port and ip for later reference
        self.live = True
        self.ansys_path = ansys_path
        self._input_ip = ip
        self._input_port = port
        # owning the process (launch_server=True) enables shutdown() to kill it
        self._own_process = launch_server
@property
def _base_service(self):
if not hasattr(self, "__base_service"):
from ansys.dpf.core.core import BaseService
self.__base_service = BaseService(self, timeout=1)
return self.__base_service
    @property
    def info(self):
        """Server information.

        Returns
        -------
        info : dictionary
            Dictionary with server information, including ``"server_ip"``,
            ``"server_port"``, ``"server_process_id"``, and
            ``"server_version"`` keys.
        """
        # retrieved through the gRPC BaseService helper
        return self._base_service.server_info
@property
def ip(self):
"""IP address of the server.
Returns
-------
ip : str
"""
try:
return self._base_service.server_info["server_ip"]
except:
return ""
@property
def port(self):
"""Port of the server.
Returns
-------
port : int
"""
try:
return self._base_service.server_info["server_port"]
except:
return 0
    @property
    def version(self):
        """Version of the server.

        Returns
        -------
        version : str

        Notes
        -----
        Unlike ``ip`` and ``port``, failures are not swallowed here; any
        error raised while querying the server propagates to the caller.
        """
        return self._base_service.server_info["server_version"]
    def __str__(self):
        """Return a readable summary embedding the server info dictionary."""
        return f"DPF Server: {self.info}"
    def shutdown(self):
        """Stop the server process if this instance owns it (best effort)."""
        if self._own_process and self.live and self._base_service:
            self._base_service._prepare_shutdown()
            # kill the server process directly via its reported PID
            p = psutil.Process(self._base_service.server_info["server_process_id"])
            p.kill()
            time.sleep(0.1)
            self.live = False
            try:
                # clear the global server reference if it points at us
                if id(dpf.core.SERVER) == id(self):
                    dpf.core.SERVER = None
            except:
                # deliberate best-effort: never raise during shutdown
                pass
            try:
                # drop our weakref from the module registry
                # NOTE(review): mutates the list while iterating it (and `i`
                # is unused); later entries may be skipped if several match --
                # confirm intended
                for i, server in enumerate(dpf.core._server_instances):
                    if server() == self:
                        dpf.core._server_instances.remove(server)
            except:
                # deliberate best-effort: never raise during shutdown
                pass
def __eq__(self, other_server):
| |
import numpy as np
from numpy.linalg import norm
from functools import lru_cache
from tqdm import tqdm
from scipy.optimize import linprog
from sklearn.metrics import accuracy_score, f1_score
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams.update({'errorbar.capsize': 2})
def sq(a):
    """Squared Euclidean norm of vector ``a`` (the dot product a·a)."""
    vec = a
    return np.dot(vec, vec)
def cluster_score(data, target, score_type='trace_w'):
    """Within-cluster scatter of `data` grouped by integer labels `target`.

    Labels are assumed to run 0..target.max(). For the default
    'trace_w' the per-class sums of squared deviations are added up;
    any other score_type averages each class sum by its size first.
    """
    num_class = target.max() + 1
    total = 0
    for label in range(num_class):
        members = data[target == label]
        centroid = members.mean(axis=0)
        within = 0
        for row in members:
            diff = row - centroid
            within += np.dot(diff, diff)
        if score_type != 'trace_w':
            within /= len(members)
        total += within
    return total
def get_weights_gap(code_matrix, dich_classifiers=None, weights_type=None):
    """Solve a linear program assigning per-dichotomy weights that maximize
    the minimum (weighted) separation between every pair of class codewords.

    Decision vector is [w_1..w_N, g]; we maximize g (minimize -g) subject to
    sum(w) == N, w >= 0 and, for every class pair (mu, nu),
    g <= sum_j w_j * |diff_j(mu, nu)|. Returns only the weights w.
    """
    l, N = code_matrix.shape
    c = np.zeros(N+1)
    c[-1] = -1
    # A_ub gets one row per unordered class pair: l*(l-1)/2 rows, N+1 columns
    A_ub = []
    b_ub = np.zeros(l*(l-1)//2)
    for nu in range(l):
        for mu in range(nu+1, l):
            A_arr = []
            for j in range(N):  # N = number of dichotomies
                diff_munu = code_matrix[nu][j] - code_matrix[mu][j]
                if weights_type is not None:
                    if weights_type == 'confusion_list':
                        # per-class score; original note (translated): maybe a
                        # dirty hack -- a pairwise [mu, nu].mean() was considered
                        score = dich_classifiers[j][weights_type][mu]
                    else:
                        score = dich_classifiers[j][weights_type]
                    if diff_munu == 1:
                        diff_munu = score
                    else:
                        diff_munu = 1-score
                A_arr.append(-np.abs(diff_munu))
            A_arr.append(1)
            A_ub.append(A_arr)
    A_ub = np.array(A_ub)
    A_ub = np.vstack([A_ub, -np.eye(N+1)[:-1]])  # x_i >= 0
    b_ub = np.append(b_ub, np.zeros(N))
    A_eq = np.ones(N+1).reshape((1, -1))
    A_eq[0][-1] = 0
    b_eq = np.array(N).reshape((-1))
    # NOTE(review): if linprog fails, opt_result['x'] may be None -- confirm
    # callers handle that case
    opt_result = linprog(c, A_ub, b_ub, A_eq, b_eq, options={'disp': False})
    return opt_result['x'][:-1]  # last value is the gap, not a weight
def ex(arr, j, i):
    """Unnormalized Gaussian affinity exp(-||arr[i] - arr[j]||^2)."""
    diff = arr[i] - arr[j]
    return np.exp(-(norm(diff) ** 2))
def p(arr, j, i):
    """Conditional affinity p(j|i): Gaussian kernel normalized over k != i."""
    def kernel(u, v):
        return np.exp(-norm(arr[u] - arr[v]) ** 2)

    numerator = kernel(j, i)
    denominator = sum(kernel(k, i) for k in range(len(arr)) if k != i)
    return numerator / denominator
def d(arr, i, i1, i2):
    """Derivative helper: 2 * (arr[i1, i2] - arr[i, i2])."""
    delta = arr[i1, i2] - arr[i, i2]
    return 2 * delta
def norm1(i, j):
    # NOTE(review): depends on a module-level `arr1` that is not defined in
    # this part of the file, and is shadowed by the local `norm1` helpers
    # inside cost()/get_grad(); likely leftover/dead code -- confirm before use.
    return norm(arr1[i] - arr1[j])**2
def cost(arr1, arr2):
    """KL-divergence-style cost between pairwise Gaussian affinities.

    For each anchor i, p(j|i) is exp(-||x_i - x_j||^2) normalized over all
    k != i; the cost is sum_{i,j} p1(j|i) * log(p1(j|i) / p2(j|i)) computed
    for arr1 vs arr2. Note the j == i terms are included in the double sum,
    exactly as before.
    """
    def _make_p(arr):
        @lru_cache(maxsize=None)
        def _kernel(a, b):
            return np.exp(-norm(arr[a] - arr[b]) ** 2)

        @lru_cache(maxsize=None)
        def _p(j, i):
            denom = sum(_kernel(k, i) for k in range(len(arr)) if k != i)
            return _kernel(j, i) / denom

        return _p

    p1 = _make_p(arr1)
    p2 = _make_p(arr2)
    total = 0
    for i in range(len(arr1)):
        for j in range(len(arr1)):
            total += p1(j, i) * np.log(p1(j, i) / p2(j, i))
    return total
def get_grad(arr1, arr2, i1, i2):
    """Gradient of the affinity-matching cost w.r.t. the entry arr2[i1, i2].

    arr1 -- array without gaps (restricted to fully-known features)
    arr2 -- array containing the imputed (formerly missing) values
    i1, i2 -- row/column coordinates of the NaN entry being optimized
    """
    @lru_cache(maxsize=None)
    def norm1(i, j):
        # NOTE(review): the distance is NOT squared here, whereas cost()
        # uses norm(...)**2 -- confirm whether this mismatch is intentional
        return norm(arr1[i] - arr1[j])
    @lru_cache(maxsize=None)
    def ex1(i, j):
        return np.exp(-norm1(i, j))
    @lru_cache(maxsize=None)
    def p1(j, i):
        # normalized affinity of j to i in the known-feature space
        a = ex1(j, i)
        b = sum(ex1(k, i) for k in range(len(arr1)) if k!=i)
        return a / b
    @lru_cache(maxsize=None)
    def norm2(i, j):
        return norm(arr2[i] - arr2[j])
    @lru_cache(maxsize=None)
    def ex2(i, j):
        return np.exp(-norm2(i, j))
    @lru_cache(maxsize=None)
    def p2(j, i):
        # normalized affinity of j to i in the imputed space
        a = ex2(j, i)
        b = sum(ex2(k, i) for k in range(len(arr2)) if k!=i)
        return a / b
    @lru_cache(maxsize=None)
    def d(i, i1):
        '''Distance term after differentiation: same array (arr2) and the
        same column i2 on both sides.'''
        dist = 2*(arr2[i1, i2] - arr2[i, i2])
        return dist
    def get_i_part(i):
        '''Compute the i-th term of the gradient sum.'''
        s = 0
        s += p1(i1, i) + p1(i, i1)
        s -= p2(i1, i)*(1 + p1(i, i))
        s -= p2(i, i1)*(1 + p1(i1, i1))
        return s * d(i, i1)
    # if verbose:
    #     grad = sum(get_i_part(i) for i in tqdm(range(len(arr1))) if i!=i1)
    # else:
    grad = sum(get_i_part(i) for i in range(len(arr1)) if i!=i1)
    return grad
def get_full_grad(arr1, arr2, nan_coords, verbose=False):
    """Gradient of the cost at every NaN coordinate.

    arr1 -- array without gaps (restricted to fully-known features)
    arr2 -- array containing the imputed values
    nan_coords -- iterable of (row, col) NaN positions
    verbose -- wrap the iteration in a tqdm progress bar
    """
    iterator = tqdm(nan_coords) if verbose else nan_coords
    return np.array([get_grad(arr1, arr2, r, c) for r, c in iterator])
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
def get_mae(arr1, arr2, nan_coords):
    """Mean absolute error between arr1 and arr2 at the given coordinates."""
    truth = np.array([arr1[r, c] for r, c in nan_coords])
    guess = np.array([arr2[r, c] for r, c in nan_coords])
    return np.mean(np.abs(truth - guess))
def get_mape(arr1, arr2, nan_coords):
    """Mean absolute percentage error (%) at the given coordinates."""
    truth = np.array([arr1[r, c] for r, c in nan_coords])
    guess = np.array([arr2[r, c] for r, c in nan_coords])
    return np.mean(np.abs((truth - guess) / truth)) * 100
def get_rmse(arr1, arr2, nan_coords):
    """Root-mean-square error between arr1 and arr2 at the coordinates."""
    truth = np.array([arr1[r, c] for r, c in nan_coords])
    guess = np.array([arr2[r, c] for r, c in nan_coords])
    return np.sqrt(np.mean((truth - guess) ** 2))
def get_rmspe(arr1, arr2, nan_coords):
    """Percentage-error score at the given coordinates.

    NOTE: despite the name, no squaring or square root is applied -- this
    reproduces the original computation mean(100 * |v1 - v2| / v1) exactly
    (the denominator is signed, so entries can be negative).
    """
    truth = np.array([arr1[r, c] for r, c in nan_coords])
    guess = np.array([arr2[r, c] for r, c in nan_coords])
    ratio = np.abs(truth - guess) / truth
    return np.mean(100 * ratio)
def mean_absolute_percentage_error(y_true, y_pred):
    """MAPE in percent; assumes y_true contains no zeros."""
    relative = np.abs((y_true - y_pred) / y_true)
    return 100 * np.mean(relative)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
def get_acc(arr2, target):
    """Accuracy probe -- currently disabled; always returns 0.

    The original body contained an unconditional ``return 0`` followed by an
    unreachable RandomForest cross-validation block; the dead code is removed
    here without changing behavior. Re-enable by cross-validating a
    classifier on (arr2, target) if the accuracy signal is needed again.
    """
    return 0
def set_nans(df0, seed, num_nan_cols, nan_fraction):
    """Knock values out of a copy of ``df0`` to simulate missing data.

    Parameters
    ----------
    df0 : pandas.DataFrame
        Complete source frame (left unmodified).
    seed : int
        Seed for numpy's global RNG (also drives ``DataFrame.sample``).
    num_nan_cols : int
        Number of column draws (with replacement) to corrupt; if negative,
        corrupt ``nan_fraction`` of ALL cells instead.
    nan_fraction : float
        Fraction of rows (per chosen column) or of all cells to set to NaN.

    Returns
    -------
    tuple
        (df with NaNs, df restricted to fully-known columns, mean-imputed df,
         arr with NaNs, original arr, known-columns arr, mean-imputed arr,
         and the (row, col) NaN coordinates).
    """
    df = df0.copy()
    np.random.seed(seed)
    if num_nan_cols >= 0:
        # BUG FIX: np.random.random_integers is deprecated (removed in
        # NumPy 2.0). randint(0, high) draws the identical sequence for the
        # same seed, since random_integers(low, high) == randint(low, high+1).
        nan_cols = np.random.randint(0, df.shape[1], num_nan_cols)
        for col in set(nan_cols):
            df.loc[df.sample(int(nan_fraction * len(df))).index, col] = np.nan
        nan_coords = np.array(np.where(df.isnull().values)).T
    else:
        # corrupt a random subset of all (row, col) cells
        all_pairs = np.array([[i,j] for i in range(df.shape[0]) for j in range(df.shape[1])])
        nan_places = np.random.choice(np.arange(0, df.size), size=int(nan_fraction*df.size), replace=False)
        nan_coords = all_pairs[nan_places]
        for x,y in nan_coords:
            df.iloc[x, y] = np.nan
    print('Num nan places: {}'.format(nan_coords.shape[0]))
    df1 = df.loc[:, df.isnull().sum() == 0]
    df2 = df.fillna(df.mean())
    print(df1.shape, df2.shape)
    arr_nan = df.values     # with gaps (NaNs)
    arr_raw = df0.values    # original values
    arr_known = df1.values  # restricted to fully-known features
    arr_pred = df2.values   # current predictions (mean-imputed)
    return df, df1, df2, arr_nan, arr_raw, arr_known, arr_pred, nan_coords
def Cnk(n, k):
    """Binomial coefficient C(n, k) via factorials; returned as a float."""
    k_fact = nk_fact = n_fact = 1
    running = 1
    for i in range(1, n + 1):
        running *= i
        if i == k:
            k_fact = running
        if i == n - k:
            nk_fact = running
        if i == n:
            n_fact = running
    return n_fact / (nk_fact * k_fact)
def predict_codeword(x, dich_classifiers):
    """Run every dichotomy classifier on one sample and stack the bits."""
    sample = x.reshape(1, -1)
    bits = [entry['model'].predict(sample) for entry in dich_classifiers]
    return np.array(bits).flatten()
def hamming(arr1, arr2, scores=None, weights=1):
    """Hamming distance between codewords, optionally score/weight adjusted.

    Without scores: plain count of mismatching positions. With scores:
    mismatches contribute score*weight and matches contribute
    (1 - score)*weight, summed over all positions.
    """
    mismatch = arr1 != arr2
    if scores is None:
        return mismatch.sum()
    match = arr1 == arr2
    penalty = (mismatch * scores * weights).sum()
    reward = (match * (1 - scores) * weights).sum()
    return penalty + reward
def predict_class(x, dich_classifiers, code_matrix, score_type=None, weights=1, verbose=False):
    """Decode one sample: return the class whose codeword is closest to the
    predicted codeword by (optionally weighted/scored) Hamming distance.
    Ties are broken uniformly at random.
    """
    codeword = predict_codeword(x, dich_classifiers)
    if not score_type:
        hammings = np.array([hamming(codeword, class_code, weights=weights) for class_code in code_matrix])
    else:
        scores = np.array([d[score_type] for d in dich_classifiers])
        if score_type == 'confusion_list':
            # TODO(review): verify the per-class score columns are aligned
            # correctly here (original note, translated from Russian)
            hammings = np.array([hamming(codeword, class_code, scores.T[i], weights=weights) \
                                 for i, class_code in enumerate(code_matrix)])
        else:
            hammings = np.array([hamming(codeword, class_code, scores) for class_code in code_matrix])
    if verbose:
        print(hammings)
    indices = np.where(hammings == hammings.min())
    if len(indices[0]) == 0:
        # NOTE(review): this debug branch references `scores`, which is
        # unbound when score_type is falsy -- it would raise NameError if
        # ever reached (min() of a non-empty array always matches, so the
        # branch looks unreachable in practice)
        print(hammings, hammings.min(), score_type, scores)
    return np.random.choice(indices[0])
def predict_all(X_test, dich_classifiers, code_matrix, score_type=None, weight_type=None):
    """Decode every row of X_test via ECOC; returns (predictions, the number
    of dichotomies whose LP weight exceeds median(weights)/100)."""
    if weight_type is None:
        weights = np.array([1] * len(dich_classifiers))
    elif weight_type == -1:
        weights = get_weights_gap(code_matrix, dich_classifiers, None)
    else:
        weights = get_weights_gap(code_matrix, dich_classifiers, weight_type)
    threshold = np.median(weights) / 100
    num_real_dich = (weights > threshold).sum()
    preds = np.array([
        predict_class(row, dich_classifiers, code_matrix, score_type, weights)
        for row in X_test
    ])
    return preds, num_real_dich
def int2bin(val, l):
    """Encode ``val`` as an l-length big-endian 0/1 vector (float array)."""
    bits = np.zeros(l)
    pos = 0
    while val > 0:
        bits[pos] = val & 1
        val >>= 1  # shift off the bit just stored
        pos += 1
    return bits[::-1]
def add_dich(dich, code_matrix=None):
    """Append `dich` as a new column of code_matrix (create it when None)."""
    column = dich.reshape((-1, 1))
    if code_matrix is None:
        return column
    return np.hstack([code_matrix, column])
def make_random_dichs(l, N):
    """Sample N distinct dichotomies over l classes (all-zeros excluded).

    Codes are drawn without replacement from 1..2**(l-1)-1, so at most
    2**(l-1)-1 dichotomies exist; N is clamped to that maximum.
    """
    max_dichs = 2 ** (l - 1) - 1
    if N > max_dichs:
        N = max_dichs
        print('Dich Num reduced to max={}'.format(N))
    code_matrix = None
    chosen = np.random.choice(np.arange(0, max_dichs), N, replace=False)
    for number in tqdm(chosen, desc='Adding dich'):
        code_matrix = add_dich(int2bin(number + 1, l), code_matrix)
    return code_matrix
def make_random_dichs_old(l, N):
    """Legacy variant: grow the code matrix one random dichotomy at a time.

    NOTE(review): relies on `add_random_dich`, which is not defined in this
    part of the file -- presumably provided elsewhere in the module.
    """
    code_matrix = None
    for i in tqdm(range(N), desc='Adding dich'):
        code_matrix = add_random_dich(l, code_matrix)
    return code_matrix
def make_local_optimal_dichotomy(cur_dich, code_matrix, score_function, verbose=0):
    """Hill-climb one dichotomy: repeatedly flip the single bit that most
    improves score_function, stopping when no flip is strictly better.

    Relies on `does_dich_exist` (defined elsewhere in the module) to reject
    flips that reproduce an already-present dichotomy. Mutates and returns
    ``cur_dich``.
    """
    cur_score = score_function(cur_dich, code_matrix)
    next_score = cur_score
    while True:
        next_dich = cur_dich.copy()
        # candidate score for flipping each bit; -1 marks rejected flips
        next_scores = np.zeros(len(cur_dich)) - 1
        for i in range(len(cur_dich)):
            next_dich = cur_dich.copy()
            next_dich[i] = 1 - next_dich[i]
            if not does_dich_exist(next_dich, code_matrix):  # dichotomy is acceptable
                next_scores[i] = score_function(next_dich, code_matrix)
        next_scores = np.array(next_scores)
        next_score = next_scores.max()
        #print(next_scores)
        if next_score <= cur_score:  # strictly uphill only; the threshold could be relaxed (original note)
            break
        cur_score = next_score
        best_index = np.random.choice(np.flatnonzero(next_scores == next_score))  # it is random of the best
        if verbose > 0:
            print(cur_dich)
        if verbose > 1:
            print(next_score, best_index)
        cur_dich[best_index] = 1 - cur_dich[best_index]
    # if cur_dich.max() == cur_dich.min():
    #     print(next_scores)
    return cur_dich
def make_code_matrix_local(l, N, score_function, verbose=1):
code_matrix = None
for i in tqdm(range(N)):
new_dich = np.random.randint(0, 2, l)
new_dich = | |
<filename>code/python/Vermilion/v1/fds/sdk/Vermilion/api/report_instance_api.py
"""
VRS API documentation
Documentation on all available end points in the VRSAPI # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.Vermilion.api_client import ApiClient, Endpoint as _Endpoint
from fds.sdk.Vermilion.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from fds.sdk.Vermilion.model.inline_object import InlineObject
from fds.sdk.Vermilion.model.inline_response4002 import InlineResponse4002
from fds.sdk.Vermilion.model.inline_response4003 import InlineResponse4003
from fds.sdk.Vermilion.model.inline_response401 import InlineResponse401
from fds.sdk.Vermilion.model.inline_response4011 import InlineResponse4011
from fds.sdk.Vermilion.model.inline_response403 import InlineResponse403
from fds.sdk.Vermilion.model.inline_response4042 import InlineResponse4042
from fds.sdk.Vermilion.model.inline_response4043 import InlineResponse4043
from fds.sdk.Vermilion.model.inline_response406 import InlineResponse406
from fds.sdk.Vermilion.model.report_instance_data import ReportInstanceData
from fds.sdk.Vermilion.model.report_instance_data_response import ReportInstanceDataResponse
from fds.sdk.Vermilion.model.report_instance_list import ReportInstanceList
from fds.sdk.Vermilion.model.report_instance_log_list import ReportInstanceLogList
class ReportInstanceApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
    def __init__(self, api_client=None):
        # Auto-generated wiring: build one pre-configured _Endpoint per API
        # operation; the public wrapper methods dispatch to these.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # POST /v1/report-instances/generate
        self.v1_report_instances_generate_post_endpoint = _Endpoint(
            settings={
                'response_type': (ReportInstanceDataResponse,),
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/v1/report-instances/generate',
                'operation_id': 'v1_report_instances_generate_post',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'inline_object',
                ],
                'required': [
                    'inline_object',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'inline_object':
                        (InlineObject,),
                },
                'attribute_map': {
                },
                'location_map': {
                    'inline_object': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
        # GET /v1/{tenant}/report-instances
        self.v1_tenant_report_instances_get_endpoint = _Endpoint(
            settings={
                'response_type': (ReportInstanceList,),
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/v1/{tenant}/report-instances',
                'operation_id': 'v1_tenant_report_instances_get',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'tenant',
                    'report_definition_code',
                    'entity_codes',
                    'entity_keys',
                    'section_filter',
                    'output_format',
                    'pagination_limit',
                    'pagination_offset',
                ],
                'required': [
                    'tenant',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'tenant':
                        (str,),
                    'report_definition_code':
                        (str,),
                    'entity_codes':
                        (str,),
                    'entity_keys':
                        (str,),
                    'section_filter':
                        (str,),
                    'output_format':
                        (str,),
                    'pagination_limit':
                        (int,),
                    'pagination_offset':
                        (int,),
                },
                'attribute_map': {
                    'tenant': 'tenant',
                    'report_definition_code': 'reportDefinitionCode',
                    'entity_codes': 'entityCodes',
                    'entity_keys': 'entityKeys',
                    'section_filter': 'sectionFilter',
                    'output_format': 'outputFormat',
                    'pagination_limit': '_paginationLimit',
                    'pagination_offset': '_paginationOffset',
                },
                'location_map': {
                    'tenant': 'path',
                    'report_definition_code': 'query',
                    'entity_codes': 'query',
                    'entity_keys': 'query',
                    'section_filter': 'query',
                    'output_format': 'query',
                    'pagination_limit': 'query',
                    'pagination_offset': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # DELETE /v1/{tenant}/report-instances/{reportInstanceId}
        self.v1_tenant_report_instances_report_instance_id_delete_endpoint = _Endpoint(
            settings={
                'response_type': None,
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/v1/{tenant}/report-instances/{reportInstanceId}',
                'operation_id': 'v1_tenant_report_instances_report_instance_id_delete',
                'http_method': 'DELETE',
                'servers': None,
            },
            params_map={
                'all': [
                    'tenant',
                    'report_instance_id',
                ],
                'required': [
                    'tenant',
                    'report_instance_id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'tenant':
                        (str,),
                    'report_instance_id':
                        (str,),
                },
                'attribute_map': {
                    'tenant': 'tenant',
                    'report_instance_id': 'reportInstanceId',
                },
                'location_map': {
                    'tenant': 'path',
                    'report_instance_id': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # GET /v1/{tenant}/report-instances/{reportInstanceId}
        self.v1_tenant_report_instances_report_instance_id_get_endpoint = _Endpoint(
            settings={
                'response_type': None,
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/v1/{tenant}/report-instances/{reportInstanceId}',
                'operation_id': 'v1_tenant_report_instances_report_instance_id_get',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'report_instance_id',
                    'tenant',
                ],
                'required': [
                    'report_instance_id',
                    'tenant',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'report_instance_id':
                        (str,),
                    'tenant':
                        (str,),
                },
                'attribute_map': {
                    'report_instance_id': 'reportInstanceId',
                    'tenant': 'tenant',
                },
                'location_map': {
                    'report_instance_id': 'path',
                    'tenant': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # GET /v1/{tenant}/report-instances/{reportInstanceId}/logs
        self.v1_tenant_report_instances_report_instance_id_logs_get_endpoint = _Endpoint(
            settings={
                'response_type': (ReportInstanceLogList,),
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/v1/{tenant}/report-instances/{reportInstanceId}/logs',
                'operation_id': 'v1_tenant_report_instances_report_instance_id_logs_get',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'tenant',
                    'report_instance_id',
                    'full_log',
                    'sort',
                    'pagination_limit',
                    'pagination_offset',
                ],
                'required': [
                    'tenant',
                    'report_instance_id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'tenant':
                        (str,),
                    'report_instance_id':
                        (str,),
                    'full_log':
                        (str,),
                    'sort':
                        (str,),
                    'pagination_limit':
                        (int,),
                    'pagination_offset':
                        (int,),
                },
                'attribute_map': {
                    'tenant': 'tenant',
                    'report_instance_id': 'reportInstanceId',
                    'full_log': 'fullLog',
                    'sort': '_sort',
                    'pagination_limit': '_paginationLimit',
                    'pagination_offset': '_paginationOffset',
                },
                'location_map': {
                    'tenant': 'path',
                    'report_instance_id': 'path',
                    'full_log': 'query',
                    'sort': 'query',
                    'pagination_limit': 'query',
                    'pagination_offset': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # GET /v1/{tenant}/report-instances/{reportInstanceId}/results/{reportFileName}
        self.v1_tenant_report_instances_report_instance_id_results_report_file_name_get_endpoint = _Endpoint(
            settings={
                'response_type': None,
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/v1/{tenant}/report-instances/{reportInstanceId}/results/{reportFileName}',
                'operation_id': 'v1_tenant_report_instances_report_instance_id_results_report_file_name_get',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'tenant',
                    'report_instance_id',
                    'report_file_name',
                ],
                'required': [
                    'tenant',
                    'report_instance_id',
                    'report_file_name',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'tenant':
                        (str,),
                    'report_instance_id':
                        (str,),
                    'report_file_name':
                        (str,),
                },
                'attribute_map': {
                    'tenant': 'tenant',
                    'report_instance_id': 'reportInstanceId',
                    'report_file_name': 'reportFileName',
                },
                'location_map': {
                    'tenant': 'path',
                    'report_instance_id': 'path',
                    'report_file_name': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
    def v1_report_instances_generate_post(
        self,
        inline_object,
        **kwargs
    ):
        """Generates a report  # noqa: E501

        Generates a report using the specified ID and the JSON in the request body  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.v1_report_instances_generate_post(inline_object, async_req=True)
        >>> result = thread.get()

        Args:
            inline_object (InlineObject):

        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _content_type (str/None): force body content-type.
                Default is None and content-type will be predicted by allowed
                content-types and body.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously

        Returns:
            ReportInstanceDataResponse
                If the method is called asynchronously, returns the request
                thread.
        """
        # Auto-generated wrapper: fill in the framework defaults for any
        # transport option the caller did not supply, then delegate to the
        # pre-configured endpoint object built in __init__.
        kwargs['async_req'] = kwargs.get(
            'async_req', False
        )
        kwargs['_return_http_data_only'] = kwargs.get(
            '_return_http_data_only', True
        )
        kwargs['_preload_content'] = kwargs.get(
            '_preload_content', True
        )
        kwargs['_request_timeout'] = kwargs.get(
            '_request_timeout', None
        )
        kwargs['_check_input_type'] = kwargs.get(
            '_check_input_type', True
        )
        kwargs['_check_return_type'] = kwargs.get(
            '_check_return_type', True
        )
        kwargs['_spec_property_naming'] = kwargs.get(
            '_spec_property_naming', False
        )
        kwargs['_content_type'] = kwargs.get(
            '_content_type')
        kwargs['_host_index'] = kwargs.get('_host_index')
        # the body parameter is passed positionally but dispatched by name
        kwargs['inline_object'] = \
            inline_object
        return self.v1_report_instances_generate_post_endpoint.call_with_http_info(**kwargs)
def v1_tenant_report_instances_get(
self,
tenant,
**kwargs
):
"""Gets a list of report instances # noqa: E501
Gets a list of report instances. This can be filtered down further by including query parameters in the URL. For example, a report definition id can be added so the only report instances returned are the ones with a matching id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_tenant_report_instances_get(tenant, async_req=True)
>>> result = thread.get()
Args:
tenant (str): The code of the tenancy
Keyword Args:
report_definition_code (str): Acts as a filter for the retrieval process. Filters the report instances that match the report definition code. [optional]
entity_codes (str): A series of query parameters used to filter the report instances by entity code. E.g: entityCode=DATE&entityCode=SYSTEM_LANG. [optional]
entity_keys (str): A series of query parameters used to filter the report instances by entity keys. E.g: entityKey=en-gb. [optional]
section_filter (str): Acts as a filter for the retrieval process. Filters the report instances that match the section filter. [optional]
output_format (str): Acts as a filter for the retrieval process. Filters the report instances that match the output format. [optional]
pagination_limit (int): Non-negative maximum number of entries to return. [optional]
pagination_offset (int): Non-negative number of entries to skip. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be | |
#!/bin/env python3
# Compress binary data with RLE-like format
import argparse
import os
import sys
def array2data(arr):
    """Format *arr* as comma-separated text, 16 values per line.

    Every line is prefixed with a tab and the result always ends with a
    single newline (an empty input yields just "\n").
    """
    rows = []
    for start in range(0, len(arr), 16):
        chunk = arr[start:start + 16]
        rows.append("\t" + "".join("{0}, ".format(v) for v in chunk))
    return "\n".join(rows) + "\n"
# print as hex
def hx(num):
    """Render *num* as a zero-padded two-digit hex literal, e.g. 0x0A."""
    return "0x%02X" % num
# for commands bytes
def hxc(num):
    """Render *num* as a parenthesised two-digit hex literal, e.g. (0x0A)."""
    return "(0x%02X)" % num
def enc_poppable(encoding_pair, amount=0):
    """Return True if *amount*+1 more value(s) can be popped off this encoding.

    encoding_pair is [command_byte, payload].  Aborts the program when the
    command is the EOD marker (0x00 must never appear mid-stream) and
    returns False for MON, which enc_pop cannot split yet.
    """
    cmd = encoding_pair[0]
    value = 0
    if cmd == ENC_EOD:
        # 0x00 is only legal as the end-of-data marker
        sys.exit("{0}: error: 0x00 command in data stream".format(args.image[0]))
    if cmd == ENC_MON:
        # just skip / we can't handle that yet
        return False
    if (cmd & 0x80) == 0:
        if (cmd & 0x30) == 0: # INC
            # NOTE(review): mask 0x30 (not 0xF0 as in enc_pop) also routes
            # 0x40-0x4F here; harmless, since both INC and LIT paths answer
            # True for any nonzero cmd -- confirm before tightening
            value = cmd #& 0xF
            return value != 0
        else: # LIT
            # literal runs always have at least one byte left to give up
            return True
    elif (cmd & 0xE0) == ENC_ROW:
        # ROW keeps its repeat count in bits 1-3
        value = (cmd & 0xE) >> 1
    elif (cmd&0xC0) == 0xC0:#those two use a different format
        # INV/ALT keep their count in bits 1-5
        value = (cmd & 0x3E) >> 1
    else:
        # RUN: count in the low 5 bits
        value = cmd & 0x1F
    return value-amount > 0
# function for popping last character from encoding
def enc_pop(encoding_pair):
    """Split the last emitted value(s) off *encoding_pair*.

    encoding_pair is [command_byte, payload].  Returns a tuple
    (popped_bytes, remainder): popped_bytes is a list of the raw byte(s)
    the final step of the command would output, and remainder is the
    shortened encoding ([] when nothing is left).  Unknown commands pop
    nothing and hand the encoding back unchanged.
    """
    cmd = encoding_pair[0]
    if (cmd & 0xF0) == ENC_INC:
        value = (cmd & 0xF) - 1 #-EOD
        if value == 0:
            # had two elements, reduce to verbatim
            return [encoding_pair[1][0]+value+1], [ENC_LIT - 1 + ENC_LIT_MIN, encoding_pair[1]]
        else:
            # last emitted byte is start value + remaining step count
            return [encoding_pair[1][0]+value+1], [cmd-1, encoding_pair[1]]
    if (cmd & 0x80) == 0:#verbatim
        if cmd == 0:
            # one-byte literal: popping it empties the encoding
            return [encoding_pair[1][0]], []
        else:
            return [encoding_pair[1][-1]], [cmd-1, encoding_pair[1][:-1]]
    if (cmd & 0xE0) == ENC_ROW:
        value = (cmd & 0xE) >> 1
        # H (0x10) and L (0x01) bits select 0x00, otherwise 0xFF
        byte1 = 0x00
        byte2 = 0x00
        if (cmd & 0x10) == 0:
            byte1 = 0xFF
        if (cmd & 0x1) == 0:
            byte2 = 0xFF
        if value == 0:
            return [byte1, byte2], []
        else:
            # NOTE(review): remainder lacks the payload element the other
            # branches carry ([cmd, payload]); ROW's payload is empty
            # anyway -- confirm no caller indexes remainder[1]
            return [byte1, byte2], [cmd-(1<<1)]
    value = cmd & 0x1F
    if (cmd&0xC0) == 0xC0:#those two use a different format
        value = cmd & 0x3E
    if (cmd & 0xE0) == ENC_RUN:
        value -= 1 # - MON
    if (cmd & 0xE0) == ENC_RUN or (cmd & 0xC1) == ENC_ALT:
        # encoding_pair[1] can be one or two values
        if value == 0:
            # one step left: degrade to a verbatim copy of the payload
            return encoding_pair[1], [ENC_LIT - 1 + ENC_LIT_MIN, encoding_pair[1]]
        else:
            if (cmd & 0xC1) == ENC_ALT:
                # ALT counts occupy bits 1-5, so one step is (1<<1)
                return encoding_pair[1], [cmd-(1<<1), encoding_pair[1]]
            else:
                return encoding_pair[1], [cmd-1, encoding_pair[1]]
    if (cmd & 0xC1) == ENC_INV:
        if value == 0:
            return [encoding_pair[1][0], encoding_pair[1][0]^0xFF], [ENC_LIT - 1 + ENC_LIT_MIN, [encoding_pair[1][0], encoding_pair[1][0]^0xFF]]
        else:
            return [encoding_pair[1][0], encoding_pair[1][0]^0xFF], [cmd-(1<<1), encoding_pair[1]]
    # fallthrough: unrecognized command byte
    return [], encoding_pair
# we don't have to look at the end of verbatim,
# that's the only thing that got already done
# verbatim blocks are still what we want to improve
# handles encodings whose one-step commands (80, 70, C0) are followed by a verbatim block:
#####
# 10 could be followed by two increments of it, same goes for C0
# -> saves nothing, slows down skipping
# -> unless that’s all the verbatim contains = 1 byte saved
# -> or we need at least 3 -> we'd already found that
# X
# 07 could be followed by repetitions of the last number
# -> could become C0 or 80
# 10 and 07 could be followed by alternations
# -> could become C0 or A0 or E0
# handles encodings that could be transformed to a shorter notation with one or two additional numbers
#####
# 3 byte notations to 1 or 2 byte notations
# 2 byte notations to 1 byte notation
# verbatim that can change encoding with one or two additional numbers:
#####
# this is basically a special case of the first section
# can this even save space?
def improve_compression(output):
    """Post-pass over the encoded stream: merge a command followed by a
    short verbatim (LIT) block into a cheaper encoding where possible.

    Returns a new top-level list.  Note: the inner [cmd, payload] pairs
    are shared with the input (shallow copy) and are rewritten in place
    when an improvement is applied.
    """
    output = output.copy()
    i = 1
    while i < len(output):
        cmd = output[i][0]
        lcmd = output[i-1][0]
        # only inspect LIT blocks: high bit clear and not an INC command
        if (cmd & 0x80) == 0 and (cmd & 0xF0) != ENC_INC:
            value = cmd + ENC_LIT_MIN  # decoded literal length
            # verbatim
            if enc_poppable(output[i-1]) and value >= 2:
                # they are all 1B
                if (lcmd & 0xE0) == ENC_RUN or (lcmd & 0xF0) == ENC_INC or (lcmd & 0x80) == ENC_LIT:
                    el, encoding_pair = enc_pop(output[i-1])
                    el = el[0]
                    byte1 = output[i][1][0]
                    byte2 = output[i][1][1]
                    # BUG FIX: both masks below were "& 0xE", which can
                    # never equal ENC_RUN (0x80) -- max of the masked value
                    # is 0x0E -- so these branches were dead code.  Use the
                    # 0xE0 command mask, as the ENC_RUN test above does.
                    if args.increment_compression == "yes" and (lcmd & 0xE0) == ENC_RUN and value == 2:
                        if el + 1 == byte1 and byte1 + 1 == byte2:
                            # ENC_INC
                            if args.output != "-":
                                print("- I found something improvable!")
                    elif (lcmd & 0xE0) == ENC_RUN and value == 3:
                        byte3 = output[i][1][2]
                        if el == byte2 and byte1 == byte3:
                            # ENC_ALT || ENC_INV or even ENC_RUN || ENC_ROW
                            if args.output != "-":
                                print("- I found something improvable!!")
            elif args.increment_compression == "yes" and value == 1 and lcmd == ENC_ALT:# short for min amount ALT
                # ALT A B LIT C -> INC A INC A
                a = output[i-1][1][0]
                b = output[i-1][1][1]
                c = output[i][1][0]
                if a + 1 == b and b + 1 == c:
                    output[i-1][0] = ENC_INC + 1
                    del output[i-1][1][1]
                    output[i][0] = ENC_INC + 2
                    output[i][1][0] = output[i-1][1][0]
            # those are usually not found because this length could interrupt LITs
            # we lack knowledge during parsing
            elif args.increment_compression == "yes" and value == 2 and output[i][1][0] + 1 == output[i][1][1]:
                # LIT A B -> INC A
                output[i][0] = ENC_INC + 1
                del output[i][1][1]
        i += 1
    return output
# helper function for compress_rle
def flush_verbatim(verbatim, output, datasize):
    """Emit the pending *verbatim* bytes onto *output*.

    A two-byte run made purely of 0x00/0xFF collapses to a single ROW
    command when color-line compression is enabled; anything else becomes
    a LIT command carrying the raw bytes.  Returns the updated
    (datasize, output) pair.
    """
    collapsible = (
        args.color_line_compression == "yes"
        and len(verbatim) == 2
        and verbatim[0] in (0x00, 0xFF)
        and verbatim[1] in (0x00, 0xFF)
    )
    if collapsible:
        # 1 byte instead of 3: H/L bits flag which halves are 0x00
        high = 0x10 if verbatim[0] == 0x00 else 0x0
        low = 0x01 if verbatim[1] == 0x00 else 0x0
        output.append([ENC_ROW | high | low, []])
        return datasize + 1, output
    # plain literal: one command byte plus the bytes themselves
    output.append([ENC_LIT | len(verbatim) - ENC_LIT_MIN, verbatim.copy()])
    return datasize + 1 + len(verbatim), output
# Command-byte constants for the RLE stream; see the format table below.
# The "N+" comments give how many output values the command encodes.
ENC_INC = 0x00 # 2+  0000XXXX: increment the byte each iteration
ENC_INC_MIN = 1
ENC_INC_MAX = ENC_INC_MIN+15-1 #exclude MON
ENC_LIT = 0x00 # 1+  0XXXXXXX: literal pass-through
ENC_LIT_MIN = (1-0x10)  # LIT lengths are stored with a +15 bias
ENC_LIT_MAX = ENC_LIT_MIN+127-(ENC_INC_MAX-ENC_INC_MIN)
ENC_RUN = 0x80 # 2+  100XXXXX: repeat one byte
ENC_RUN_MIN = 1
ENC_RUN_MAX = ENC_RUN_MIN+31-1
ENC_INV = 0xC0 # 4+  11XXXXX0: alternate a byte and its inversion
ENC_INV_MIN = 2
ENC_INV_MAX = ENC_INV_MIN+31
ENC_ROW = 0xA0 # 2+  101HXXXL: rows of constant color
ENC_ROW_MIN = 1
ENC_ROW_MAX = ENC_ROW_MIN+7
ENC_ALT = 0xC1 # 4+  11XXXXX1: alternate between two bytes
ENC_ALT_MIN = 2
ENC_ALT_MAX = ENC_ALT_MIN+30
ENC_EOD = 0x00 # 0   end-of-data marker
ENC_MON = 0x80 # 0   switch to monochrome (1bpp) mode
#current:
# command bytes with size, values
# and outputed bytes per value
#
# 0x 0b NAME SIZE OUT VALUES
####################################
# 00 00000000 EOD [ 1B ] 0B 1
# 00 0000XXXX INC [ 2B ] 1B 2-(17-1)
# 00 0XXXXXXX LIT [1B+n] 1B 1-(128-15)
# 80 10000000 MON [ 1B ] 0B 1
# 80 100XXXXX RUN [ 2B ] 1B 2-(33-1)
# A0 101HXXXL ROW [ 1B ] 2B 1-8
# C0 11XXXXX0 INV [ 2B ] 2B 2-33
# E0 11XXXXX1 ALT [ 3B ] 2B 2-33
#
# End Of Data marker
# LITerally writes the following bytes through (worst case reduction)
# INCrements the byte each iteration (mapping compression)
# switch to MONochrome mode aka 1bpp
# RUNs the byte X times (darkest and brightest color)
# writes ROWs of constant color, High and Low are inverted (all four colors)
# alternates between byte and it's INVersion (middle colors)
# ALTernates between the two bytes (all four colors)
#
# Switching to 1bpp mode is only allowed at input byte position %2==0
#
# == 0x0 is cheap
# 0x1-1=0x0 and 0x0-1=0xFF
# nibble swap is cheap
# all who output 2B got their values shifted one to the left (*2)
#
# masks are 0x80 (lit) 0x70(inc) 0xE0 (run, row) 0xC1 (inv, alt)
#
# 70 and A0 could be disabled-> speeds up LIT and RUN
# 00 00000000 EOD
# 00 0XXXXXXX LIT
# 80 10XXXXXX RUN
# C0 11XXXXX0 INV
# E0 11XXXXX1 ALT
# 1101 1111 represents 8 times 0xFF 0xFF, which are 16 bytes / one tile
# 70 and C0 can be unused, which shrinks down the decompressor
def compress_rle(data):
# we calculate new compressed datasize
datasize = 0
output = []
if args.monochrome != "no":
output.append([ENC_MON, []])
datasize += 1
verbatim = [data[0]]
# 0 is one byte, 1 is 2 bytes and 2 is incremental
mode = 0
# first character has run of 1
counter = 1
# first character is already in verbatim buffer
# we basically handle byte from last round
i = 1
while i < len(data):
if mode == 0 and data[i-1] == data[i]:
# run
counter += 1
# delete data[i-1] from array | |
see if IP is in a network, but /32 can be used in network.overlaps() method to accomplish the same thing
# remember which address family the searched-for network belongs to, so
# the right set of AWS JSON keys can be matched later
if isinstance(network, ipaddress.IPv4Network):
    iptype = 4
else:
    iptype = 6
import codecs
# func to highlight matches on screen
def findmatch(o, network, ippath):
    """Tag entries under key *o* in *ippath* that overlap *network*.

    ippath[o] may be a single CIDR/IP string or a list of them; entries
    whose network overlaps *network* get " #Match Found!#" appended in
    place.  Unparseable entries abort the lookup silently (best-effort),
    matching the original behavior.

    Fixed: the bare ``except:`` clauses (which also swallowed
    KeyboardInterrupt/SystemExit) are narrowed to the exceptions
    ``ipaddress.ip_network`` actually raises on bad input; an unused
    ``global iptype`` declaration was removed.
    """
    entry = ippath[o]
    if isinstance(entry, list):
        for i in range(len(entry)):
            try:
                tip = ipaddress.ip_network(entry[i])
            except (ValueError, TypeError):
                return
            if network.overlaps(tip):
                entry[i] += " #Match Found!#"
        return
    if isinstance(entry, str):
        try:
            tip = ipaddress.ip_network(entry)
        except (ValueError, TypeError):
            return
        if network.overlaps(tip):
            ippath[o] += " #Match Found!#"
    return
# func to find a matching key, send to highlight func
def findip(o, match, network, ippath):
    """Recursively walk *o* and hand every key listed in *match* to findmatch."""
    if isinstance(o, dict):
        for key, child in o.items():
            if key in match:
                # highlight directly on the dict that owns the key
                findmatch(key, network, ippath=o)
            elif isinstance(child, dict):
                findip(child, match, network, ippath=child)
            elif isinstance(child, list):
                for element in child:
                    findip(element, match, network, ippath=child)
    elif isinstance(o, list):
        for element in o:
            findip(element, match, network, ippath)
    return
# network.overlaps()
# pick the AWS JSON keys that can hold addresses of the searched family
if iptype == 4:
    match = ['CidrBlock', 'Cidrs', 'PrivateIp', 'PrivateIpAddress', 'PublicIp', 'CustomerOwnedIp', 'TransitGatewayCidrBlocks', 'CarrierIp', 'DestinationCidrBlock', 'CidrIp', 'CidrIpv4']
else:
    match = ['Ipv6CidrBlock', 'DestinationIpv6CidrBlock', 'Ipv6Address', 'CidrIpv6']
# run through vpcs to find ips
for i in range(len(vpcs['Vpcs'])):
    # if list of selected vpc-ids, see if this is excluded
    if args.vpc_ids is not None:
        if vpcs['Vpcs'][i]['VpcId'] not in args.vpc_ids:
            continue
    findip(vpcs['Vpcs'][i], match, network, ippath=vpcs['Vpcs'][i])
# split option with json? bypass highlighting code and drop back into normal processing
if out != "json" or args.split is None:
    # convert dict to string to allow replacing highlight placeholder with a working highlighter
    # this is only needed because json.dumps does not handle terminal control characters
    json_string = json.dumps(vpcs, indent=2, default=datetime_handler)
    if out == "json":
        # json - use terminal colors
        class style():
            RED = '\033[31m'
            GREEN = '\033[1;32m'
            YELLOW = '\033[33m'
            BLUE = '\033[34m'
            RESET = '\033[0m'
        repl = style.YELLOW + "Match Found!" + style.RESET
        # run substitution
        tmp = re.sub("#Match Found!#", repl, json_string)
        # System call - may not be needed but some claim it allows colorize functions to work for some reason
        os.system("")
        # send highlighted json to stdout and end
        print(tmp)
        sys.exit()
    else:
        # html output - substitute in place with html
        repl = "<span style='background-color:blue;color:yellow;'>Match Found!</span>"
        tmp = re.sub("#Match Found!#", repl, json_string)
        del json_string
        # round-trip back to a dict so the HTML generator below can use it
        vpcs = json.loads(tmp)
        del tmp
# json to stdout
if out == "json":
    if args.split is None:
        print(json.dumps(vpcs, indent=2, default=datetime_handler))
    else:
        # json to split output files
        for vpc in vpcs['Vpcs']:
            vpc_id = vpc['VpcId']
            if args.split == "name":
                # prefer the Name tag; fall back to the VPC id
                vpc_file = get_tag_name(vpc)
                if vpc_file == "":
                    vpc_file = vpc_id
            else:
                vpc_file = vpc_id
            # one file per VPC, annotated with the region
            vpct = {'Vpcs': [vpc]}
            vpct['Region'] = region
            with open(f'{vpc_file}.json', mode='w') as vf:
                json.dump(vpct, vf, indent=2, default=datetime_handler)
# html to stdout or split
else:
    # mark the report "Partial" when only selected VPC ids are included
    if args.vpc_ids is not None and args.split is None:
        partial = "Partial "
    else:
        partial = ""
    def html_header(dest):
        """Write the HTML report head (title, caption, CSS) to *dest*."""
        print("<!DOCTYPE html>", file=dest)
        print("<html>", file=dest)
        print("<div class='container'>", file=dest)
        print(f"<head><p style='font-size:150%;text-align:center;color:blue;'>{partial}VPC Configuration Report for {repr_title}<br>{now}", file=dest)
        # note the searched IP in the caption when an IP search was requested
        if args.ip is not None:
            print(f"<br>IP Search: {args.ip[0]}", file=dest)
        # inline stylesheet; the accordion classes drive the collapsible sections
        print("""<style>h1 {margin-top:10px} h1, h3, h4, h5, h6 {margin-bottom:0px;} h3, h4 {margin-top:5px;}
        h2 {margin-bottom:5px;} td, th {padding: 0px 5px 0px 5px; text-align:left;}
        .container {
        //width: 90%;
        margin: 0px 35px;
        }
        .accordion, .accordion2 {
        background-color: #f9f9f9;
        color: #000;
        cursor: pointer;
        border: none;
        outline: none;
        text-align: left;
        font-weight: bold;
        font-family: 'Times New Roman';
        font-size: 15px;
        min-width: 50%;
        transition: 175ms;
        }
        .active, .accordion:hover, .active2, .accordion2:hover {
        background-color: #e9e9e9;
        }
        .accordion:after, .accordion2:after {
        background-color: #f9f9f9;
        content: '\\002B';
        color: #000;
        font-weight: bold;
        float: right;
        margin-left: 5px;
        }
        .active:after, .active2:after {
        content: '\\2212';
        background-color: #e9e9e9;
        }
        .sect, .sect2 {
        background-color: white;
        max-height: 0px;
        overflow: hidden;
        transition: max-height 175ms ease-out;
        }
        </style>""", file=dest)
        print(f"<title>VPC Configuration Report ({repr_title})</title>", file=dest)
        print("</head><body>", file=dest)
def html_vpc_sections(dest, vpc, vpc_name):
# get id for current vpc
vpc_id = vpc['VpcId']
# save subnet names for other sections to use
sn_names_dict = {} # sn_id -> sn_name
if "Subnets" in vpc:
for sn in vpc['Subnets']:
sn_names_dict[sn['SubnetId']] = get_tag_name(sn)
# vpc name and id heading, along with current state
# owner, tenancy
print(f"<hr><h1><span style='color:blue;'>{vpc_name}</span> {vpc_id} ({vpc['State']})</h1>", file=dest)
print(f"Owner: {vpc['OwnerId']}, Tenancy: {vpc['InstanceTenancy']}<br>", file=dest)
# azs
if "AvailabilityZones" in vpc:
az_dict = {}
for i in range(len(vpc['AvailabilityZones'])):
messages = ""
for m in vpc['AvailabilityZones'][i]['Messages']:
messages += f"{m['Message']} "
az_dict[vpc['AvailabilityZones'][i]['ZoneName']] = f"<tr><td>{vpc['AvailabilityZones'][i]['ZoneName']}<td>{vpc['AvailabilityZones'][i]['ZoneId']}<td>{vpc['AvailabilityZones'][i]['State']}<td>{messages}"
azs = ""
for k in sorted(az_dict):
azs += az_dict[k]
print(f"{button}Availability Zones{div}<table><tbody>", file=dest)
print(azs, file=dest)
print("</table></div>", file=dest)
# cidrs
if "CidrBlock" in vpc:
print(f"{button}CIDR Blocks{div}<table><tbody>{tr}IPv4{space}", file=dest)
for c in range(len(vpc['CidrBlockAssociationSet'])):
print(f"{vpc['CidrBlockAssociationSet'][c]['CidrBlock']} ({vpc['CidrBlockAssociationSet'][c]['CidrBlockState']['State']}) ", file=dest)
if "Ipv6CidrBlockAssociationSet" in vpc:
print(f"{tr}IPv6{space}", file=dest)
for c in range(len(vpc['Ipv6CidrBlockAssociationSet'])):
print(f"{vpc['Ipv6CidrBlockAssociationSet'][c]['Ipv6CidrBlock']} ({vpc['Ipv6CidrBlockAssociationSet'][c]['Ipv6CidrBlockState']['State']}) ", file=dest)
print("</table></div>", file=dest)
# dhcp options
if "DhcpOptions" in vpc:
print(f"{button}DHCP Options ({vpc['DhcpOptionsId']}){div}<table><tbody>", file=dest)
for d in vpc['DhcpOptions'][0]['DhcpConfigurations']:
print(tr+d['Key']+space, file=dest)
for v in d['Values']:
print(f"{v['Value']} ", file=dest)
print("</table></div>", file=dest)
## gateways
if "EgressOnlyInternetGateways" in vpc or "InternetGateways" in vpc or "NatGateways" in vpc or "TransitGateways" in vpc:
print(f"{button}Gateways{div}<table><tbody>", file=dest)
# egress only internet gateways
eoig_dict = {}
if "EgressOnlyInternetGateways" in vpc:
for g in vpc['EgressOnlyInternetGateways']:
gid = g['EgressOnlyInternetGatewayId']
for a in g['Attachments']:
s = a['State']
v = a['VpcId']
eoig_dict[v] = [gid, s]
print(f"{tr}Egress Only Internet Gateway{space}{eoig_dict[vpc_id][0]} ({eoig_dict[vpc_id][1]})", file=dest)
# internet gateway
ig_dict = {}
if "InternetGateways" in vpc:
for g in vpc['InternetGateways']:
gid = g['InternetGatewayId']
for a in g['Attachments']:
s = a['State']
v = a['VpcId']
ig_dict[v] = [gid, s]
print(f"{tr}Internet Gateway{space}{ig_dict[vpc_id][0]} ({ig_dict[vpc_id][1]})", file=dest)
# nat gateways
ng_dict = {}
if "NatGateways" in vpc:
for g in vpc['NatGateways']:
v = g['VpcId']
gid = g['NatGatewayId']
s = g['State']
ct = g['ConnectivityType']
ips = ""
if "NatGatewayAddresses" in g:
sep = ""
for nga in g['NatGatewayAddresses']:
ips += f"{sep}{nga['NetworkInterfaceId']}: {nga['PrivateIp']}⇆{nga['PublicIp']}"
sep = ", "
sn = g['SubnetId']
if v in ng_dict:
ng_dict[v].append([gid, s, ct, ips, sn])
else:
ng_dict[v] = [[gid, s, ct, ips, sn]]
for i in range(len(ng_dict[vpc_id])):
print(f"{tr}NAT Gateway{space}{ng_dict[vpc_id][i][0]} ({ng_dict[vpc_id][i][1]}, {ng_dict[vpc_id][i][2]})<td>{ng_dict[vpc_id][i][3]}<td>{ng_dict[vpc_id][i][4]}", file=dest)
# transit gateways
# get tgw's for vpc and dedupe the ids
if "TransitGateways" in vpc:
for i in range(len(vpc['TransitGateways'])):
print(f"{tr}Transit Gateway{space}{vpc['TransitGateways'][i]['TransitGatewayId']} ({vpc['TransitGateways'][i]['State']})<td>{vpc['TransitGateways'][i]['Description']}", file=dest)
# end gateways
print(f"</table></div>", file=dest)
# network interfaces
if 'NetworkInterfaces' in vpc:
print(f"{button}Network Interfaces{div}<table><tbody>", file=dest)
nif_dict = {}
print("<tr><th>Description<th>Att. Status<th>I/F Status<th>AZ<th>Subnet Name<th>Subnet ID<th>SG Names<th>SG IDs<th>Network I/F ID<th>Private IP<th>Public IP<th>Other Private IPs<th>IPv4 Prefixes<tbody>", file=dest)
for nif in vpc['NetworkInterfaces']:
if "Association" in nif:
if "PublicIp" in nif['Association']:
public_ip = nif['Association']['PublicIp']
else:
public_ip = "-"
if "Groups" in nif and len(nif['Groups']) > 0:
sg_names = ""
sg_ids = ""
sep = ""
for g in nif['Groups']:
sg_names += sep + g['GroupName']
sg_ids += sep + g['GroupId']
sep = ", "
else:
sg_names = "-"
sg_ids = "-"
more_ips = "-"
if "PrivateIpAddresses" in nif and len(nif['PrivateIpAddresses']) > 1:
more_ips = ""
for i in range(len(nif['PrivateIpAddresses'])):
if not nif['PrivateIpAddresses'][i]['Primary']:
more_ips += nif['PrivateIpAddresses'][i]['PrivateIpAddress'] + " "
prefix4 = "-"
if "Ipv4Prefixes" in nif:
prefix4 = ""
for i in range(len(nif['Ipv4Prefixes'])):
prefix4 += nif['Ipv4Prefixes'][i]['Ipv4Prefix'] + " "
# prefix6 = "/ "
# if "Ipv6Prefixes" in nif:
# for i in range(len(nif['Ipv6Prefixes'])):
# prefix6 += nif['Ipv6Prefixes'][i]['Ipv6Prefix'] + " "
if nif['SubnetId'] in sn_names_dict:
subnet_name = sn_names_dict[nif['SubnetId']]
else:
subnet_name = "name n/a"
nif_key = str(nif['Description'] + spaces)[:100] + nif['SubnetId'] + nif['NetworkInterfaceId']
nif_dict[nif_key] = f"<tr><td>{nif['Description']}<td>{nif['Attachment']['Status']}<td>{nif['Status']}<td>{nif['AvailabilityZone']}<td>{subnet_name}<td>{nif['SubnetId']}<td>{sg_names}<td>{sg_ids}<td>{nif['NetworkInterfaceId']}<td>{nif['PrivateIpAddress']}<td>{public_ip}<td>{more_ips}<td>{prefix4}"
# print nifs sorted by desc + subnet id
for k in sorted(nif_dict):
print(nif_dict[k], file=dest)
print("</table></div>", file=dest)
# subnets
if "Subnets" in vpc:
snitems_dict = {}
print(f"{button}Subnets{div}<table><thead>", file=dest)
print("<tr><th>Name<th>ID<th>Default<th>IPv4<th>IPv6<th>AZ<th>AZ ID<th>State<tbody>", file=dest)
for sn in vpc['Subnets']:
# default?
default = "-"
if sn['DefaultForAz']:
default = "Yes"
# get ipv6 cidrs
cidrs = ""
sep = ""
if "Ipv6CidrBlockAssociationSet" in sn:
for t in range(len(sn['Ipv6CidrBlockAssociationSet'])):
cidrs += (f"{sep}{sn['Ipv6CidrBlockAssociationSet'][t]['Ipv6CidrBlock']} {sn['Ipv6CidrBlockAssociationSet'][t]['Ipv6CidrBlockState']['State']}")
sep = ", "
# construct name from tag (dict), ID and default
sn_name = f" {sn['SubnetId']}{default}"
# subnet info
snitems_key = str(sn_names_dict[sn['SubnetId']]+spaces)[:100] + sn['SubnetId']
snitems_dict[snitems_key] = f"<tr><td>{sn_names_dict[sn['SubnetId']]}<td>{sn['SubnetId']}<td>{default}<td>{sn['CidrBlock']}<td>{cidrs}<td>{sn['AvailabilityZone']}<td>{sn['AvailabilityZoneId']}<td>{sn['State']}"
# print subnet info sorted by name + id
for k in sorted(snitems_dict):
print(snitems_dict[k], file=dest)
print("</table></div>", file=dest)
# vpc endpoints
if "VpcEndpoints" in vpc:
print(f"{button}VPC Endpoints{div}<table><thead>", file=dest)
vpce_dict = {}
print("<tr><th>Endpoint ID<th>Endpoint Type<th>Service Name<th>State<th>Route Table IDs<th>Private DNS Enabled<tbody>", file=dest)
for vpce in vpc['VpcEndpoints']:
if len(vpce['RouteTableIds']) > | |
eine Zeile
nr += 1
aus.write(str(nr).rjust(i1)) # 1. Item
aus.write(str(neu3_gh[z][0]).rjust(i2))# 2. Item
aus.write(str(neu3_gh[z][1]).rjust(i3))# 3. Item
zwi3 = neu3_gh[z][1]
if zwi3 > gesmodus[1]:
gesmodus = (neu3_gh[z][0], zwi3)
gessumme += zwi3
aus.write(str(neu3_gh[z][2]).rjust(i4))# 4. Item
for f in range(len_alle):
zwi2 = neu3_gh[z][f + 3]
if zwi2 > datmodus[f][1]:
datmodus[f] = (neu3_gh[z][0], zwi2)
datsumme[f] += zwi2
if zwi2 > 0:
aus.write(str(zwi2).rjust(i5)) # 5ff. Item
else:
aus.write("----".rjust(i5)) # 5ff. Item
aus.write("\n")
# --------------------------------------------------------------
# Abspann
aus.write("-" * (i1 + i2 + i3 + i4 + len_alle *(i5))+ "\n")
aus.write(fd_summary_sum.ljust(i1+i2))
aus.write(str(gessumme).rjust(i3))
aus.write(leer.rjust(i4))
for f in range(len_alle):
aus.write(str(datsumme[f]).rjust(i5))
aus.write("\n")
aus.write(fd_modus.ljust(i1+i2))
aus.write(str(gesmodus[1]).rjust(i3))
aus.write(leer.rjust(i4))
for f in range(len_alle):
aus.write(str(datmodus[f][1]).rjust(i5))
aus.write("\n")
aus.write(fd_at.ljust(i1+i2))
aus.write(str(gesmodus[0]).rjust(i3))
aus.write(leer.rjust(i4))
for f in range(len_alle):
aus.write(str(datmodus[f][0]).rjust(i5))
aus.write("\n\n")
aus.write(fd_min_freq.ljust(breite) + trenner + str(min(neu3_gh)[0]).rjust(i2) + "\n")
aus.write(fd_max_freq.ljust(breite) + trenner + str(max(neu3_gh)[0]).rjust(i2) + "\n\n")
# --------------------------------------------------------------
# (13-10) Ausgabe, cd ausgeben (Zeichen-Verteilung):
#
# character_distribution: global; Schalter für cd
# P_ges_alle_zeichen : Ergebnisdaten (aus ges_alle_zeichen) aus allen pkl-Dateien
# neu_gaz : Zusammenfassung aus P_ges_alle_zeichen
# neu2_gaz : sortiert aus neu_gaz
# neu3_gaz : neustrukturiert aus neu_gaz / neu2_gaz
# len_alle : Anzahl der Dateien
# l3 : lokale Hilfsvariable
# f : Schleifenvariable über alle Dateien
# g : Schleifenvariable über alle Items einer Datei
# pp : lokale Hilfsvariable
# zeichen :
# anzahl :
# ergebnis : lokale Hilfsstruktur
# z : Schleifenvariable neu2_gaz ---> neu3_gaz
# ww : lokale Hilfsvariable
# zwi : lokale Hilfsstruktur
# zwi2 : lokale Hilfsgröße
# zwi3 : lokale Hilfsgröße
# breite : lokale Hilfsstruktur: Label-Breite
# nr : lokale Hilfsstruktur: laufende Nummer bei Ausgabe
# gessumme : lokale Hilfsgröße: Summe über alle Zeilen
# datsumme : lokale Hilfsstruktur: Summe über alle Zeilen (über alle Dateien)
# gesmodus : lokale Hilfsgröße: Berechnung des Modus für Gesamtzahlen
# datmodus : lokale Hilfsstruktur: Berechnung des Modus für alle Dateien
# summe : lokale Hilfsgröße
# maxzahl : lokale Hilfsgröße
# leer : globale Hilfsgröße
# trenner : globale Hilfsgröße
# P_ges_alle_zeichen[i] --> (Neustrukturierung) --> neu_gaz --> (Sortierung) --> neu2_gaz --> (Neustrukturierung) --> neu3_gaz
    # (13-10) output of cd (character distribution): one row per character,
    # aggregated over all input files (see the legend above)
    if character_distribution:
        __ueberschrift(caption_cd,"-")
        # --------------------------------------------------------------
        # Declarations
        neu_gaz = {} # aggregation built from P_ges_alle_zeichen
        neu3_gaz = [] # restructured from neu_gaz / neu2_gaz
        # --------------------------------------------------------------
        # Helper variables
        breite = 25              # label width
        summe = 0                # largest aggregated per-character total
        maxzahl = 0              # largest single count seen
        nr = 0                   # running row number for the output
        gessumme = 0             # grand total over all rows
        datsumme = [0 for x in range(len_alle)]      # per-file column totals
        gesmodus = (0, 0)        # overall mode as (item, count)
        datmodus = [(0, 0) for x in range(len_alle)] # per-file mode, (item, count)
# --------------------------------------------------------------
# Neustrukturierung
l3 = len_alle + 2
for f in range(len_alle):
pp = P_ges_alle_zeichen[f]
for g in pp:
zeichen = g
anzahl = pp[g]
if maxzahl < anzahl:
amaxzahl = anzahl
if not(zeichen in neu_gaz):
ergebnis = [0 for x in range(l3)]
ergebnis[0] = anzahl
ergebnis[f + 2] = anzahl
ergebnis[1] = 1
neu_gaz[zeichen]= ergebnis
else:
neu_gaz[zeichen][f+2] = anzahl
neu_gaz[zeichen][0] += anzahl
neu_gaz[zeichen][1] += 1
if summe < neu_gaz[zeichen][0]:
summe = neu_gaz[zeichen][0]
        # --------------------------------------------------------------
        # Sorting
        neu2_gaz = sorted(neu_gaz)
        # --------------------------------------------------------------
        # Restructuring: turn each sorted key into a (char, total, files, per-file...) tuple
        for z in range(len(neu2_gaz)):
            ww = neu2_gaz[z]
            zwi = [ww]
            zwi += neu_gaz[ww]
            neu3_gaz += [tuple(zwi)]
        # --------------------------------------------------------------
        # Helper variables for the output (column widths)
        i1 = floor(lg(len(neu3_gaz))) + 2 # width of column 1 (running number); adaptive <-- len(neu3_gaz)
        i2 = 3 # width of column 2 (character)
        i2a = 7 # matching hex code
        i3 = floor(lg(summe)) + 3 # width of column 3 (total over all files); adaptive
        i4 = floor(lg(len_alle)) + 3 # width of column 5 (number of files); adaptive
        i5 = i3 # width of column 6 (count); adaptive <-- summe
        # --------------------------------------------------------------
        # Legend and table head
        aus.write("(1) {0}".format(cd_hdr_nr) + "\n")
        aus.write("(2) {0}".format(cd_hdr_char) + "\n")
        aus.write("(3) {0}".format(cd_hdr_hex) + "\n")
        aus.write("(4) {0}".format(cd_hdr_char_nr) + "\n")
        aus.write("(5) {0}".format(cd_hdr_files_nr) + "\n")
        for f in range(len_alle):
            aus.write("({0}) {1} {2} ({3} --> \n    {4})".format(str(f + 6), cd_hdr_infile, str(f), alle[f], P_kopf[f][7]) + "\n")
        aus.write("\n")
        aus.write("(1)".rjust(i1))
        aus.write("(2)".rjust(i2))
        aus.write("(3)".rjust(i2a))
        aus.write("(4)".rjust(i3))
        aus.write("(5)".rjust(i4))
        for f in range(len_alle):
            aus.write(("(" + str(f + 6) + ")").rjust(i5))
        aus.write("\n")
        aus.write("-" * (i1 + i2 + i2a + i3 + i4 + len_alle *(i5))+ "\n")
        # --------------------------------------------------------------
        # Output loop
        for z in range(len(neu3_gaz)): # one row each
            nr += 1
            aus.write(str(nr).rjust(i1)) # item 1
            aus.write(str(neu3_gaz[z][0]).rjust(i2)) # item 2
            aus.write(__chr_hex(neu3_gaz[z][0]).rjust(i2a))# item 3
            aus.write(str(neu3_gaz[z][1]).rjust(i3)) # item 4
            zwi3 = neu3_gaz[z][1]
            # update overall mode and grand total as we go
            if zwi3 > gesmodus[1]:
                gesmodus = (neu3_gaz[z][0], zwi3)
            gessumme += zwi3
            aus.write(str(neu3_gaz[z][2]).rjust(i4)) # item 5
            for f in range(len_alle):
                zwi2 = neu3_gaz[z][f + 3]
                # update per-file mode and per-file total
                if zwi2 > datmodus[f][1]:
                    datmodus[f] = (neu3_gaz[z][0], zwi2)
                datsumme[f] += zwi2
                if zwi2 > 0:
                    aus.write(str(zwi2).rjust(i5)) # items 6 ff.
                else:
                    aus.write("----".rjust(i5)) # items 6 ff.
            aus.write("\n")
        # --------------------------------------------------------------
        # Footer: totals, mode counts, and the character each mode occurs at
        aus.write("-" * (i1 + i2 + i2a + i3 + i4 + len_alle *(i5))+ "\n")
        aus.write(cd_summary_sum.ljust(i1+i2+i2a))
        aus.write(str(gessumme).rjust(i3))
        aus.write(leer.rjust(i4))
        for f in range(len_alle):
            aus.write(str(datsumme[f]).rjust(i5))
        aus.write("\n")
        aus.write(cd_modus.ljust(i1+i2+i2a))
        aus.write(str(gesmodus[1]).rjust(i3))
        aus.write(leer.rjust(i4))
        for f in range(len_alle):
            aus.write(str(datmodus[f][1]).rjust(i5))
        aus.write("\n")
        aus.write(cd_at.ljust(i1+i2+i2a))
        aus.write(__chr_out(gesmodus[0]).rjust(i3))
        aus.write(leer.rjust(i4))
        for f in range(len_alle):
            aus.write(__chr_out(datmodus[f][0]).rjust(i5))
        aus.write("\n\n")
# --------------------------------------------------------------
# (13-11) Ausgabe, sd ausgeben (Trennzeichen-Verteilung):
#
# separator_distribution: global; Schalter für sd
# P_ges_trennzeichen : Ergebnisdaten (aus ges_trennzeichen) aus allen pkl-Dateien
# neu_gt : Zusammenfassung aus P_ges_trennzeichen
# neu2_gt : sortiert aus neu_gh
# neu3_gt : neustrukturiert aus neu_gt / neu2_gt
# len_alle : Anzahl der Dateien
# l3 : lokale Hilfsvariable
# f : Schleifenvariable über alle Dateien
# g : Schleifenvariable über alle Items einer Datei
# pp : lokale Hilfsvariable
# zeichen :
# anzahl :
# ergebnis : lokale Hilfsstruktur
# z : Schleifenvariable neu2_gt ---> neu3_gt
# ww : lokale Hilfsvariable
# zwi : lokale Hilfsstruktur
# zwi2 : lokale Hilfsgröße
# zwi3 : lokale Hilfsgröße
# breite : lokale Hilfsstruktur: Label-Breite
# nr : lokale Hilfsstruktur: laufende Nummer bei Ausgabe
# gessumme : lokale Hilfsgröße: Summe über alle Zeilen
# datsumme : lokale Hilfsstruktur: Summe über alle Zeilen (über alle Dateien)
# gesmodus : lokale Hilfsgröße: Berechnung des Modus für Gesamtzahlen
# datmodus : lokale Hilfsstruktur: Berechnung des Modus für alle Dateien
# summe : lokale Hilfsgröße
# maxzahl : lokale Hilfsgröße
# leer : globale Hilfsgröße
# trenner : globale Hilfsgröße
# P_ges_alle_zeichen[i] --> (Neustrukturierung) --> neu_gt --> (Sortierung) --> neu2_gt --> (Neustrukturierung) --> neu3_gt
if separator_distribution:
__ueberschrift(caption_sd,"-")
# --------------------------------------------------------------
# Vereinbarungen
neu_gt = {} # Zusammenfassung aus P_ges_trennzeichen
neu3_gt = [] # neustrukturiert aus neu_gt / neu2_gt
# --------------------------------------------------------------
# Hilfsvariablen
breite = 25
summe = 0
maxzahl = 0
nr = 0
gessumme = 0
datsumme = [0 for x in range(len_alle)]
gesmodus = (0, 0)
datmodus = [(0, 0) for x in range(len_alle)] # (länge, anzahl)
# --------------------------------------------------------------
# Neustrukturierung
l3 = len_alle + 2
for f in range(len_alle):
pp = P_ges_trennzeichen[f]
for g in pp:
zeichen = g
anzahl = pp[g]
if not(zeichen in neu_gt):
ergebnis = [0 for x in range(l3)]
ergebnis[0] = anzahl
ergebnis[f + 2] = anzahl
ergebnis[1] = 1
neu_gt[zeichen] = ergebnis
else:
neu_gt[zeichen][f+2] = anzahl
neu_gt[zeichen][0] += anzahl
neu_gt[zeichen][1] += 1
if summe < neu_gt[zeichen][0]:
summe = neu_gt[zeichen][0]
# --------------------------------------------------------------
#Sortierung
neu2_gt = sorted(neu_gt)
# --------------------------------------------------------------
# Neustrukturierung
for z in range(len(neu2_gt)):
ww = neu2_gt[z]
zwi = [ww]
zwi += neu_gt[ww]
neu3_gt += [tuple(zwi)]
# --------------------------------------------------------------
# Hilfsvariablen für die Ausgabe
i1 = floor(lg(len(neu3_gt))) + 2 # Breite der Kolumne 1 (laufende Nummer); adaptierbar <-- len(neu3_gt)
i2 = 5 # Breite der Kolumne 2 (Zeichen)
i2a = 7 # zugehöriger Hex-Code
i3 = floor(lg(summe)) + 3 # Breite der Kolumne 3 (Summenzahl über alle Dateien); adaptierbar
i4 = floor(lg(len_alle)) + 3 # Breite der Kolumne 5 (Zahl der Dateien); adaptierbar
i5 = i3 # Breite der Kolumne 6 (Anzahl); adaptierbar <-- summe
# --------------------------------------------------------------
# Legende und Kopf
aus.write("(1) {0}".format(sd_hdr_nr) + "\n")
aus.write("(2) {0}".format(sd_hdr_char) + "\n")
aus.write("(3) {0}".format(sd_hdr_hex) + "\n")
aus.write("(4) {0}".format(sd_hdr_char_nr) + "\n")
aus.write("(5) {0}".format(sd_hdr_files_nr) + "\n")
for f in range(len_alle):
aus.write("({0}) {1} {2} ({3} --> \n {4})".format(str(f + 6), sd_hdr_infile, str(f), alle[f], P_kopf[f][7]) + "\n")
aus.write("\n")
aus.write("(1)".rjust(i1))
aus.write("(2)".rjust(i2))
aus.write("(3)".rjust(i2a))
aus.write("(4)".rjust(i3))
aus.write("(5)".rjust(i4))
for f in range(len_alle):
aus.write(("(" + str(f + 6) + ")").rjust(i5))
aus.write("\n")
aus.write("-" * (i1 + i2 + i2a + i3 + i4 + len_alle *(i5))+ "\n")
# --------------------------------------------------------------
# Ausgabeschleife
for z in range(len(neu3_gt)): # jeweils eine Zeile
nr += 1
aus.write(str(nr).rjust(i1)) # 1. Item
aus.write(__chr_out(neu3_gt[z][0]).rjust(i2)) # 2. Item
aus.write(__chr_hex(neu3_gt[z][0]).rjust(i2a))# 3. Item
aus.write(str(neu3_gt[z][1]).rjust(i3)) # 4. Item
zwi3 = neu3_gt[z][1]
if zwi3 > gesmodus[1]:
gesmodus = (neu3_gt[z][0], zwi3)
gessumme += zwi3
aus.write(str(neu3_gt[z][2]).rjust(i4)) # 5. Item
for f in range(len_alle):
zwi2 = | |
"""Imagniary time analytic continuation kernel routines."""
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing
permissions and limitations under the License."""
import numpy as np
from scipy.linalg import lu_solve, lu_factor
from scipy.linalg.interpolative import interp_decomp
def chebyshev_collocation_points_1st_kind(N):
    """
    Return the Chebyshev collocation points of the first kind.

    The points are defined as

    .. math:: x_j = \\cos \\left( \\pi \\frac{2j + 1}{2N} \\right)

    where :math:`N` is the order and :math:`j = 0, ..., N-1`.

    Parameters
    ----------
    N : int
        Order of the collocation point set.

    Returns
    -------
    x_j : ndarray
        Array with the collocation points :math:`x_j` sorted in
        increasing order.
    """
    # Evaluate the cosine formula for every j at once; the raw sequence
    # is decreasing in j, so flip it to return the points in increasing
    # order.
    angles = np.pi * (2.0 * np.arange(N) + 1.0) / (2.0 * N)
    return np.cos(angles)[::-1]
def chebyshev_barycentric_weights_1st_kind(N):
    """
    Return the Chebyshev barycentric interpolation weights of the first kind.

    The barycentric interpolation weights are defined as

    .. math:: w_j = (-1)^{j} \\sin \\left( \\pi \\frac{2j + 1}{2N} \\right)

    where :math:`N` is the order and :math:`j = 0, ..., N-1`.

    Parameters
    ----------
    N : int
        Order of the collocation point set.

    Returns
    -------
    w_j : ndarray
        Array with the barycentric weights :math:`w_j`.
    """
    # Alternating signs (+, -, +, ...) times the sine of the collocation
    # angles; same values as (-1)**j * sin(...), written without the
    # integer power.
    j = np.arange(N)
    signs = np.where(j % 2 == 0, 1.0, -1.0)
    return signs * np.sin(np.pi * (2 * j + 1) / (2 * N))
def barycentric_chebyshev_interpolation(x, x_j, f_j, w_j):
    """
    Return the barycentric interpolation of a Chebyshev polynomial on arbitrary points.

    The barycentric interpolation formula (second form) reads

    .. math:: f(x) = \\left( \\sum_{j=0}^{N-1} \\frac{f_j w_j}{x - x_j} \\right) \\Bigg/ \\left( \\sum_{j=0}^{N-1} \\frac{w_j}{x - x_j} \\right)

    Parameters
    ----------
    x : ndarray
        points :math:`x` to evaluate the Chebyshev polynomial at
    x_j : ndarray
        Chebyshev collocation points :math:`x_j` of the first kind
    f_j : ndarray
        Values :math:`f_j` of the polynomial at the collocation points, :math:`f_j = f(x_j)`
    w_j : ndarray
        Chebyshev barycentric interpolation weights :math:`w_j`

    Returns
    -------
    f_x : ndarray
        Values :math:`f(x)` of the polynomial at the points :math:`x`
    """
    # Off-grid evaluation. Evaluation points that coincide with a node
    # produce inf/nan terms in the ratio below; those entries are patched
    # afterwards, so the corresponding warnings are silenced here.
    with np.testing.suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "divide by zero encountered in true_divide")
        sup.filter(RuntimeWarning, "invalid value encountered in true_divide")
        inv_dist = w_j[:, None] / (x[None, :] - x_j[:, None])
        numerator = np.sum(inv_dist * f_j[:, None, ...], axis=0)
        denominator = np.sum(inv_dist, axis=0)
        f_x = numerator / denominator
    # On-grid points: overwrite with the exact nodal values f_j.
    hits = np.argwhere(x[:, None] == x_j[None, :])
    if len(hits) > 0:
        x_idx, node_idx = hits.T
        f_x[x_idx] = f_j[node_idx]
    return f_x
def fermi_function(E, beta):
    """
    Evaluate the Fermi distribution function at energy :math:`E` and inverse temperature :math:`\\beta`.

    The Fermi distribution function :math:`f_\\beta(E)` has the form

    .. math:: f_\\beta(E) = \\frac{1}{1 + e^{\\beta E}}

    the evaluation is stabilized using separate formulas for :math:`E \\lessgtr 0`
    so that no exponential with a positive argument is ever formed.

    Parameters
    ----------
    E : ndarray
        Energies (any array_like; converted with ``np.asarray``).
    beta : float
        Inverse temperature

    Returns
    -------
    f : ndarray
        The Fermi distribution function evaluated at :math:`E`, always
        of floating point dtype.
    """
    E = np.asarray(E)
    # BUG FIX: the original used np.zeros_like(E), which inherits E's
    # dtype -- for integer energies every result was silently truncated
    # to 0 or 1. Accumulate in an explicit float array instead.
    f = np.zeros(E.shape, dtype=float)
    p, m = np.argwhere(E > 0), np.argwhere(E <= 0)
    # E > 0: factor out exp(-beta*E) so the exponent stays non-positive.
    f[p] = np.exp(-beta*E[p]) / (1. + np.exp(-beta*E[p]))
    # E <= 0: the direct form is already stable (exponent <= 0).
    f[m] = 1. / (np.exp(beta*E[m]) + 1.)
    return f
def kernel(tau, omega):
    """
    Evaluate the imaginary time and real frequency analytical continuation kernel :math:`K(\\tau, \\omega)`.

    The Fermionic analytical continuation kernel has the form

    .. math:: K(\\tau, \\omega) = \\frac{e^{-\\omega \\tau}}{1 + e^{\\omega}}

    in normalized imaginary time :math:`\\tau \\in [0, 1]` and frequency
    :math:`\\omega`. The evaluation is stabilized using two mathematically
    equivalent forms, chosen by the sign of :math:`\\omega`, so the
    exponent is always non-positive.

    Parameters
    ----------
    tau : ndarray
        Points in imaginary time :math:`\\tau_j`
    omega : ndarray
        Points in real frequency :math:`\\omega_k`

    Returns
    -------
    kernel : ndarray
        Kernel :math:`K_{jk}` evaluated on the :math:`\\tau_j` and :math:`\\omega_k` grids,
        :math:`K_{jk} = K(\\tau_j, \\omega_k)`
    """
    K = np.empty((len(tau), len(omega)))
    tau_col = tau[:, None]
    pos, = np.where(omega > 0.)
    neg, = np.where(omega <= 0.)
    w_pos = omega[pos].T
    w_neg = omega[neg].T
    # omega > 0: exp(-tau*w) / (1 + exp(-w)) keeps all exponents <= 0.
    K[:, pos] = np.exp(-tau_col * w_pos) / (1 + np.exp(-w_pos))
    # omega <= 0: equivalent form exp((1-tau)*w) / (1 + exp(w)).
    K[:, neg] = np.exp((1. - tau_col) * w_neg) / (1 + np.exp(w_neg))
    return K
def gridparams(lamb, order=24, lambda_scale=1.):
    """
    Empirical discretization parameters of :math:`K(\\tau, \\omega)` for given :math:`\\Lambda`

    Parameters
    ----------
    lamb : float
        Cutoff parameter :math:`\\Lambda`
    order : int, optional
        Order of the polynomial expansion
    lambda_scale : float, optional
        Multiplicative rescaling of :math:`\\Lambda` before the panel
        counts are derived.

    Returns
    -------
    order : int
        polynomial order
    npt : int
        number of panel refinements in imaginary time
    npo : int
        number of panel refinements in frequency
    nt : int
        total number of imaginary time points
    no : int
        total number of real frequency points
    """
    # Panel counts grow logarithmically (base 2) with the cutoff; both
    # are clamped to at least one panel.
    log2_lamb = np.log(lamb * lambda_scale) / np.log(2.)
    npt = max(int(np.ceil(log2_lamb)) - 2, 1)
    npo = max(int(np.ceil(log2_lamb)), 1)
    # Each panel carries 2*order grid points.
    return order, npt, npo, 2 * order * npt, 2 * order * npo
def kernel_discretization(lamb, error_est=False):
    """
    Return kernel discretization correct to machine precision for given :math:`\\Lambda`

    Both axes are split into geometrically refined panels, each carrying
    `order` Chebyshev nodes of the first kind.

    Parameters
    ----------
    lamb : float
        Cut-off parameter :math:`\\Lambda`
    error_est : bool, optional
        Perform convergence check of discretization (NB, slow)

    Returns
    -------
    kmat : ndarray
        Discretization of the analytical continuation kernel :math:`K_{jk}`
    t : ndarray
        Discretized imaginary time grid :math:`\\tau_j`
    w : ndarray
        Discretized real frequency grid :math:`\\omega_k`
    err : float
        Maximum panel-wise interpolation error; only returned when
        `error_est` is True.
    """
    order, npt, npo, nt, no = gridparams(lamb)
    #print(f'order = {order}, npt = {npt}, npo = {npo}, nt = {nt}, no = {no}')
    x_i = chebyshev_collocation_points_1st_kind(order)
    w_i = chebyshev_barycentric_weights_1st_kind(order)
    # -- Tau panel discretization
    # Panel breakpoints 0, 2^-(npt-1), ..., 1/4, 1/2, 1 accumulate
    # geometrically towards tau = 0 where the kernel varies fastest.
    i = np.arange(npt)
    t_panel_break_pt = np.zeros(npt + 1)
    t_panel_break_pt[1:] = 0.5 ** (npt - i)
    t = np.zeros(nt)
    for i in range(npt):
        a, b = t_panel_break_pt[i], t_panel_break_pt[i + 1]
        # Affine map of the Chebyshev nodes x_i in [-1, 1] onto [a, b].
        t[i*order:(i+1)*order] = a + (b - a)*0.5*(x_i+1)
    # -- Frequency panel discretization
    # Symmetric breakpoints: -lamb, ..., 0, ..., lamb, refined towards
    # omega = 0.
    j = np.arange(npo)
    w_panel_break_pt = np.zeros(2*npo + 1)
    w_panel_break_pt[npo+1:] = lamb * 0.5 ** (npo - j - 1)
    w_panel_break_pt[:npo] = - w_panel_break_pt[npo+1:][::-1]
    w = np.zeros(no)
    for i in range(2*npo):
        a, b = w_panel_break_pt[i], w_panel_break_pt[i + 1]
        w[i*order:(i+1)*order] = a + (b - a)*0.5*(x_i+1)
    # Evaluate only the first half in tau; the second half follows from
    # the kernel symmetry K(1 - tau, -omega) = K(tau, omega).
    kmat = kernel(t[:nt//2], w)
    kmat = np.vstack((kmat, kmat[::-1, ::-1]))
    if not error_est:
        return kmat, t, w
    else:
        # -- Error estimate
        # Compare the order-N panel interpolant against direct kernel
        # evaluation on a denser (2N) Chebyshev grid, panel by panel,
        # first along tau at fixed omega, then along omega at fixed tau.
        N = order
        x2_i = chebyshev_collocation_points_1st_kind(2*N)
        err = 0.
        for widx in range(no):
            for tp in range(npt):
                a, b = t_panel_break_pt[tp], t_panel_break_pt[tp + 1]
                X = a + (b - a)*0.5*(x2_i + 1)
                K = np.squeeze(kernel(X, np.array([w[widx]])))
                K_interp = barycentric_chebyshev_interpolation(
                    x2_i, x_i, kmat[N*tp:N*(tp+1), widx], w_i)
                perr = np.max(np.abs(K - K_interp))
                err = np.max([err, perr])
        for tidx in range(nt//2):
            for wp in range(2*npo):
                a, b = w_panel_break_pt[wp], w_panel_break_pt[wp + 1]
                X = a + (b - a)*0.5*(x2_i + 1)
                K = np.squeeze(kernel(np.array([t[tidx]]), X))
                K_interp = barycentric_chebyshev_interpolation(
                    x2_i, x_i, kmat[tidx, N*wp:N*(wp+1)], w_i)
                perr = np.max(np.abs(K - K_interp))
                err = np.max([err, perr])
        return kmat, t, w, err
class KernelInterpolativeDecoposition:
"""
Interpolative decomposition class for the imaginary-time analytical continuation kernel.
This is the SciPy based driver class for the Discrete Lehmann Representation (DLR).
Parameters
----------
lamb : float
DLR scale parameter :math:`\\Lambda`.
eps : float
Set accuracy of the DLR representation.
xi : sign, optional,
Statistical sign :math:`\\xi = \\pm 1` for bosons and fermions respectively.
max_rank : int, optional
Maximum rank of the DLR kernel decomposition. Default 500.
nmax : int, optional
        Maximum index of the Matsubara frequency grid. Default int(lamb).
| |
virtual machine "
"{0}".format(label))
# Check state.
self._wait_status(label, self.RUNNING)
def stop(self, label):
"""Stops a virtual machine. Kill them all.
@param label: virtual machine name.
@raise CuckooMachineError: if unable to stop virtual machine.
"""
log.debug("Stopping machine %s", label)
if self._status(label) == self.POWEROFF:
raise CuckooMachineError("Trying to stop an already stopped "
"machine {0}".format(label))
# Force virtual machine shutdown.
conn = self._connect()
try:
if not self.vms[label].isActive():
log.debug("Trying to stop an already stopped machine %s. "
"Skip", label)
else:
self.vms[label].destroy() # Machete's way!
except libvirt.libvirtError as e:
raise CuckooMachineError("Error stopping virtual machine "
"{0}: {1}".format(label, e))
finally:
self._disconnect(conn)
# Check state.
self._wait_status(label, self.POWEROFF)
def shutdown(self):
"""Override shutdown to free libvirt handlers - they print errors."""
super(LibVirtMachinery, self).shutdown()
# Free handlers.
self.vms = None
def dump_memory(self, label, path):
"""Takes a memory dump.
@param path: path to where to store the memory dump.
"""
log.debug("Dumping memory for machine %s", label)
conn = self._connect()
try:
# Resolve permission issue as libvirt creates the file as
# root/root in mode 0600, preventing us from reading it. This
# supposedly still doesn't allow us to remove it, though..
open(path, "wb").close()
self.vms[label].coreDump(path, flags=libvirt.VIR_DUMP_MEMORY_ONLY)
except libvirt.libvirtError as e:
raise CuckooMachineError("Error dumping memory virtual machine "
"{0}: {1}".format(label, e))
finally:
self._disconnect(conn)
def _status(self, label):
"""Gets current status of a vm.
@param label: virtual machine name.
@return: status string.
"""
log.debug("Getting status for %s", label)
# Stetes mapping of python-libvirt.
# virDomainState
# VIR_DOMAIN_NOSTATE = 0
# VIR_DOMAIN_RUNNING = 1
# VIR_DOMAIN_BLOCKED = 2
# VIR_DOMAIN_PAUSED = 3
# VIR_DOMAIN_SHUTDOWN = 4
# VIR_DOMAIN_SHUTOFF = 5
# VIR_DOMAIN_CRASHED = 6
# VIR_DOMAIN_PMSUSPENDED = 7
conn = self._connect()
try:
state = self.vms[label].state(flags=0)
except libvirt.libvirtError as e:
raise CuckooMachineError("Error getting status for virtual "
"machine {0}: {1}".format(label, e))
finally:
self._disconnect(conn)
if state:
if state[0] == 1:
status = self.RUNNING
elif state[0] == 3:
status = self.PAUSED
elif state[0] == 4 or state[0] == 5:
status = self.POWEROFF
else:
status = self.ERROR
# Report back status.
if status:
self.set_status(label, status)
return status
else:
raise CuckooMachineError("Unable to get status for "
"{0}".format(label))
def _connect(self):
"""Connects to libvirt subsystem.
@raise CuckooMachineError: when unable to connect to libvirt.
"""
# Check if a connection string is available.
if not self.dsn:
raise CuckooMachineError("You must provide a proper "
"connection string")
try:
return libvirt.open(self.dsn)
except libvirt.libvirtError:
raise CuckooMachineError("Cannot connect to libvirt")
def _disconnect(self, conn):
"""Disconnects to libvirt subsystem.
@raise CuckooMachineError: if cannot disconnect from libvirt.
"""
try:
conn.close()
except libvirt.libvirtError:
raise CuckooMachineError("Cannot disconnect from libvirt")
def _fetch_machines(self):
"""Fetch machines handlers.
@return: dict with machine label as key and handle as value.
"""
vms = {}
for vm in self.machines():
vms[vm.label] = self._lookup(vm.label)
return vms
def _lookup(self, label):
"""Search for a virtual machine.
@param conn: libvirt connection handle.
@param label: virtual machine name.
@raise CuckooMachineError: if virtual machine is not found.
"""
conn = self._connect()
try:
vm = conn.lookupByName(label)
except libvirt.libvirtError:
raise CuckooMachineError("Cannot find machine "
"{0}".format(label))
finally:
self._disconnect(conn)
return vm
def _list(self):
"""List available virtual machines.
@raise CuckooMachineError: if unable to list virtual machines.
"""
conn = self._connect()
try:
names = conn.listDefinedDomains()
except libvirt.libvirtError:
raise CuckooMachineError("Cannot list domains")
finally:
self._disconnect(conn)
return names
def _version_check(self):
"""Check if libvirt release supports snapshots.
@return: True or false.
"""
if libvirt.getVersion() >= 8000:
return True
else:
return False
    def _get_snapshot(self, label):
        """Get current snapshot for virtual machine
        @param label: virtual machine name
        @return None or current snapshot
        @raise CuckooMachineError: if cannot find current snapshot or
                when there are too many snapshots available
        """
        def _extract_creation_time(node):
            """Extracts creation time from a KVM vm config file.
            @param node: config file node
            @return: extracted creation time (string, or None if the
                <creationTime> element is absent)
            """
            xml = ET.fromstring(node.getXMLDesc(flags=0))
            return xml.findtext("./creationTime")
        snapshot = None
        conn = self._connect()
        try:
            vm = self.vms[label]
            # Try to get the current snapshot, otherwise fallback on the
            # latest from the config file.
            if vm.hasCurrentSnapshot(flags=0):
                snapshot = vm.snapshotCurrent(flags=0)
            else:
                log.debug("No current snapshot, using latest snapshot")
                # No current snapshot, try to get the last one from config file.
                # NOTE(review): creationTime is compared as a STRING, not a
                # number -- fine while all timestamps have the same number of
                # digits, but worth confirming. Also, if the machine has no
                # snapshots at all, the [0] below raises IndexError, which is
                # NOT converted into a CuckooMachineError (only libvirtError
                # is caught); the finally-disconnect still runs.
                snapshot = sorted(vm.listAllSnapshots(flags=0),
                                  key=_extract_creation_time,
                                  reverse=True)[0]
        except libvirt.libvirtError:
            raise CuckooMachineError("Unable to get snapshot for "
                                     "virtual machine {0}".format(label))
        finally:
            self._disconnect(conn)
        return snapshot
class Processing(object):
    """Base abstract class for processing module."""
    order = 1
    enabled = True

    def __init__(self):
        # Paths and task context are injected by the caller through the
        # set_* methods below before run() is invoked.
        self.analysis_path = ""
        self.baseline_path = ""
        self.logs_path = ""
        self.task = None
        self.options = None
        self.results = {}

    def set_options(self, options):
        """Set report options.
        @param options: report options dict.
        """
        self.options = options

    def set_task(self, task):
        """Add task information.
        @param task: task dictionary.
        """
        self.task = task

    def set_baseline(self, baseline_path):
        """Set the path to the baseline directory."""
        self.baseline_path = baseline_path

    def set_path(self, analysis_path):
        """Set paths.
        @param analysis_path: analysis folder path.
        """
        self.analysis_path = analysis_path
        join = os.path.join
        self.log_path = join(analysis_path, "analysis.log")
        self.file_path = os.path.realpath(join(analysis_path, "binary"))
        # Remaining attributes are plain joins of the analysis folder
        # with a fixed entry name; drive them from one table.
        entries = {
            "dropped_path": "files",
            "dropped_meta_path": "files.json",
            "package_files": "package_files",
            "buffer_path": "buffer",
            "logs_path": "logs",
            "shots_path": "shots",
            "pcap_path": "dump.pcap",
            "pmemory_path": "memory",
            "memory_path": "memory.dmp",
            "mitmout_path": "mitm.log",
            "mitmerr_path": "mitm.err",
            "tlsmaster_path": "tlsmaster.txt",
            "suricata_path": "suricata",
            "network_path": "network",
            "taskinfo_path": "task.json",
        }
        for attr, entry in entries.items():
            setattr(self, attr, join(analysis_path, entry))

    def set_results(self, results):
        """Set the results - the fat dictionary."""
        self.results = results

    def run(self):
        """Start processing.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError
class Signature(object):
"""Base class for Cuckoo signatures."""
name = ""
description = ""
severity = 1
order = 1
categories = []
families = []
authors = []
references = []
platform = None
alert = False
enabled = True
minimum = None
maximum = None
# Maximum amount of marks to record.
markcount = 50
# Basic filters to reduce the amount of events sent to this signature.
filter_apinames = []
filter_categories = []
# If no on_call() handler is present and this field has been set, then
# dispatch on a per-API basis to the accompanying API. That is, rather
# than calling the generic on_call(), call, e.g., on_call_CreateFile().
on_call_dispatch = False
def __init__(self, caller):
"""
@param caller: calling object. Stores results in caller.results
"""
self.marks = []
self.matched = False
self._caller = caller
# These are set by the caller, they represent the process identifier
# and call index respectively.
self.pid = None
self.cid = None
self.call = None
    def _check_value(self, pattern, subject, regex=False, all=False):
        """Checks a pattern against a given subject.
        @param pattern: string or expression to check for.
        @param subject: target of the check; a single value or a list of
            values, each of which is tested individually.
        @param regex: boolean representing if the pattern is a regular
            expression or not and therefore should be compiled.
        @param all: return every matching element instead of only one.
            (Shadows the builtin all(); kept as-is for caller compatibility.)
        @return: list of matches when `all` is set; otherwise one match,
            or None if there was none.
        """
        ret = set()
        if regex:
            # Case-insensitive regex. match() only anchors at the start of
            # the subject -- it is not a full search().
            exp = re.compile(pattern, re.IGNORECASE)
            if isinstance(subject, list):
                for item in subject:
                    if exp.match(item):
                        ret.add(item)
            else:
                if exp.match(subject):
                    ret.add(subject)
        else:
            if isinstance(subject, list):
                for item in subject:
                    # List items are compared case-insensitively...
                    if item.lower() == pattern.lower():
                        ret.add(item)
            else:
                # ...but a scalar subject is compared case-SENSITIVELY.
                # NOTE(review): this asymmetry looks unintentional --
                # confirm before relying on it.
                if subject == pattern:
                    ret.add(subject)
        # Return all elements.
        if all:
            return list(ret)
        # Return only the first element, if available. Otherwise return None.
        elif ret:
            return ret.pop()
def get_results(self, key=None, default=None):
if key:
return self._caller.results.get(key, default)
return self._caller.results
def get_processes(self, name=None):
"""Get a list of processes.
@param name: If set only return processes with that name.
@return: List of processes or empty list
"""
for item in self.get_results("behavior", {}).get("processes", []):
if name is None or item["process_name"] == name:
yield item
def get_process_by_pid(self, pid=None):
"""Get a process by its process identifier.
@param pid: pid to search for.
@return: process.
"""
for item in self.get_results("behavior", {}).get("processes", []):
if item["pid"] == pid:
return item
def get_summary(self, key=None, default=[]):
"""Get one or all values related to the global summary."""
summary = self.get_results("behavior", {}).get("summary", {})
return summary.get(key, default) if key else summary
def get_summary_generic(self, pid, actions):
"""Get generic info from summary.
@param pid: pid of the process. None for all
@param actions: A list of actions to get
"""
ret = []
for process in self.get_results("behavior", {}).get("generic", []):
if pid is not None | |
OoOoOO00 / oO0o / Ii1I * ooOoO0o
if 51 - 51: oO0o
if 34 - 34: OoOoOO00 . i11iIiiIii * OOooOOo . ooOoO0o * O0 * OoO0O00
if 27 - 27: Ii1I . o0oOOo0O0Ooo - OoOoOO00 . II111iiii % Oo0Ooo
if 83 - 83: I11i + oO0o - iIii1I11I1II1 + II111iiii . iII111i
oOO0 = 56 if ( self . outer_version == 6 ) else 36
IiI11II = IIii1i [ 0 : oOO0 ]
oOooooO = IIii1i [ oOO0 : oOO0 + 20 ]
OO0 = IIii1i [ oOO0 + 20 : : ]
if 79 - 79: I1ii11iIi11i - iIii1I11I1II1 % i1IIi / Oo0Ooo + II111iiii
if 95 - 95: oO0o
if 48 - 48: I11i / iIii1I11I1II1 % II111iiii
if 39 - 39: i1IIi . I1ii11iIi11i / I11i / I11i
if 100 - 100: OoooooooOO - OoooooooOO + IiII
iIiIi1i1Iiii = struct . unpack ( "H" , oOooooO [ 6 : 8 ] ) [ 0 ]
iIiIi1i1Iiii = socket . ntohs ( iIiIi1i1Iiii )
if ( iIiIi1i1Iiii & 0x4000 ) :
if ( lisp_icmp_raw_socket != None ) :
OOO00000O = IIii1i [ oOO0 : : ]
if ( self . send_icmp_too_big ( OOO00000O ) ) : return ( [ ] , None )
if 23 - 23: Oo0Ooo - O0
if ( lisp_ignore_df_bit ) :
iIiIi1i1Iiii &= ~ 0x4000
else :
iI111iIi = bold ( "DF-bit set" , False )
dprint ( "{} in inner header, packet discarded" . format ( iI111iIi ) )
return ( [ ] , "Fragment-None-DF-bit" )
if 26 - 26: OOooOOo % OOooOOo / i11iIiiIii + I1ii11iIi11i - O0
if 20 - 20: I1Ii111 . O0 - I1ii11iIi11i / OoOoOO00 - o0oOOo0O0Ooo
if 79 - 79: OoooooooOO - iIii1I11I1II1
OoO00oo00 = 0
iiiIIiiIi = len ( OO0 )
IiIIii = [ ]
while ( OoO00oo00 < iiiIIiiIi ) :
IiIIii . append ( OO0 [ OoO00oo00 : OoO00oo00 + 1400 ] )
OoO00oo00 += 1400
if 9 - 9: i1IIi - OoOoOO00
if 57 - 57: iIii1I11I1II1 * Ii1I * iII111i / oO0o
if 46 - 46: Ii1I
if 61 - 61: o0oOOo0O0Ooo / ooOoO0o - II111iiii
if 87 - 87: I1ii11iIi11i / I1IiiI
oo0O0OO = IiIIii
IiIIii = [ ]
IIi1IiiIi1III = True if iIiIi1i1Iiii & 0x2000 else False
iIiIi1i1Iiii = ( iIiIi1i1Iiii & 0x1fff ) * 8
for oO in oo0O0OO :
if 19 - 19: i1IIi % I1IiiI - iIii1I11I1II1 - oO0o / I1ii11iIi11i
if 16 - 16: Ii1I
if 79 - 79: OoooooooOO - ooOoO0o * Ii1I - II111iiii % OoOoOO00 * IiII
if 31 - 31: I1IiiI
IIII1I1 = iIiIi1i1Iiii / 8
if ( IIi1IiiIi1III ) :
IIII1I1 |= 0x2000
elif ( oO != oo0O0OO [ - 1 ] ) :
IIII1I1 |= 0x2000
if 36 - 36: Ii1I * I11i . I11i / Oo0Ooo / I1IiiI
IIII1I1 = socket . htons ( IIII1I1 )
oOooooO = oOooooO [ 0 : 6 ] + struct . pack ( "H" , IIII1I1 ) + oOooooO [ 8 : : ]
if 80 - 80: OoooooooOO - i1IIi
if 51 - 51: i1IIi . OoOoOO00 / OoOoOO00 % i11iIiiIii * OOooOOo - I1Ii111
if 49 - 49: Oo0Ooo - iIii1I11I1II1
if 64 - 64: I1Ii111 + iIii1I11I1II1
if 14 - 14: Ii1I / OoooooooOO + II111iiii . O0 / i1IIi
if 58 - 58: o0oOOo0O0Ooo / i11iIiiIii / O0 % I11i % I1IiiI
iiiIIiiIi = len ( oO )
iIiIi1i1Iiii += iiiIIiiIi
I1111III111ii = socket . htons ( iiiIIiiIi + 20 )
oOooooO = oOooooO [ 0 : 2 ] + struct . pack ( "H" , I1111III111ii ) + oOooooO [ 4 : 10 ] + struct . pack ( "H" , 0 ) + oOooooO [ 12 : : ]
if 86 - 86: IiII + OoOoOO00 / I1IiiI + I11i % I11i / i11iIiiIii
oOooooO = lisp_ip_checksum ( oOooooO )
iIiI1I = oOooooO + oO
if 2 - 2: o0oOOo0O0Ooo . Ii1I % OoOoOO00
if 58 - 58: I1ii11iIi11i % Ii1I * Ii1I - iII111i
if 9 - 9: ooOoO0o - Ii1I % II111iiii + IiII + OOooOOo % O0
if 65 - 65: OOooOOo - OoO0O00 % i11iIiiIii
if 58 - 58: iII111i
iiiIIiiIi = len ( iIiI1I )
if ( self . outer_version == 4 ) :
I1111III111ii = iiiIIiiIi + oOO0
iiiIIiiIi += 16
IiI11II = IiI11II [ 0 : 2 ] + struct . pack ( "H" , I1111III111ii ) + IiI11II [ 4 : : ]
if 2 - 2: II111iiii + i1IIi
IiI11II = lisp_ip_checksum ( IiI11II )
iIiI1I = IiI11II + iIiI1I
iIiI1I = self . fix_outer_header ( iIiI1I )
if 68 - 68: OOooOOo + Ii1I
if 58 - 58: IiII * Ii1I . i1IIi
if 19 - 19: oO0o
if 85 - 85: ooOoO0o - I1IiiI / i1IIi / OoO0O00 / II111iiii
if 94 - 94: iIii1I11I1II1 + IiII
II11II = oOO0 - 12
I1111III111ii = socket . htons ( iiiIIiiIi )
iIiI1I = iIiI1I [ 0 : II11II ] + struct . pack ( "H" , I1111III111ii ) + iIiI1I [ II11II + 2 : : ]
if 40 - 40: iII111i + O0
IiIIii . append ( iIiI1I )
if 18 - 18: iIii1I11I1II1 % iIii1I11I1II1 % oO0o + I1IiiI % ooOoO0o / Ii1I
return ( IiIIii , "Fragment-Inner" )
if 36 - 36: OoOoOO00 . i11iIiiIii
if 81 - 81: Oo0Ooo * iII111i * OoO0O00
    def fix_outer_header ( self , packet ) :
        # Byte-swap selected 16-bit fields of the outer IPv4 header in
        # place before the packet is written to a raw socket. Bytes 2-3
        # are the IPv4 total-length field and bytes 6-7 the flags/
        # fragment-offset field; presumably the swap is needed because
        # BSD-derived raw sockets expect those fields in host byte order
        # (macOS swaps both, other platforms only the length) -- TODO
        # confirm against the platforms actually supported.
        # The dead `if N - N:` lines are obfuscation filler; they evaluate
        # a falsy 0 and never execute their expression.
        if 85 - 85 : O0 * oO0o
        if 39 - 39 : II111iiii * I1IiiI - iIii1I11I1II1
        if 25 - 25 : OoooooooOO . Ii1I % iII111i . IiII
        if 67 - 67 : OoooooooOO + I1Ii111 / ooOoO0o
        if 75 - 75 : IiII / OoooooooOO . I1IiiI + I1Ii111 - II111iiii
        if 33 - 33 : IiII / IiII . i11iIiiIii * I1ii11iIi11i + o0oOOo0O0Ooo
        if 16 - 16 : IiII
        if 10 - 10 : OoOoOO00 . IiII * iIii1I11I1II1 - oO0o - OoOoOO00 / I1Ii111
        # Only raw IPv4 outer (or inner) headers need the fix-up.
        if ( self . outer_version == 4 or self . inner_version == 4 ) :
            if ( lisp_is_macos ( ) ) :
                # Swap total-length (bytes 2,3) AND frag-offset (bytes 6,7).
                packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : 6 ] + packet [ 7 ] + packet [ 6 ] + packet [ 8 : : ]
                if 13 - 13 : oO0o + OoOoOO00 % IiII % OoooooooOO
            else :
                # Swap only the total-length field (bytes 2,3).
                packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : : ]
                if 22 - 22 : I1Ii111
            if 23 - 23 : O0
        return ( packet )
if 41 - 41: i1IIi . OOooOOo / ooOoO0o / o0oOOo0O0Ooo % IiII - Ii1I
if 14 - 14: I1ii11iIi11i - i11iIiiIii * I1Ii111
def send_packet ( self , lisp_raw_socket , dest ) :
if ( lisp_flow_logging and dest != self . inner_dest ) : self . log_flow ( True )
if 39 - 39: OoooooooOO
dest = dest . print_address_no_iid ( )
IiIIii , i1iIII1IIi = self . fragment ( )
if 63 - 63: II111iiii . I1Ii111 % IiII + II111iiii
for iIiI1I in IiIIii :
if ( len ( IiIIii ) != 1 ) :
self . packet = iIiI1I
self . print_packet ( i1iIII1IIi , True )
if 81 - 81: OOooOOo - I1IiiI % o0oOOo0O0Ooo
if 7 - 7: ooOoO0o - i1IIi . OoOoOO00
try : lisp_raw_socket . sendto ( iIiI1I , ( dest , 0 ) )
except socket . error , oOo :
lprint ( "socket.sendto() failed: {}" . format ( oOo ) )
if 12 - 12: IiII / OoO0O00 / O0 * IiII
if | |
threat_weight = getThreatWeight()
bobot_aset = getBobotAssets()
data_app_threat = getAppThreat()
data_vulnerability_avg = getVulnerabilityAVG()
data_hitung_avg = hitungAVG(data_vulnerability_avg)
data_app_threat_vulnerability = hitungVulnerability(data_app_threat, data_hitung_avg)
data_threat = getThreat()
nilai_bobot_aset = {}
id_app_bobot_aset = []
data_id_app = {}
unique_id_app_sum = {}
sum_risk_value = {}
for data in bobot_aset:
id_app_bobot_aset.append(data[0])
nilai_bobot_aset[data[0]] = data[4]
for i in threat_weight:
if i[0] in id_app_bobot_aset:
sum_th = round(i[2] * nilai_bobot_aset[i[0]] / 100, 2)
if i[0] in data_id_app:
id_app_temp = data_id_app[i[0]]
id_app_temp.append(i[1])
data_id_app[i[0]] = id_app_temp
unique_id_app_sum[i[0]] += sum_th
else:
id_app_temp = []
id_app_temp.append(i[1])
data_id_app[i[0]] = id_app_temp
unique_id_app_sum[i[0]] = sum_th
for id_app, value in unique_id_app_sum.items():
sum_risk_value[id_app] = value + data_app_threat_vulnerability[i[0]]['average']
hasil_akhir = {}
for id_app in unique_id_app_sum:
hasil_akhir[id_app] = sum_risk_value[id_app]
dict_sum_th = {}
for i in threat_weight:
if i[0] in dict_sum_th:
list_temp_th = dict_sum_th[i[0]]
dict_temp_th = {}
if i[1] in data_id_app[i[0]]:
dict_temp_th[i[1]] = round(i[2] * nilai_bobot_aset[i[0]] / 100, 2)
list_temp_th.append(dict_temp_th)
dict_sum_th[i[0]] = list_temp_th
else:
dict_temp_th[i[1]] = 0
list_temp_th.append(dict_temp_th)
dict_sum_th[i[0]] = list_temp_th
else:
list_temp_th = []
dict_temp_th = {}
if i[1] in data_id_app[i[0]]:
dict_temp_th[i[1]] = round(i[2] * nilai_bobot_aset[i[0]] / 100, 2)
list_temp_th.append(dict_temp_th)
dict_sum_th[i[0]] = list_temp_th
else:
dict_temp_th[i[1]] = 0
list_temp_th.append(dict_temp_th)
dict_sum_th[i[0]] = list_temp_th
list_id_threat = [i[0] for i in data_threat]
final_sum_th = {}
for k, v in dict_sum_th.items():
dict_temp_th_2 = {}
dict_temp_th_sum = {}
for dict_data in v:
for a, b in dict_data.items():
dict_temp_th_2[a] = b
for i in list_id_threat:
if i in dict_temp_th_2:
continue
else:
dict_temp_th_2[i] = 0
for id_threat, sum in sorted(dict_temp_th_2.items()):
dict_temp_th_sum[id_threat] = sum
final_sum_th[k] = dict_temp_th_sum
list_data_final = []
for key, value in data_id_app.items():
temp_dict = {}
temp_dict[key] = {
'probabilitas_threat': final_sum_th[key],
'bobot': nilai_bobot_aset[key],
'hasil_akhir': hasil_akhir[key]
}
list_data_final.append(temp_dict)
final_data = {
'data': list_data_final
}
return final_data
@app.route('/admin/saat_usulan')
def index_during_migration():
    """Admin view of the 'saat usulan' records with their risk figures."""
    # Guard clause: only a logged-in admin may see this page.
    if 'loggedin_admin' not in session:
        return redirect(url_for('login'))
    id_app = getAssets()
    data_saat_usulan = getSaatUsulan()
    final_data = hitungDataSaatUsulan(data_saat_usulan)
    return render_template('during_migration.html',
                           data_saat_usulan=data_saat_usulan,
                           final_data=final_data, id_app=id_app)
@app.route('/saat_usulan')
def index_saat_usulan():
    """User view of the 'saat usulan' records with their risk figures."""
    # Guard clause: anonymous visitors are sent back to the home page.
    if 'loggedin' not in session:
        return redirect(url_for('home'))
    id_app = getAssets()
    data_saat_usulan = getSaatUsulan()
    count_data = getCountSaatUsulan(session['username'])
    final_data = hitungDataSaatUsulan(data_saat_usulan)
    return render_template('saat_usulan.html',
                           data_saat_usulan=data_saat_usulan,
                           final_data=final_data, id_app=id_app,
                           count_data=count_data[0][0])
@app.route('/saat_usulan/insert', methods = ['POST'])
def insert_saat_usulan():
    """Persist a new 'saat usulan' record submitted from the form."""
    if request.method == "POST":
        flash("Data Inserted Successfully")
        # Pull the submitted fields in the order the DB helper expects.
        fields = [request.form[name] for name in (
            'email', 'id_app', 'criteria_1', 'criteria_2',
            'criteria_3', 'criteria_4', 'criteria_5')]
        insertSaatUsulan(*fields)
        return redirect(url_for('index_saat_usulan'))
@app.route('/saat_usulan/delete/<string:id_app>', methods = ['GET'])
def delete_saat_usulan(id_app):
    """Delete the logged-in user's record for the given application id."""
    flash("Record Has Been Deleted Successfully")
    # Deletion is scoped to the current user's own entry.
    deleteSaatUsulan(id_app, session['username'])
    return redirect(url_for('index_saat_usulan'))
@app.route('/saat_usulan/update',methods=['POST','GET'])
def update_saat_usulan():
    """Update the logged-in user's 'saat usulan' record from the form."""
    if request.method == 'POST':
        # Collect criteria_1..criteria_5 from the submitted form.
        criteria = [request.form['criteria_%d' % i] for i in range(1, 6)]
        updateSaatUsulan(session['username'], request.form['id_app'],
                         *criteria)
        flash("Data Updated Successfully")
        return redirect(url_for('index_saat_usulan'))
def hitungDataSaatUsulan(data_saat_usulan):
    """Average the pre-proposal risk with each during-proposal score.

    For every application id, the result is the average of the baseline risk
    from hitungResikoSebelumUsulan() ('hasil_akhir') and the score stored in
    column 6 of the 'saat usulan' row, rounded to 2 decimals.  Only the first
    row per application id is used, matching hitungDataSetelahUsulan().

    Returns {id_app: {'risk_value': rounded_average}}.
    """
    baseline = {}
    for datas in hitungResikoSebelumUsulan()['data']:
        for id_app in datas:
            baseline[id_app] = datas[id_app]['hasil_akhir']
    sum_data_risk = {}
    for row in data_saat_usulan:
        # BUG FIX: the original tested membership in a dict that was never
        # populated, so the "first row wins" guard was a no-op and later
        # duplicate rows silently overwrote earlier ones.
        if row[0] not in sum_data_risk:
            sum_data_risk[row[0]] = round((baseline[row[0]] + row[6]) / 2, 2)
    return {id_app: {'risk_value': value}
            for id_app, value in sum_data_risk.items()}
def hitungDataSetelahUsulan(data_setelah_usulan):
    """Average the pre-proposal risk with each after-proposal score.

    Returns {id_app: {'risk_value': rounded_average}} using the first row
    per application id from *data_setelah_usulan* (column 6 holds the score).
    """
    baseline = {}
    for datas in hitungResikoSebelumUsulan()['data']:
        for id_app in datas:
            baseline[id_app] = datas[id_app]['hasil_akhir']
    averaged = {}
    for row in data_setelah_usulan:
        if row[0] not in averaged:
            averaged[row[0]] = round((baseline[row[0]] + row[6]) / 2, 2)
    return {id_app: {'risk_value': value} for id_app, value in averaged.items()}
@app.route('/admin/setelah_usulan')
def index_after_migration():
    """Admin view of risk values after the migration proposal."""
    if 'loggedin_admin' not in session:
        return redirect(url_for('login'))
    data_setelah_usulan = getSetelahUsulan()
    return render_template(
        'after_migrating.html',
        data_setelah_usulan=data_setelah_usulan,
        final=hitungDataSetelahUsulan(data_setelah_usulan),
        id_app=getAssets())
@app.route('/setelah_usulan')
def index_setelah_usulan():
    """User view of risk values after the migration proposal.

    Shows the stored 'setelah usulan' rows plus the averaged risk value per
    application (baseline risk averaged with the after-proposal score).
    """
    if 'loggedin' not in session:
        return redirect(url_for('home'))
    data_setelah_usulan = getSetelahUsulan()
    count_data = getCountSaatUsulan(session['username'])
    # The averaging logic was previously inlined here; reuse
    # hitungDataSetelahUsulan() (identical computation) so the user and
    # admin views cannot drift apart.
    final = hitungDataSetelahUsulan(data_setelah_usulan)
    return render_template('setelah_usulan.html',
                           data_setelah_usulan=data_setelah_usulan,
                           final=final,
                           id_app=getAssets(),
                           count_data=count_data[0][0])
@app.route('/setelah_usulan/insert', methods=['POST'])
def insert_setelah_usulan():
    """Store a new 'setelah usulan' record from the submitted form."""
    if request.method == "POST":
        flash("Data Inserted Successfully")
        form = request.form
        criteria = [form['criteria_%d' % n] for n in range(1, 6)]
        insertSetelahUsulan(form['email'], form['id_app'], *criteria)
        return redirect(url_for('index_setelah_usulan'))
@app.route('/setelah_usulan/delete/<string:id_app>', methods=['GET'])
def delete_setelah_usulan(id_app):
    """Remove the current user's 'setelah usulan' record for *id_app*."""
    flash("Record Has Been Deleted Successfully")
    deleteSetelahUsulan(id_app, session['username'])
    return redirect(url_for('index_setelah_usulan'))
@app.route('/setelah_usulan/update', methods=['POST', 'GET'])
def update_setelah_usulan():
    """Update an existing 'setelah usulan' record with the submitted scores."""
    if request.method == 'POST':
        form = request.form
        criteria = [form['criteria_%d' % n] for n in range(1, 6)]
        updateSetelahUsulan(session['username'], form['id_app'], *criteria)
        flash("Data Updated Successfully")
        return redirect(url_for('index_setelah_usulan'))
def hitungResikoShareeful():
    """Compute per-application risk using the Shareeful model.

    Each (application, threat) pair contributes
    round(threat_weight * asset_weight / 100, 2); threats not linked to an
    application contribute 0, and the per-application total becomes
    'hasil_akhir'.

    Returns:
        {'data': [{id_app: {'probabilitas_threat': {id_threat: score},
                            'bobot': asset_weight,
                            'hasil_akhir': total_score}}, ...]}
    """
    # NOTE(review): the totals are stored in a module-level global as well
    # as returned; presumably other code reads hasil_akhir directly -- confirm.
    global hasil_akhir
    threat_weight = getThreatWeight()
    bobot_aset = getBobotAssets()
    data_threat = getThreat()
    nilai_bobot_aset = {}
    id_app_bobot_aset = []
    data_id_app = {}
    unique_id_app_sum = {}
    # Index asset weights by application id (row col 0 -> id, col 4 -> weight).
    for data in bobot_aset:
        id_app_bobot_aset.append(data[0])
        nilai_bobot_aset[data[0]] = data[4]
    # First pass: total threat score per application, and which threat ids
    # (i[1]) are attached to each application (i[0]); i[2] is the weight.
    for i in threat_weight:
        if i[0] in id_app_bobot_aset:
            sum_th = round(i[2] * nilai_bobot_aset[i[0]] / 100, 2)
            if i[0] in data_id_app:
                id_app_temp = data_id_app[i[0]]
                id_app_temp.append(i[1])
                data_id_app[i[0]] = id_app_temp
                unique_id_app_sum[i[0]] += sum_th
            else:
                id_app_temp = []
                id_app_temp.append(i[1])
                data_id_app[i[0]] = id_app_temp
                unique_id_app_sum[i[0]] = sum_th
    # Copy the totals into the global used by the final payload below.
    hasil_akhir = {}
    for id_app in unique_id_app_sum:
        hasil_akhir[id_app] = unique_id_app_sum[id_app]
    # Second pass: build, per application, a list of {threat_id: score}
    # entries (score 0 for threats not attached to the application).
    # NOTE(review): this pass indexes data_id_app[i[0]] without the
    # membership guard used above; a threat_weight row whose app id has no
    # asset weight would raise KeyError -- presumably the data precludes
    # that, confirm.
    dict_sum_th = {}
    for i in threat_weight:
        if i[0] in dict_sum_th:
            list_temp_th = dict_sum_th[i[0]]
            dict_temp_th = {}
            if i[1] in data_id_app[i[0]]:
                dict_temp_th[i[1]] = round(i[2] * nilai_bobot_aset[i[0]] / 100, 2)
                list_temp_th.append(dict_temp_th)
                dict_sum_th[i[0]] = list_temp_th
            else:
                dict_temp_th[i[1]] = 0
                list_temp_th.append(dict_temp_th)
                dict_sum_th[i[0]] = list_temp_th
        else:
            list_temp_th = []
            dict_temp_th = {}
            if i[1] in data_id_app[i[0]]:
                dict_temp_th[i[1]] = round(i[2] * nilai_bobot_aset[i[0]] / 100, 2)
                list_temp_th.append(dict_temp_th)
                dict_sum_th[i[0]] = list_temp_th
            else:
                dict_temp_th[i[1]] = 0
                list_temp_th.append(dict_temp_th)
                dict_sum_th[i[0]] = list_temp_th
    list_id_threat = [i[0] for i in data_threat]
    final_sum_th = {}
    # Flatten each application's entry list into one {threat_id: score}
    # mapping, pad missing threat ids with 0, and emit in sorted id order.
    for k, v in dict_sum_th.items():
        dict_temp_th_2 = {}
        dict_temp_th_sum = {}
        for dict_data in v:
            for a, b in dict_data.items():
                dict_temp_th_2[a] = b
        for i in list_id_threat:
            if i in dict_temp_th_2:
                continue
            else:
                dict_temp_th_2[i] = 0
        # NOTE(review): the loop variable shadows the builtin sum().
        for id_threat, sum in sorted(dict_temp_th_2.items()):
            dict_temp_th_sum[id_threat] = sum
        final_sum_th[k] = dict_temp_th_sum
    # Assemble the final per-application payload.
    list_data_final = []
    for key, value in data_id_app.items():
        temp_dict = {}
        temp_dict[key] = {
            'probabilitas_threat': final_sum_th[key],
            'bobot': nilai_bobot_aset[key],
            'hasil_akhir': hasil_akhir[key]
        }
        list_data_final.append(temp_dict)
    final_data = {
        'data': list_data_final
    }
    return final_data
@app.route('/shareeful')
def index_shareeful():
    """Show the stored Shareeful risk values for the logged-in user."""
    if 'loggedin' not in session:
        return redirect(url_for('home'))
    count_data = getCountData(session['username'])
    return render_template('shareeful.html',
                           count_data=count_data[0][0],
                           data_shareefull=getNilaiShareeful())
@app.route('/shareeful/insert', methods=['POST'])
def insert_shareeful():
    """Recompute the Shareeful risk model and persist it for the user.

    Existing rows for the user are deleted, then one row per application is
    inserted with the eleven threat probabilities (TH-01..TH-11) and the
    final risk value.
    """
    if request.method == "POST":
        flash("Data Calculated Successfully")
        email = session['username']
        deleteShareeful(email)
        final_data = hitungResikoShareeful()
        for datas in final_data['data']:
            for id_app in datas:
                probs = datas[id_app]['probabilitas_threat']
                # TH-01 .. TH-11, in order.
                th_values = [probs['TH-%02d' % n] for n in range(1, 12)]
                insertShareeful(email, *th_values,
                                datas[id_app]['hasil_akhir'], id_app)
        return redirect(url_for('index_shareeful'))
@app.route('/admin/report')
def index_report():
    """Admin report comparing risk before, during and after the proposal."""
    if 'loggedin_admin' not in session:
        return redirect(url_for('login'))
    # Column 13 of each 'sebelum usulan' row holds the baseline risk value.
    data_sebelum = {row[0]: row[13] for row in getNilaiSebelumUsulan()}
    saat = hitungDataSaatUsulan(getSaatUsulan())
    setelah = hitungDataSetelahUsulan(getSetelahUsulan())
    return render_template('report.html',
                           asset=getAssets(),
                           data_sebelum_usulan=data_sebelum,
                           data_saat_usulan=saat,
                           data_setelah_usulan=setelah)
def hitungResikoKozlov():
global hasil_akhir
threat_weight = getThreatWeight()
bobot_aset = getBobotAssets()
data_threat = getThreat()
data_app_threat = getAppThreat()
data_vulnerability_avg = getVulnerabilityAVG()
data_hitung_avg = hitungAVG(data_vulnerability_avg)
data_app_threat_vulnerability = hitungVulnerability(data_app_threat, data_hitung_avg)
nilai_bobot_aset = {}
id_app_bobot_aset = []
data_id_app = {}
unique_id_app_sum = {}
len_id_th_tempt = {}
sum_risk_value = {}
for data in bobot_aset:
id_app_bobot_aset.append(data[0])
nilai_bobot_aset[data[0]] = data[4]
for i in threat_weight:
if i[0] in id_app_bobot_aset:
sum_th = round(data_app_threat_vulnerability[i[0]]['average'] * i[2] * nilai_bobot_aset[i[0]] / 100, 2)
if i[0] in data_id_app:
id_app_temp = data_id_app[i[0]]
id_app_temp.append(i[1])
data_id_app[i[0]] = id_app_temp
unique_id_app_sum[i[0]] += sum_th
len_id_th_tempt[i[0]] += 1
else:
id_app_temp = []
id_app_temp.append(i[1])
data_id_app[i[0]] = id_app_temp
unique_id_app_sum[i[0]] = sum_th
len_id_th_tempt[i[0]] = 1
for id_app, value in unique_id_app_sum.items():
sum_risk_value[id_app] = round(value / len_id_th_tempt[id_app], 2)
hasil_akhir = {}
for id_app in unique_id_app_sum:
hasil_akhir[id_app] = sum_risk_value[id_app]
dict_sum_th = {}
for i in threat_weight:
if i[0] in dict_sum_th:
list_temp_th = dict_sum_th[i[0]]
dict_temp_th = {}
if i[1] in data_id_app[i[0]]:
dict_temp_th[i[1]] = round(data_app_threat_vulnerability[i[0]]['average'] * i[2] * nilai_bobot_aset[i[0]] / 100, 2)
list_temp_th.append(dict_temp_th)
dict_sum_th[i[0]] = list_temp_th
else:
dict_temp_th[i[1]] = 0
list_temp_th.append(dict_temp_th)
dict_sum_th[i[0]] = list_temp_th
else:
list_temp_th = []
dict_temp_th = {}
if i[1] in data_id_app[i[0]]:
dict_temp_th[i[1]] = round(data_app_threat_vulnerability[i[0]]['average'] * i[2] * nilai_bobot_aset[i[0]] / | |
import json
import math as m
from abc import ABC, abstractmethod
try:
from webots.controller import Robot
except Exception:
Robot = object
# Simulation step size (ms) and wheel-speed presets.
TIME_STEP = 32
MAX_WHEEL_SPEED = 15.0
# Convenience fractions of the maximum wheel speed, keyed by name.
# NOTE(review): 'eigth' is a typo for 'eighth', but callers reference this
# spelling throughout the file, so it is kept as-is.
WHEEL_SPEED = {
    'max': MAX_WHEEL_SPEED,
    'half': MAX_WHEEL_SPEED / 2.0,
    'quarter': MAX_WHEEL_SPEED / 4.0,
    'eigth': MAX_WHEEL_SPEED / 8.0,
    'sixteenth': MAX_WHEEL_SPEED / 16.0
}
# Common angle constants (radians).
PI_2 = m.pi / 2.0
PI_4 = m.pi / 4.0
def close(current, target, threshold):
    """Return True when *current* is within *threshold* of *target*.

    Accepts scalars or same-length sequences (compared element-wise; every
    component must be within tolerance).  Uses isinstance() instead of the
    original ``type(x) is list`` checks so list/tuple subclasses work too.
    """
    if isinstance(current, (list, tuple)):
        return all(abs(x - y) < threshold for x, y in zip(current, target))
    return abs(current - target) < threshold
def far(current, target, threshold):
    """Inverse of close(): True when *current* is out of tolerance."""
    return not close(current, target, threshold)
def bounded_value(value, bound, offset=0.0):
    """Wrap *value* into [-bound, bound], carrying the overshoot across to
    the opposite end and shifting it by *offset* when it wraps."""
    if value > bound:
        overshoot = value - bound
        return -bound + overshoot + offset
    if value < -bound:
        undershoot = -bound - value
        return bound - undershoot - offset
    return value
class AbstractRobot(ABC):
    """Abstract robot interface implemented by TextRobot and WebotsRobot."""
    def __init__(self):
        super().__init__()
    # Each action below must be provided by a concrete backend.
    @abstractmethod
    def set_gaze_down(self, down, wait): pass
    @abstractmethod
    def turn_left(self, speed): pass
    @abstractmethod
    def turn_right(self, speed): pass
    @abstractmethod
    def face_direction(self, direction): pass
    @abstractmethod
    def drive(self, distance, speed): pass
    @abstractmethod
    def pick_object(self, distance, speed): pass
    @abstractmethod
    def place_object(self, distance, speed): pass
class TextRobot(AbstractRobot):
    """Console-only robot backend: prints each action instead of moving."""

    def __init__(self, home_coords, name='Robart'):
        super().__init__()
        self.home_coords = home_coords
        self.name = name
        print(f'Initializing at position {home_coords}')

    def _announce(self, message):
        # Prefix the message with the robot name, print it, and return it.
        output = f'{self.name}: {message}'
        print(output)
        return output

    def set_gaze_down(self, down, wait=True):
        output = self._announce(f'Looking {"down" if down else "up"}')
        if wait:
            print(f'{self.name}: Waiting')
        return output

    def turn_left(self, speed=WHEEL_SPEED['eigth']):
        return self._announce(f'Turning left at speed {speed}')

    def turn_right(self, speed=WHEEL_SPEED['eigth']):
        return self._announce(f'Turning right at speed {speed}')

    def face_direction(self, direction):
        return self._announce(f'Facing direction {direction}')

    def drive(self, distance, speed=WHEEL_SPEED['half']):
        return self._announce(f'Driving distance {distance} at speed {speed}')

    def pick_object(self, distance=0.205, speed=WHEEL_SPEED['quarter']):
        return self._announce(f'Picking object {distance} away at speed {speed}')

    def place_object(self, distance=0.2025, speed=WHEEL_SPEED['quarter']):
        return self._announce(f'Placing object {distance} away at speed {speed}')
class WebotsRobot(AbstractRobot, Robot):
    """Webots-backed robot: drives motors/sensors declared in motors.json
    and sensors.json via the Webots device API.

    Tracks a logical pose: ``coords`` (two GPS-plane coordinates) and
    ``direction`` (discrete facing 0-3, one step per quarter turn).
    """
    def __init__(self, home_coords):
        super().__init__()
        # Logical pose tracking; starts at home facing direction 0.
        self.coords = self.home_coords = home_coords
        self.direction = self.home_direction = 0
        # Saved wheel torques while the wheels are set passive (8 wheels).
        self.available_torques = [0.0, ] * 8
        self.timestep = int(self.getBasicTimeStep())
        # Motor handles, grouped as declared in motors.json.
        self.m = self._initialize_motors('motors.json')
        self.wheel_motors = self.m['wheel_motors']
        self.rotation_motors = self.m['rotation_motors']
        self.arm_motors = self.m['arm_motors']
        self.hand_motors = self.m['hand_motors']
        self.head_tilt_motor = self.m['body_motors']['head_tilt']
        self.torso_lift_motor = self.m['body_motors']['torso_lift']
        # Sensor handles, grouped as declared in sensors.json (plus the
        # position sensors derived from each motor group).
        self.s = self._initialize_sensors('sensors.json')
        self.camera_sensors = self.s['camera_sensors']
        self.contact_sensors = self.s['contact_sensors']
        self.wheel_sensors = self.s['wheel_sensors']
        self.rotation_sensors = self.s['rotation_sensors']
        self.arm_sensors = self.s['arm_sensors']
        self.hand_sensors = self.s['hand_sensors']
        self.head_tilt_sensor = self.s['body_sensors']['head_tilt']
        self.torso_lift_sensor = self.s['body_sensors']['torso_lift']
        self.compass = self.s['body_sensors']['compass']
        self.gps = self.s['body_sensors']['gps']
        self.inertial_unit = self.s['body_sensors']['inertial_unit']
        self._initialize_robot()
    def _initialize_motors(self, motors_file):
        # Load {group: {key: device_name}} from JSON and replace each device
        # name with its Webots device handle.
        with open(motors_file) as f:
            m = json.load(f)
        for motors in m.values():
            for key, value in motors.items():
                motors[key] = self.getDevice(value)
                if 'wheel' in key:
                    # Wheels run in velocity mode: unbounded position target,
                    # initially stopped.
                    motors[key].setPosition(float('inf'))
                    motors[key].setVelocity(0.0)
        return m
    def _initialize_sensors(self, sensors_file):
        # Load sensor device names from JSON and enable every sensor.
        with open(sensors_file) as f:
            s = json.load(f)
        for sensors in s.values():
            for key, value in sensors.items():
                sensors[key] = self.getDevice(value)
                sensors[key].enable(self.timestep)
        # Also expose each motor's built-in position sensor under
        # '<group>_sensors', merging with any explicitly listed sensors.
        for name, motors in self.m.items():
            name = name.split('_')[0] + '_sensors'
            s[name] = {} if name not in s.keys() else s[name]
            for key, value in motors.items():
                s[name][key] = value.getPositionSensor()
                s[name][key].enable(self.timestep)
        return s
    def _initialize_robot(self):
        # Start in a known pose: gaze down, both arms tucked, hands open.
        self.set_gaze_down(True, wait=False)
        self._set_arm_position(True, 0.0, 1.35, 0.0, -2.2, 0.0, wait=False)
        self._set_arm_position(False, 0.0, 1.35, 0.0, -2.2, 0.0, wait=True)
        self._set_hand_closed(True, False, wait=False)
        self._set_hand_closed(False, False, wait=True)
    def _wait(self, time):
        # Advance the simulation *time* steps (or until it terminates).
        step = 0
        while self.step(self.timestep) != -1 and step < time:
            step += 1
    def _set_motors_positions(self, motors, positions, torque=None):
        # Set one target position per motor; torque defaults to the first
        # motor's currently available torque.
        if not torque:
            torque = list(motors)[0].getAvailableTorque()
        for motor, position in zip(motors, positions):
            motor.setAvailableTorque(torque)
            motor.setPosition(position)
    def _set_motors_position(self, motors, position, torque=None):
        # Broadcast a single target position to every motor in *motors*.
        self._set_motors_positions(motors, ((position, )*len(motors)), torque)
    def _set_wheels_speeds(self, fll, flr, frl, frr, bll, blr, brl, brr):
        # One velocity per wheel motor, in the order the motors were
        # declared (names suggest front/back left/right pairs -- TODO
        # confirm against motors.json).
        targets = (fll, flr, frl, frr, bll, blr, brl, brr)
        for wheel, speed in zip(self.wheel_motors.values(), targets):
            wheel.setVelocity(speed)
    def _set_wheels_speed(self, speed):
        # Same velocity on all eight wheels.
        self._set_wheels_speeds(*((speed, )*8))
    def _set_wheels_rotations(self, fl, fr, bl, br, wait=True):
        # Reorient the four steering (rotation) motors.  When waiting, stop
        # and de-torque the wheels first so they can pivot freely.
        if wait:
            self._set_wheels_speed(0.0)
            self._set_wheels_passive(True)
        targets = (fl, fr, bl, br)
        self._set_motors_positions(self.rotation_motors.values(), targets)
        if wait:
            # Watch one wheel's sensor as a proxy for all four.
            test_sensor, test_target = self.rotation_sensors['fl'], targets[0]
            while far(test_sensor.getValue(), test_target, 0.05):
                self.step(self.timestep)
            self._set_wheels_passive(False)
    def _set_wheels_rotation(self, rotation, wait=True):
        # Same steering angle on all four rotation motors.
        self._set_wheels_rotations(*((rotation, )*4), wait)
    def _set_wheels_passive(self, passive):
        # Zero the wheel torques (remembering the old values) so the wheels
        # spin freely, or restore the saved torques.
        if passive:
            for index, wheel in enumerate(self.wheel_motors.values()):
                self.available_torques[index] = wheel.getAvailableTorque()
                wheel.setAvailableTorque(0.0)
        else:
            for index, wheel in enumerate(self.wheel_motors.values()):
                wheel.setAvailableTorque(self.available_torques[index])
    def _set_robot_rotation(self, angle, speed):
        """Rotate in place by *angle* radians using the inertial unit's yaw."""
        # Angle the wheels tangentially so driving them spins the robot.
        self._set_wheels_rotations(3.0 * PI_4, PI_4, -3.0 * PI_4, -PI_4)
        rotation = self.inertial_unit.getRollPitchYaw()[2]
        # Target yaw, wrapped into the [-pi, pi] range with a small offset.
        target = bounded_value(rotation + angle, m.pi, 0.025)
        wheel_speed = speed if angle > 0 else -speed
        self._set_wheels_speed(wheel_speed)
        while far(rotation, target, 0.005):
            self.step(self.timestep)
            rotation = self.inertial_unit.getRollPitchYaw()[2]
            if close(rotation, target, 0.05):
                # Slow down for the final approach to avoid overshoot.
                self._set_wheels_speed(wheel_speed / 16.0)
        self._set_wheels_rotation(0.0)
    def _set_arm_speeds(self, left, sp, sl, ar, ef, wr):
        # Joint velocities for one arm; the five values map to the arm's
        # joints in declaration order (sp/sl/ar/ef/wr look like shoulder
        # pan/lift, arm roll, elbow flex, wrist roll -- TODO confirm).
        motors = [value for key, value in self.arm_motors.items()
                  if key.startswith('l' if left else 'r')]
        targets = (sp, sl, ar, ef, wr)
        for motor, velocity in zip(motors, targets):
            motor.setVelocity(velocity)
    def _set_arm_position(self, left, sp, sl, ar, ef, wr, wait=True):
        # Command one arm's five joints to the given positions; optionally
        # block until every joint sensor reaches its target.
        motors = [value for key, value in self.arm_motors.items()
                  if key.startswith('l' if left else 'r')]
        sensors = [value for key, value in self.arm_sensors.items()
                   if key.startswith('l' if left else 'r')]
        targets = (sp, sl, ar, ef, wr)
        for motor, position in zip(motors, targets):
            motor.setPosition(position)
        self._set_arm_speeds(left, 1.5, 1.5, 1.5, 2.5, 1.5)
        while wait and far([s.getValue() for s in sensors], targets, 0.05):
            self.step(self.timestep)
    def _set_hand_closed(self, left, closed, torque=10.0, wait=True):
        # Close (target 0.0) or open (target 0.5) one gripper.  While
        # closing, once both contact sensors fire, clamp at 95% of the
        # current closure with the given torque to hold the object.
        motors = [value for key, value in self.hand_motors.items()
                  if key.startswith('l' if left else 'r')]
        positions = [value for key, value in self.hand_sensors.items()
                     if key.startswith('l' if left else 'r')]
        contacts = [value for key, value in self.contact_sensors.items()
                    if key.startswith('l' if left else 'r')]
        target = 0.0 if closed else 0.5
        self._set_motors_position(motors, target)
        while wait and far(positions[0].getValue(), target, 0.05):
            if closed and all(sensor.getValue() > 0.5 for sensor in contacts):
                position = max(0.0, 0.95 * positions[0].getValue())
                self._set_motors_position(motors, position, torque)
                break
            self.step(self.timestep)
    def _set_robot_alignment(self, alignment, direction):
        # Nudge the front wheels to steer back toward the reference line
        # recorded in *alignment*; returns the along-track GPS coordinate.
        def comparison(x, y): return x > y if direction in (0, 3) else x < y
        target, result = (0, 2) if direction in (0, 2) else (2, 0)
        if comparison(self.gps.getValues()[target], alignment[target]):
            self._set_wheels_rotations(0.025, 0.025, 0.0, 0.0, False)
        else:
            self._set_wheels_rotations(-0.025, -0.025, 0.0, 0.0, False)
        return self.gps.getValues()[result]
    def _get_position_target(self, direction, distance):
        # Map the discrete facing (0-3) to a coordinate axis and a signed
        # travel distance; returns (current, target) along that axis.
        axis = 0 if direction in (1, 3) else 1
        distance = -distance if direction in (0, 1) else distance
        position = self.coords[axis]
        return position, position + distance
    def set_gaze_down(self, down, wait=True):
        """Tilt the head down (0.5 rad) or level (0.0)."""
        target = 0.5 if down else 0.0
        self.head_tilt_motor.setPosition(target)
        while wait and far(self.head_tilt_sensor.getValue(), target, 0.05):
            self.step(self.timestep)
    def turn_left(self, speed=WHEEL_SPEED['eigth']):
        """Rotate +90 degrees and advance the facing counter (mod 4)."""
        self._set_robot_rotation(PI_2, speed)
        if self.direction < 3:
            self.direction += 1
        else:
            self.direction = 0
    def turn_right(self, speed=WHEEL_SPEED['eigth']):
        """Rotate -90 degrees and decrement the facing counter (mod 4)."""
        self._set_robot_rotation(-PI_2, speed)
        if self.direction > 0:
            self.direction -= 1
        else:
            self.direction = 3
    def turn_around(self, speed=WHEEL_SPEED['eigth']):
        """Rotate 180 degrees and flip the facing counter (mod 4)."""
        self._set_robot_rotation(m.pi, speed)
        if self.direction < 2:
            self.direction += 2
        else:
            self.direction -= 2
    def face_direction(self, direction):
        """Turn by the cheapest rotation to reach facing *direction* (0-3)."""
        if self.direction - direction in (-1, 3):
            self.turn_left()
        elif self.direction - direction in (1, -3):
            self.turn_right()
        elif self.direction - direction in (-2, 2):
            self.turn_around()
    def drive(self, distance, speed=WHEEL_SPEED['half']):
        """Drive *distance* along the current facing, holding alignment."""
        alignment = self.gps.getValues()
        position, target = self._get_position_target(self.direction, distance)
        wheel_speed = speed if distance > 0 else -speed
        self._set_wheels_speed(wheel_speed)
        while far(position, target, 0.0025):
            position = self._set_robot_alignment(alignment, self.direction)
            if close(position, target, 0.1):
                # Creep for the final approach to avoid overshoot.
                self._set_wheels_speed(wheel_speed / 16.0)
            self.step(self.timestep)
        # Refresh the logical coordinates from GPS components 0 and 2.
        self.coords = (self.gps.getValues()[0], self.gps.getValues()[2])
        self._set_wheels_speed(0.0)
    def pick_object(self, distance=0.1995, speed=WHEEL_SPEED['quarter']):
        """Reach out, drive in, grasp with the left hand, lift, back away."""
        self._set_arm_position(True, 0.0, 1.1, 0.0, -1.1, 0.0)
        self.drive(distance, speed)
        self._wait(25)
        self._set_hand_closed(True, True)
        self._wait(25)
        self._set_arm_position(True, 0.0, 0.85, 0.0, -1.25, 0.0, True)
        self.drive(-distance, speed)
    def place_object(self, distance=0.195, speed=WHEEL_SPEED['quarter']):
        """Drive in, lower the arm, release, back away, and tuck the arm."""
        self.drive(distance, speed)
        self._wait(25)
        self._set_arm_position(True, 0.0, 1.1, 0.0, -1.11, 0.0)
        self._wait(25)
        self._set_hand_closed(True, False)
        self.drive(-distance, speed)
        self._set_arm_position(True, 0.0, 1.35, 0.0, -2.2, 0.0)
    def goto_coords(self, coords, speed=WHEEL_SPEED['half']):
        """Navigate to *coords*, then face the target and look down.

        When changing rows, first drives to x = 6.0 (presumably a central
        aisle in the world -- TODO confirm against the world file) before
        moving along the second axis.
        """
        self.set_gaze_down(False)
        if far(self.coords[1], coords[1], 0.125):
            self.face_direction(1 if self.coords[0] > 6.0 else 3)
            self.drive(abs(self.coords[0] - 6.0), speed)
        self.face_direction(0 if self.coords[1] > coords[1] else 2)
        self.drive(abs(self.coords[1] - coords[1]), speed)
        self.face_direction(1 if self.coords[0] > coords[0] else 3)
        self.drive(abs(self.coords[0] - coords[0]), speed)
        self.face_direction(0 if coords == self.home_coords else 2)
        self.set_gaze_down(True)
class RobotController:
    """Thin wrapper that delegates to a robot backend and tracks one held item."""

    def __init__(self, robot: AbstractRobot, home_coords):
        self.robot = robot(home_coords=home_coords)
        self.home_coords = home_coords
        # Copy the backend's public methods onto this instance for direct
        # attribute access; __getattr__ below covers anything missed.
        public = {name: getattr(self.robot, name)
                  for name in dir(self.robot) if not name.startswith('_')}
        self.__dict__.update(public)
        self.stored_coords = None

    def __getattr__(self, name):
        # Fallback delegation to the wrapped robot.
        return getattr(self.robot, name)

    def return_item(self, return_home=True):
        """Put the currently held item back where it was taken from."""
        if not self.stored_coords:
            print('Robart: Nothing to return :(')
            return
        self.pick_object()
        self.goto_coords(self.stored_coords)
        self.place_object()
        if return_home:
            self.goto_coords(self.home_coords)
        self.stored_coords = None

    def get_at_coords(self, coords):
        """Fetch the item at *coords*, returning any held item first."""
        self.return_item(False)
        self.goto_coords(coords)
        self.pick_object()
        self.goto_coords(self.home_coords)
        self.place_object()
        self.stored_coords = coords
if __name__ == '__main__':
library = {
'can': [4, -1],
'water': [1, -1],
'apple': [-2, -1],
'orange': [-5, -1],
'bottle': [4, 1],
'extinguisher': [1, 1],
'paint': [-2, 1],
'gnome': [-5, 1],
'crackers': [4, 3],
'cereal': [1, 3],
'honey': [-2, 3],
'jam': [-5, 3],
'cup': [4, 5],
'flowers': [1, 5],
'tree': | |
frame of data, such as on reloading the state vector
assert (self._gen_index == 0
and self._update_index == 0
and self._thin_index == 0)
assert (self.generation == self.Ngen
and self._update_count == self.Nupdate
and self._thin_count == self.Nthin)
self.thinning = thinning
if Ngen > self.Ngen:
self._gen_index = self.Ngen # must happen before resize!!
self._gen_draws = np.resize(self._gen_draws, Ngen)
self._gen_logp = np.resize(self._gen_logp, (Ngen, Npop))
self._gen_acceptance_rate \
= np.resize(self._gen_acceptance_rate, Ngen)
elif Ngen < self.Ngen:
self._gen_draws = self._gen_draws[-Ngen:].copy()
self._gen_logp = self._gen_logp[-Ngen:, :].copy()
self._gen_acceptance_rate \
= self._gen_acceptance_rate[-Ngen:].copy()
if Nthin > self.Nthin:
self._thin_index = self.Nthin # must happen before resize!!
self._thin_draws = np.resize(self._thin_draws, Nthin)
self._thin_point = np.resize(self._thin_point, (Nthin, Npop, Nvar))
self._thin_logp = np.resize(self._thin_logp, (Nthin, Npop))
elif Nthin < self.Nthin:
self._thin_draws = self._thin_draws[-Nthin:].copy()
self._thin_point = self._thin_point[-Nthin:, :, :].copy()
self._thin_logp = self._thin_logp[-Nthin:, :].copy()
if Nupdate > self.Nupdate:
self._update_count = self.Nupdate # must happen before resize!!
self._update_draws = np.resize(self._update_draws, Nupdate)
self._update_R_stat \
= np.resize(self._update_R_stat, (Nupdate, Nvar))
self._update_CR_weight \
= np.resize(self._update_CR_weight, (Nupdate, Ncr))
elif Nupdate < self.Nupdate:
self._update_draws = self._update_draws[-Nupdate:].copy()
self._update_R_stat = self._update_R_stat[-Nupdate:, :].copy()
self._update_CR_weight = self._update_CR_weight[-Nupdate:, :].copy()
def save(self, filename):
    """Persist this state to *filename* (delegates to save_state)."""
    save_state(self, filename)
def show(self, portion=1.0, figfile=None):
    """Plot all state views, optionally saving figures to *figfile*."""
    from .views import plot_all
    plot_all(self, portion=portion, figfile=figfile)
def _last_gen(self):
    """
    Returns x, logp for most recent generation to dream.py.

    Reads the thinned history buffers at the slot just before the cursor.
    """
    # Note: if generation number has wrapped and _gen_index is 0
    # (the usual case when this function is called to resume an
    # existing chain), then this returns the last row in the array
    # (index -1 wraps around to the end of the circular buffer).
    return (self._thin_point[self._thin_index-1],
            self._thin_logp[self._thin_index-1])
def _generation(self, new_draws, x, logp, accept, force_keep=False):
    """
    Called from dream.py after each generation is completed with
    a set of accepted points and their values.

    *new_draws* is the number of proposals made this generation; *x* and
    *logp* are the population and its log likelihoods; *accept* flags the
    accepted proposals.  When *force_keep* is true the generation is stored
    in the thinned history regardless of the thinning counter.
    """
    # Keep track of the total number of draws
    # Note: this is first so that we tag the record with the number of
    # draws taken so far, including the current draw.
    self.draws += new_draws
    self.generation += 1
    # Record if this is the best so far
    maxid = argmax(logp)
    if logp[maxid] > self._best_logp:
        self._best_logp = logp[maxid]
        self._best_x = x[maxid, :]+0  # Force a copy
    # Record acceptance rate and cost
    i = self._gen_index
    #print("generation", i, self.draws, "\n x", x, "\n logp", logp, "\n accept", accept)
    self._gen_draws[i] = self.draws
    self._gen_acceptance_rate[i] = 100*sum(accept)/new_draws
    self._gen_logp[i] = logp
    # Advance the circular generation cursor, wrapping at buffer length.
    i = i+1
    if i == len(self._gen_draws):
        i = 0
    self._gen_index = i
    # Keep every nth iteration
    self._thin_timer += 1
    if self._thin_timer == self.thinning or force_keep:
        self._thin_timer = 0
        self._thin_count += 1
        i = self._thin_index
        self._thin_draws[i] = self.draws
        self._thin_point[i] = x
        self._thin_logp[i] = logp
        # Advance the thinned-history cursor, wrapping as above.
        i = i+1
        if i == len(self._thin_draws):
            i = 0
        self._thin_index = i
        self._gen_current = x+0  # force a copy
    else:
        self._gen_current = x+0  # force a copy
def _update(self, R_stat, CR_weight):
    """
    Called from dream.py when a series of DE steps is completed; records
    the convergence statistic (R_stat) and crossover weights in the
    circular update buffers.
    """
    self._update_count += 1
    slot = self._update_index
    #print("update", slot, self.draws, "\n Rstat", R_stat, "\n CR weight", CR_weight)
    self._update_draws[slot] = self.draws
    self._update_R_stat[slot] = R_stat
    self._update_CR_weight[slot] = CR_weight
    # Advance the circular-buffer cursor, wrapping at the buffer length.
    self._update_index = (slot + 1) % len(self._update_draws)
def _replace_outlier(self, old, new):
    """
    Called from outliers.py when a chain is replaced by the
    clone of another.

    Rewrites the stored generation and thinned histories so chain *old*
    carries chain *new*'s values everywhere.
    """
    # Remember where in the thinned history the replacement happened.
    self._outliers.append((self._thin_index, old, new))
    self._gen_logp[:, old] = self._gen_logp[:, new]
    self._thin_logp[:, old] = self._thin_logp[:, new]
    self._thin_point[:, old, :] = self._thin_point[:, new, :]
    # PAK: shouldn't we reduce the total number of draws since we
    # are throwing way an entire chain?
@property
def labels(self):
    """Variable names for plotting; defaults to P0..P(n-1) when unset."""
    if self._labels is not None:
        return self._labels
    nvar = self._thin_point.shape[2]
    return ["P%d" % k for k in range(nvar)]
@labels.setter
def labels(self, v):
    self._labels = v
def _draw_pop(self):
    """
    Return the current population (the most recently recorded generation).
    """
    return self._gen_current
def _draw_large_pop(self, Npop):
    """
    Return a population of *Npop* points: the current generation first,
    padded with randomly drawn distinct ancestors from the thinned history.
    """
    _, chains, _ = self.chains()
    Ngen, Nchain, Nvar = chains.shape
    points = reshape(chains, (Ngen*Nchain, Nvar))
    # There are two complications with the history buffer:
    # (1) due to thinning, not every generation is stored
    # (2) because it is circular, the cursor may be in the middle
    # If the current generation isn't in the buffer (but is instead
    # stored separately as _gen_current), then the entire buffer
    # becomes the history pool.
    # otherwise we need to exclude the current generation from
    # the pool. If (2) happens, we need to increment everything
    # above the cursor by the number of chains.
    if self._gen_current is not None:
        pool_size = Ngen*Nchain
        cursor = pool_size  # infinite
    else:
        pool_size = (Ngen-1)*Nchain
        k = len(self._thin_draws)
        # Flat index of the newest stored generation in *points*.
        cursor = Nchain*((k+self._thin_index-1)%k)
    # Make a return population and fill it with the current generation
    pop = empty((Npop, Nvar), 'd')
    if self._gen_current is not None:
        pop[:Nchain] = self._gen_current
    else:
        #print(pop.shape, points.shape, chains.shape)
        pop[:Nchain] = points[cursor:cursor+Nchain]
    if Npop > Nchain:
        # Find the remainder with unique ancestors.
        # Again, because this is a circular buffer, their may be random
        # numbers generated at or above the cursor. All of these must
        # be shifted by Nchains to avoid the cursor.
        perm = draw(Npop-Nchain, pool_size)
        perm[perm >= cursor] += Nchain
        #print("perm", perm; raw_input('wait'))
        pop[Nchain:] = points[perm]
    return pop
def _unroll(self):
    """
    Unroll the circular queues so that data access can be done inplace.
    Call this when done stepping, and before plotting. Calls to
    logp, sample, etc. assume the data is already unrolled.
    """
    # Each buffer group shares a cursor; rotate every buffer in the group
    # left by the cursor so index 0 is the oldest entry, then reset it.
    if self.generation > self._gen_index > 0:
        shift = -self._gen_index
        for buf in (self._gen_draws, self._gen_logp,
                    self._gen_acceptance_rate):
            buf[:] = np.roll(buf, shift, axis=0)
        self._gen_index = 0
    if self._thin_count > self._thin_index > 0:
        shift = -self._thin_index
        for buf in (self._thin_draws, self._thin_point, self._thin_logp):
            buf[:] = np.roll(buf, shift, axis=0)
        self._thin_index = 0
    if self._update_count > self._update_index > 0:
        shift = -self._update_index
        for buf in (self._update_draws, self._update_R_stat,
                    self._update_CR_weight):
            buf[:] = np.roll(buf, shift, axis=0)
        self._update_index = 0
def remove_outliers(self, x, logp, test='IQR', portion=0.5):
    """
    Replace outlier chains with clones of good ones. This should happen
    early in the sampling processes so the clones have an opportunity
    to evolve their own identity.
    *state* contains the chains, with log likelihood for each point.
    *x*, *logp* are the current population and the corresponding
    log likelihoods
    *test* is the name of the test to use (one of IQR, Grubbs, Mahal
    or none).
    *portion* in (0, 1] is the amount of the chain to use
    Updates *state*, *x* and *logp* to reflect the changes.
    See :mod:`.outliers` for details.
    """
    # Grab the last part of the chain histories.
    # BUG FIX: the original sliced chains[-chain_len:] with chain_len equal
    # to the full history length, which made *portion* a no-op; compute the
    # trailing window the same way mark_outliers() does.
    _, chains = self.logp()
    chain_len, Nchains = chains.shape
    start = int(chain_len*(1-portion)) if portion else 0
    outliers = identify_outliers(test, chains[start:], x)
    # Loop over each outlier chain, replacing each with another
    for old in outliers:
        # Draw another chain at random, with replacement
        while True:
            new = rng.randint(Nchains)
            if new not in outliers:
                break
        # Update the saved state and current population
        self._replace_outlier(old=old, new=new)
        x[old, :] = x[new, :]
        logp[old] = logp[new]
def mark_outliers(self, test='IQR', portion=1.0):
    """
    Mark some chains as outliers but don't remove them. This can happen
    after drawing is complete, so that chains that did not converge are
    not included in the statistics.
    *test* is 'IQR', 'Mahal' or 'none'.
    *portion* indicates what portion of the samples should be included
    in the outlier test. The default is to include all of them.

    Sets self._good_chains to either a full slice or an index array of
    the non-outlier chain columns.
    """
    _, chains, logp = self.chains()
    if test == 'none':
        self._good_chains = slice(None, None)
    else:
        # Only the trailing *portion* of the history feeds the test.
        Ngen = chains.shape[0]
        start = int(Ngen*(1-portion)) if portion else 0
        outliers = identify_outliers(test, logp[start:], chains[-1])
        #print("outliers", outliers)
        #print(logp.shape, chains.shape)
        if len(outliers) > 0:
            self._good_chains = np.array([i
                                          for i in range(logp.shape[1])
                                          if i not in outliers])
        else:
            self._good_chains = slice(None, None)
        #print(self._good_chains)
def logp(self, full=False):
"""
Return the iteration number and the log likelihood for each point in
the individual sequences in that iteration.
For example, to plot the convergence of each sequence::
draw, logp = state.logp()
plot(draw, logp)
Note that draw[i] represents the total number of samples taken,
including those for the samples in logp[i].
If full is True, then return all chains, not just good chains.
"""
self._unroll()
retval = self._gen_draws, self._gen_logp
if self.generation == self._gen_index:
retval = [v[:self.generation] for v in retval]
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""libFuzzer engine interface."""
from builtins import str
import os
import re
import tempfile
from base import utils
from bot.fuzzers import dictionary_manager
from bot.fuzzers import engine
from bot.fuzzers import engine_common
from bot.fuzzers import libfuzzer
from bot.fuzzers import strategy_selection
from bot.fuzzers import utils as fuzzer_utils
from bot.fuzzers.libFuzzer import constants
from bot.fuzzers.libFuzzer import fuzzer
from bot.fuzzers.libFuzzer import stats
from datastore import data_types
from fuzzing import strategy
from metrics import logs
from metrics import profiler
from system import environment
from system import shell
# Error message logged when libFuzzer itself (rather than the target) fails.
ENGINE_ERROR_MESSAGE = 'libFuzzer: engine encountered an error'
# Matches libFuzzer's complaint about a malformed -dict= file; group(1) is the
# offending line number within the dictionary.
DICT_PARSING_FAILED_REGEX = re.compile(
    r'ParseDictionaryFile: error in line (\d+)')
# Byte token present in libFuzzer builds that support multistep merge.
MULTISTEP_MERGE_SUPPORT_TOKEN = b'fuzz target overwrites its const input'
def _project_qualified_fuzzer_name(target_path):
  """Return project qualified fuzzer name for a given target path.

  Combines the current project name with the target binary's basename.
  """
  return data_types.fuzz_target_project_qualified_name(
      utils.current_project(), os.path.basename(target_path))
def _is_multistep_merge_supported(target_path):
  """Check whether a particular fuzz target binary supports multistep merge."""
  # TODO(Dor1s): implementation below a temporary workaround, do not tell any
  # body that we are doing this. The real solution would be to execute a
  # fuzz target with '-help=1' and check the output for the presence of
  # multistep merge support added in https://reviews.llvm.org/D71423.
  # The temporary implementation checks that the version of libFuzzer is at
  # least https://github.com/llvm/llvm-project/commit/da3cf61, which supports
  # multi step merge: https://github.com/llvm/llvm-project/commit/f054067.
  if not os.path.exists(target_path):
    return False
  # Scan the binary for the marker token embedded by newer libFuzzer versions.
  with open(target_path, 'rb') as binary_file:
    return utils.search_bytes_in_file(MULTISTEP_MERGE_SUPPORT_TOKEN,
                                      binary_file)
class MergeError(engine.Error):
  """Error raised when the corpus merge step fails."""
class LibFuzzerOptions(engine.FuzzOptions):
  """LibFuzzer engine options."""

  def __init__(self, corpus_dir, arguments, strategies, fuzz_corpus_dirs,
               extra_env, use_dataflow_tracing, is_mutations_run):
    """
    Args:
      corpus_dir: The main corpus directory.
      arguments: Arguments to pass to the fuzzer binary.
      strategies: Strategies dict (as produced by stats.process_strategies).
      fuzz_corpus_dirs: Additional corpus directories to use while fuzzing.
      extra_env: Extra environment variables for the fuzzer process.
      use_dataflow_tracing: Whether the dataflow tracing strategy is enabled.
      is_mutations_run: Whether a mutations-based strategy is in use.
    """
    super(LibFuzzerOptions, self).__init__(corpus_dir, arguments, strategies)
    self.fuzz_corpus_dirs = fuzz_corpus_dirs
    self.extra_env = extra_env
    self.use_dataflow_tracing = use_dataflow_tracing
    self.is_mutations_run = is_mutations_run
class LibFuzzerEngine(engine.Engine):
  """LibFuzzer engine implementation."""

  @property
  def name(self):
    """Name of this fuzzing engine."""
    return 'libFuzzer'
def prepare(self, corpus_dir, target_path, _):
  """Prepare for a fuzzing session, by generating options. Returns a
  FuzzOptions object.

  Args:
    corpus_dir: The main corpus directory.
    target_path: Path to the target.
    _: Path to the build directory (unused).

  Returns:
    A LibFuzzerOptions object.
  """
  arguments = fuzzer.get_arguments(target_path)
  grammar = fuzzer.get_grammar(target_path)
  # Select fuzzing strategies for this session from the weighted pool.
  strategy_pool = strategy_selection.generate_weighted_strategy_pool(
      strategy_list=strategy.LIBFUZZER_STRATEGY_LIST,
      use_generator=True,
      engine_name=self.name)
  strategy_info = libfuzzer.pick_strategies(strategy_pool, target_path,
                                            corpus_dir, arguments, grammar)
  arguments.extend(strategy_info.arguments)

  # Check for seed corpus and add it into corpus directory.
  engine_common.unpack_seed_corpus_if_needed(target_path, corpus_dir)

  # Pick a few testcases from our corpus to use as the initial corpus.
  subset_size = engine_common.random_choice(
      engine_common.CORPUS_SUBSET_NUM_TESTCASES)
  if (not strategy_info.use_dataflow_tracing and
      strategy_pool.do_strategy(strategy.CORPUS_SUBSET_STRATEGY) and
      shell.get_directory_file_count(corpus_dir) > subset_size):
    # Copy |subset_size| testcases into 'subset' directory.
    corpus_subset_dir = self._create_temp_corpus_dir('subset')
    libfuzzer.copy_from_corpus(corpus_subset_dir, corpus_dir, subset_size)
    strategy_info.fuzzing_strategies.append(
        strategy.CORPUS_SUBSET_STRATEGY.name + '_' + str(subset_size))
    strategy_info.additional_corpus_dirs.append(corpus_subset_dir)
  else:
    strategy_info.additional_corpus_dirs.append(corpus_dir)

  # Check dict argument to make sure that it's valid.
  dict_path = fuzzer_utils.extract_argument(
      arguments, constants.DICT_FLAG, remove=False)
  if dict_path and not os.path.exists(dict_path):
    logs.log_error('Invalid dict %s for %s.' % (dict_path, target_path))
    # Strip the invalid -dict= argument (extract with remove=True default).
    fuzzer_utils.extract_argument(arguments, constants.DICT_FLAG)

  # If there's no dict argument, check for %target_binary_name%.dict file.
  dict_path = fuzzer_utils.extract_argument(
      arguments, constants.DICT_FLAG, remove=False)
  if not dict_path:
    dict_path = dictionary_manager.get_default_dictionary_path(target_path)
    if os.path.exists(dict_path):
      arguments.append(constants.DICT_FLAG + dict_path)

  # If we have a dictionary, correct any items that are not formatted properly
  # (e.g. quote items that are missing them).
  dictionary_manager.correct_if_needed(dict_path)

  strategies = stats.process_strategies(
      strategy_info.fuzzing_strategies, name_modifier=lambda x: x)
  return LibFuzzerOptions(
      corpus_dir, arguments, strategies, strategy_info.additional_corpus_dirs,
      strategy_info.extra_env, strategy_info.use_dataflow_tracing,
      strategy_info.is_mutations_run)
def _create_empty_testcase_file(self, reproducers_dir):
  """Create an empty testcase file in temporary directory.

  Args:
    reproducers_dir: Directory in which to create the file.

  Returns:
    Path to the newly created empty file.
  """
  # mkstemp returns an *open* OS-level file descriptor along with the path;
  # the original code discarded it, leaking one descriptor per call.
  # Close it explicitly before returning the path.
  descriptor, path = tempfile.mkstemp(dir=reproducers_dir)
  os.close(descriptor)
  return path
def _create_temp_corpus_dir(self, name):
  """Create temporary corpus directory.

  Args:
    name: Basename of the directory, created under the fuzzer temp dir.

  Returns:
    Path to the directory (recreated fresh; presumably any prior contents
    are wiped by recreate_directory - confirm).
  """
  new_corpus_directory = os.path.join(fuzzer_utils.get_temp_dir(), name)
  engine_common.recreate_directory(new_corpus_directory)
  return new_corpus_directory
def _create_merge_corpus_dir(self):
  """Create merge corpus directory (a temp dir named 'merge-corpus')."""
  return self._create_temp_corpus_dir('merge-corpus')
def _merge_new_units(self, target_path, corpus_dir, new_corpus_dir,
                     fuzz_corpus_dirs, arguments, stat_overrides):
  """Merge new units.

  Args:
    target_path: Path to the target.
    corpus_dir: Main corpus directory.
    new_corpus_dir: Directory containing units generated by this run.
    fuzz_corpus_dirs: Additional corpus directories used while fuzzing.
    arguments: Fuzzer arguments (fuzzing-only flags already removed by caller).
    stat_overrides: Stats dict, updated in place with merge results.
  """
  # Make a decision on whether merge step is needed at all. If there are no
  # new units added by libFuzzer run, then no need to do merge at all.
  new_units_added = shell.get_directory_file_count(new_corpus_dir)
  if not new_units_added:
    stat_overrides['new_units_added'] = 0
    logs.log('Skipped corpus merge since no new units added by fuzzing.')
    return

  # If this times out, it's possible that we will miss some units. However, if
  # we're taking >10 minutes to load/merge the corpus something is going very
  # wrong and we probably don't want to make things worse by adding units
  # anyway.
  merge_corpus = self._create_merge_corpus_dir()
  merge_dirs = fuzz_corpus_dirs[:]

  # Merge the new units with the initial corpus.
  if corpus_dir not in merge_dirs:
    merge_dirs.append(corpus_dir)

  old_corpus_len = shell.get_directory_file_count(corpus_dir)

  new_units_added = 0
  try:
    result = self._minimize_corpus_two_step(
        target_path=target_path,
        arguments=arguments,
        existing_corpus_dirs=merge_dirs,
        new_corpus_dir=new_corpus_dir,
        output_corpus_dir=merge_corpus,
        reproducers_dir=None,
        max_time=engine_common.get_merge_timeout(
            libfuzzer.DEFAULT_MERGE_TIMEOUT))

    libfuzzer.move_mergeable_units(merge_corpus, corpus_dir)
    new_corpus_len = shell.get_directory_file_count(corpus_dir)
    new_units_added = new_corpus_len - old_corpus_len
    stat_overrides.update(result.stats)
  except (MergeError, engine.TimeoutError) as e:
    # Best effort: a failed merge is logged and reported as 0 new units.
    logs.log_warn('Merge failed.', error=repr(e))

  stat_overrides['new_units_added'] = new_units_added

  # Record the stats to make them easily searchable in stackdriver.
  logs.log('Stats calculated.', stats=stat_overrides)
  if new_units_added:
    logs.log('New units added to corpus: %d.' % new_units_added)
  else:
    logs.log('No new units found.')
def fuzz(self, target_path, options, reproducers_dir, max_time):
  """Run a fuzz session.

  Args:
    target_path: Path to the target.
    options: The FuzzOptions object returned by prepare().
    reproducers_dir: The directory to put reproducers in when crashes
        are found.
    max_time: Maximum allowed time for the fuzzing to run.

  Returns:
    A FuzzResult object.
  """
  profiler.start_if_needed('libfuzzer_fuzz')
  runner = libfuzzer.get_runner(target_path)
  libfuzzer.set_sanitizer_options(target_path, fuzz_options=options)

  # Directory to place new units.
  new_corpus_dir = self._create_temp_corpus_dir('new')

  corpus_directories = [new_corpus_dir] + options.fuzz_corpus_dirs
  fuzz_timeout = libfuzzer.get_fuzz_timeout(
      options.is_mutations_run, total_timeout=max_time)
  fuzz_result = runner.fuzz(
      corpus_directories,
      fuzz_timeout=fuzz_timeout,
      additional_args=options.arguments,
      artifact_prefix=reproducers_dir,
      extra_env=options.extra_env)

  project_qualified_fuzzer_name = _project_qualified_fuzzer_name(target_path)
  # Surface dictionary parse failures prominently; they silently disable
  # the dictionary otherwise.
  dict_error_match = DICT_PARSING_FAILED_REGEX.search(fuzz_result.output)
  if dict_error_match:
    logs.log_error(
        'Dictionary parsing failed (target={target}, line={line}).'.format(
            target=project_qualified_fuzzer_name,
            line=dict_error_match.group(1)),
        engine_output=fuzz_result.output)
  elif (not environment.get_value('USE_MINIJAIL') and
        fuzz_result.return_code == constants.LIBFUZZER_ERROR_EXITCODE):
    # Minijail returns 1 if the exit code is nonzero.
    # Otherwise: we can assume that a return code of 1 means that libFuzzer
    # itself ran into an error.
    logs.log_error(
        ENGINE_ERROR_MESSAGE +
        ' (target={target}).'.format(target=project_qualified_fuzzer_name),
        engine_output=fuzz_result.output)

  log_lines = fuzz_result.output.splitlines()
  # Output can be large, so save some memory by removing reference to the
  # original output which is no longer needed.
  fuzz_result.output = None

  # Check if we crashed, and get the crash testcase path.
  crash_testcase_file_path = runner.get_testcase_path(log_lines)

  # If we exited with a non-zero return code with no crash file in output from
  # libFuzzer, this is most likely a startup crash. Use an empty testcase to
  # to store it as a crash.
  if not crash_testcase_file_path and fuzz_result.return_code:
    crash_testcase_file_path = self._create_empty_testcase_file(
        reproducers_dir)

  # Parse stats information based on libFuzzer output.
  parsed_stats = libfuzzer.parse_log_stats(log_lines)

  # Extend parsed stats by additional performance features.
  parsed_stats.update(
      stats.parse_performance_features(log_lines, options.strategies,
                                       options.arguments))

  # Set some initial stat overrides.
  timeout_limit = fuzzer_utils.extract_argument(
      options.arguments, constants.TIMEOUT_FLAG, remove=False)
  expected_duration = runner.get_max_total_time(fuzz_timeout)
  actual_duration = int(fuzz_result.time_executed)
  fuzzing_time_percent = 100 * actual_duration / float(expected_duration)
  # NOTE(review): int(timeout_limit) raises TypeError if -timeout= is absent
  # from options.arguments; this assumes the flag is always set upstream --
  # confirm.
  parsed_stats.update({
      'timeout_limit': int(timeout_limit),
      'expected_duration': expected_duration,
      'actual_duration': actual_duration,
      'fuzzing_time_percent': fuzzing_time_percent,
  })

  # Remove fuzzing arguments before merge and dictionary analysis step.
  arguments = options.arguments[:]
  libfuzzer.remove_fuzzing_arguments(arguments)
  self._merge_new_units(target_path, options.corpus_dir, new_corpus_dir,
                        options.fuzz_corpus_dirs, arguments, parsed_stats)

  fuzz_logs = '\n'.join(log_lines)
  crashes = []
  if crash_testcase_file_path:
    # Use higher timeout for reproduction.
    reproduce_arguments = arguments[:]
    libfuzzer.fix_timeout_argument_for_reproduction(reproduce_arguments)

    # Write the new testcase.
    # Copy crash testcase contents into the main testcase path.
    crashes.append(
        engine.Crash(crash_testcase_file_path, fuzz_logs, reproduce_arguments,
                     actual_duration))

  libfuzzer.analyze_and_update_recommended_dictionary(
      runner, project_qualified_fuzzer_name, log_lines, options.corpus_dir,
      arguments)

  return engine.FuzzResult(fuzz_logs, fuzz_result.command, crashes,
                           parsed_stats, fuzz_result.time_executed)
def reproduce(self, target_path, input_path, arguments, max_time):
  """Reproduce a crash given an input.

  Args:
    target_path: Path to the target.
    input_path: Path to the reproducer input.
    arguments: Additional arguments needed for reproduction.
    max_time: Maximum allowed time for the reproduction.

  Returns:
    A ReproduceResult.

  Raises:
    TimeoutError: If the reproduction exceeds max_time.
  """
  runner = libfuzzer.get_runner(target_path)
  libfuzzer.set_sanitizer_options(target_path)

  # Remove fuzzing specific arguments. This is only really needed for legacy
  # testcases, and can be removed in the distant future.
  arguments = arguments[:]
  libfuzzer.remove_fuzzing_arguments(arguments)

  # Bound the number of runs so reproduction terminates deterministically.
  runs_argument = constants.RUNS_FLAG + str(constants.RUNS_TO_REPRODUCE)
  arguments.append(runs_argument)

  result = runner.run_single_testcase(
      input_path, timeout=max_time, additional_args=arguments)
  if result.timed_out:
    raise engine.TimeoutError('Reproducing timed out\n' + result.output)

  return engine.ReproduceResult(result.command, result.return_code,
                                result.time_executed, result.output)
def _minimize_corpus_two_step(self, target_path, arguments,
existing_corpus_dirs, new_corpus_dir,
output_corpus_dir, reproducers_dir, max_time):
"""Optional (but recommended): run corpus minimization.
Args:
target_path: Path to the target.
arguments: Additional arguments needed for corpus minimization.
existing_corpus_dirs: Input corpora that existed before the fuzzing run.
new_corpus_dir: Input corpus that was generated during the fuzzing run.
Must have at least one new file.
output_corpus_dir: Output directory to place minimized corpus.
| |
<filename>pyrobolearn/utils/converter.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide converter classes which allows to convert from one certain data type to another.
"""
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
import quaternion
import collections
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def roll(lst, shift):
    """Roll the elements of a list to the right by `shift` positions.

    This mirrors `np.roll()` semantics: negative shifts roll left, and shifts
    whose magnitude exceeds ``len(lst)`` wrap around.

    Args:
        lst (list): list whose elements are rolled.
        shift (int): number of positions to roll by (any integer).

    Returns:
        list: a new, rolled list (the input is not modified).
    """
    if not lst:
        # np.roll of an empty array is empty; also avoids modulo by zero.
        return []
    # Normalize the shift into [0, len(lst)) so that shifts larger than the
    # list length wrap around exactly like np.roll (the original slicing-only
    # version silently returned the list unchanged in that case).
    shift %= len(lst)
    return lst[-shift:] + lst[:-shift]
def numpy_to_torch(tensor):
    """Convert the given numpy array into a single-precision pytorch tensor."""
    as_tensor = torch.from_numpy(tensor)
    return as_tensor.float()
def torch_to_numpy(tensor):
    """Convert the given pytorch tensor into a numpy array."""
    # Tensors tracking gradients must be detached from the graph first.
    if tensor.requires_grad:
        tensor = tensor.detach()
    return tensor.numpy()
class TypeConverter(object):
    r"""Type Converter class

    It describes how to convert a type to another type, and inversely. For
    instance, a numpy array to a pytorch Tensor, and vice-versa.

    Subclasses must implement :meth:`convert_from` and :meth:`convert_to`.
    A ``from_type``/``to_type`` of ``None`` means "any type" (used by
    converters such as the identity converter).
    """
    # NOTE(review): `__metaclass__` is the Python 2 spelling and has no effect
    # under Python 3 (the class is not actually abstract there). Switching to
    # `metaclass=ABCMeta` would make direct instantiation fail -- confirm no
    # caller relies on instantiating this base class before changing it.
    __metaclass__ = ABCMeta

    def __init__(self, from_type, to_type):
        """
        Args:
            from_type (type, tuple/list of type, None): source type(s).
            to_type (type, tuple/list of type, None): target type(s).
        """
        self.from_type = from_type
        self.to_type = to_type

    @property
    def from_type(self):
        """Return the source type(s)."""
        return self._from_type

    @from_type.setter
    def from_type(self, from_type):
        if from_type is not None:
            # `isinstance` only accepts a type or a tuple of types, so a
            # container of types is restricted to tuple/list here. This also
            # avoids `collections.Iterable`, deprecated since Python 3.3 and
            # removed in 3.10 (use `collections.abc` otherwise).
            if isinstance(from_type, (tuple, list)):
                for t in from_type:
                    if not isinstance(t, type):
                        raise TypeError("Expecting the from_type to be an instance of 'type'")
            elif not isinstance(from_type, type):
                raise TypeError("Expecting the from_type to be an instance of 'type'")
        self._from_type = from_type

    @property
    def to_type(self):
        """Return the target type(s)."""
        return self._to_type

    @to_type.setter
    def to_type(self, to_type):
        if to_type is not None:
            if isinstance(to_type, (tuple, list)):
                for t in to_type:
                    if not isinstance(t, type):
                        raise TypeError("Expecting the to_type to be an instance of 'type'")
            elif not isinstance(to_type, type):
                raise TypeError("Expecting the to_type to be an instance of 'type'")
        self._to_type = to_type

    @abstractmethod
    def convert_from(self, data):
        """Convert to the 'from_type'"""
        raise NotImplementedError

    @abstractmethod
    def convert_to(self, data):
        """Convert to the 'to_type'"""
        raise NotImplementedError

    def convert(self, data):
        """
        Convert the data to the other type.
        """
        # BUG FIX: when `from_type` is None (e.g. IdentityConverter),
        # `isinstance(data, None)` raised TypeError; treat None as matching
        # any input and convert "to".
        if self.from_type is None or isinstance(data, self.from_type):
            return self.convert_to(data)
        return self.convert_from(data)

    def __call__(self, data):
        """
        Call the convert method, and return the converted data.
        """
        return self.convert(data)
class IdentityConverter(TypeConverter):
    r"""Identity Converter

    Pass-through converter: the data is returned unchanged in both
    directions.
    """

    def __init__(self):
        # No declared source/target types: this converter accepts anything.
        super(IdentityConverter, self).__init__(None, None)

    def convert_from(self, data):
        """Return the data unchanged."""
        return data

    def convert_to(self, data):
        """Return the data unchanged."""
        return data
class NumpyListConverter(TypeConverter):
    r"""Numpy - list converter

    Convert lists/tuples to numpy arrays, and inversely.
    """

    def __init__(self, convention=0):
        """Initialize the converter.

        Args:
            convention (int): convention to follow if 1D array. 0 to left it untouched, 1 to get column vector (i.e.
                shape=(-1,1)), 2 to get row vector (i.e. shape=(1,-1)).
        """
        super(NumpyListConverter, self).__init__(from_type=(list, tuple), to_type=np.ndarray)
        # check convention
        if not isinstance(convention, int):
            raise TypeError("Expecting an integer for the convention {0,1,2}")
        if convention < 0 or convention > 2:
            raise ValueError("Expecting the convention to belong to {0,1,2}")
        self.convention = convention

    def convert_from(self, data):
        """Convert to list"""
        if isinstance(data, self.from_type):
            return list(data)
        elif isinstance(data, self.to_type):
            # Flatten row/column vectors so the caller gets a flat list.
            if len(data.shape) == 2 and (data.shape[0] == 1 or data.shape[1] == 1):
                return data.ravel().tolist()  # flatten data
            return data.tolist()
        else:
            raise TypeError("Type not known: {}".format(type(data)))

    def convert_to(self, data):
        """Convert to numpy array"""
        if isinstance(data, self.to_type):
            return data
        elif isinstance(data, self.from_type):
            data = np.array(data)
            if len(data.shape) == 1:
                if self.convention == 1:  # column vector
                    return data[:, np.newaxis]
                if self.convention == 2:  # row vector
                    return data[np.newaxis, :]
            # BUG FIX: the original fell through and implicitly returned None
            # for arrays that are not 1D (e.g. nested lists). Return the array
            # untouched instead (also covers convention == 0 for 1D arrays).
            return data
        else:
            raise TypeError("Type not known: {}".format(type(data)))

    def reshape(self, data, shape):
        """Reshape the data using the converter. Only valid if data is numpy array."""
        if not isinstance(data, self.to_type):
            data = self.convert_to(data)
        return data.reshape(shape)

    def transpose(self, data):
        """Transpose the data using the converter"""
        if not isinstance(data, self.to_type):
            data = self.convert_to(data)
        return data.T
class QuaternionListConverter(TypeConverter):
    r"""Quaternion - list converter

    Convert a list/tuple to a quaternion, and vice-versa.
    """

    def __init__(self, convention=0):
        """Initialize converter

        Args:
            convention (int): if 0, map np.quaternion (w,x,y,z) <-> list [w,x,y,z];
                if 1, map np.quaternion (w,x,y,z) <-> list [x,y,z,w].
        """
        super(QuaternionListConverter, self).__init__(from_type=(list, tuple), to_type=np.quaternion)
        if not isinstance(convention, int) or convention < 0 or convention > 1:
            raise TypeError("Expecting convention to be 0 or 1.")
        self.convention = convention

    def convert_from(self, data):
        """Convert to list"""
        if isinstance(data, self.to_type):
            # Reorder the (w,x,y,z) float components per the convention.
            components = quaternion.as_float_array(data)
            return np.roll(components, -self.convention).tolist()
        if isinstance(data, self.from_type):
            return list(data)
        raise TypeError("Type not known: {}".format(type(data)))

    def convert_to(self, data):
        """Convert to quaternion"""
        if isinstance(data, self.to_type):
            return data
        if isinstance(data, self.from_type):
            return np.quaternion(*roll(data, -self.convention))
        raise TypeError("Type not known: {}".format(type(data)))
class QuaternionNumpyConverter(TypeConverter):
    r"""Quaternion - numpy array converter

    Convert a numpy array to a quaternion, and vice-versa.
    """

    def __init__(self, convention=0):
        """Initialize converter

        Args:
            convention (int): if 0, map np.quaternion (w,x,y,z) <-> array [w,x,y,z];
                if 1, map np.quaternion (w,x,y,z) <-> array [x,y,z,w].
        """
        super(QuaternionNumpyConverter, self).__init__(from_type=np.ndarray, to_type=np.quaternion)
        if not isinstance(convention, int) or convention < 0 or convention > 1:
            raise TypeError("Expecting convention to be 0 or 1.")
        self.convention = convention

    def convert_from(self, data):
        """Convert to numpy array"""
        if isinstance(data, self.to_type):
            # Reorder the (w,x,y,z) float components per the convention.
            return np.roll(quaternion.as_float_array(data), -self.convention)
        if isinstance(data, self.from_type):
            return data
        raise TypeError("Type not known: {}".format(type(data)))

    def convert_to(self, data):
        """Convert to quaternion"""
        if isinstance(data, self.to_type):
            return data
        if isinstance(data, self.from_type):
            components = roll(data.ravel().tolist(), -self.convention)
            return np.quaternion(*components)
        raise TypeError("Type not known: {}".format(type(data)))

    def reshape(self, data, shape):
        """Reshape the data using the converter. Only valid if data is a numpy array."""
        if not isinstance(data, self.from_type):
            data = self.convert_from(data)
        return data.reshape(shape)

    def transpose(self, data):
        """Transpose the data using the converter."""
        if not isinstance(data, self.from_type):
            data = self.convert_from(data)
        return data.T
class QuaternionPyTorchConverter(TypeConverter):
    r"""Quaternion - pytorch tensor converter

    Convert a pytorch tensor to a quaternion, and vice-versa. Currently, the
    conversion goes through a numpy array / python list as an intermediate
    representation.
    """

    def __init__(self, convention=0):
        """Initialize converter

        Args:
            convention (int): if 0, map np.quaternion (w,x,y,z) <-> tensor [w,x,y,z];
                if 1, map np.quaternion (w,x,y,z) <-> tensor [x,y,z,w].
        """
        super(QuaternionPyTorchConverter, self).__init__(from_type=torch.Tensor, to_type=np.quaternion)
        if not isinstance(convention, int) or convention < 0 or convention > 1:
            raise TypeError("Expecting convention to be 0 or 1.")
        self.convention = convention

    def convert_from(self, data):
        """Convert to pytorch tensor"""
        if isinstance(data, self.to_type):
            # Reorder the (w,x,y,z) components, then wrap in a tensor.
            reordered = np.roll(quaternion.as_float_array(data), -self.convention)
            return torch.from_numpy(reordered)
        if isinstance(data, self.from_type):
            return data
        raise TypeError("Type not known: {}".format(type(data)))

    def convert_to(self, data):
        """Convert to quaternion"""
        if isinstance(data, self.to_type):
            return data
        if isinstance(data, self.from_type):
            components = roll(data.view(-1).data.tolist(), -self.convention)
            return np.quaternion(*components)
        raise TypeError("Type not known: {}".format(type(data)))

    def reshape(self, data, shape):
        """Reshape the data using the converter. Only valid if data is a pytorch tensor."""
        if not isinstance(data, self.from_type):
            data = self.convert_from(data)
        return data.view(shape)

    def transpose(self, data):
        """Transpose the data using the converter."""
        if not isinstance(data, self.from_type):
            data = self.convert_from(data)
        return data.t()
class NumpyNumberConverter(TypeConverter):
    r"""Numpy - number Converter

    Convert a number to a numpy array of dimension 0 or 1, and vice-versa.
    """

    def __init__(self, dim_array=1):
        """
        Args:
            dim_array (int): dimension (0 or 1) of the arrays produced by
                :meth:`convert_to`.
        """
        super(NumpyNumberConverter, self).__init__(from_type=(int, float), to_type=np.ndarray)
        # dimension array
        if not isinstance(dim_array, int):
            raise TypeError("The 'dim_array' argument should be an integer.")
        if dim_array < 0 or dim_array > 1:
            raise ValueError("The 'dim_array' argument should be 0 or 1.")
        self.dim_array = dim_array

    def convert_from(self, data):
        """Convert to a number"""
        if isinstance(data, self.from_type):
            return data
        if isinstance(data, self.to_type):
            ndim = len(data.shape)
            if ndim == 0:
                return data[()]   # extract the scalar from a 0-d array
            if ndim == 1:
                return data[0]    # first entry of a 1-d array
            raise ValueError("The numpy array should have a shape length of 0 or 1.")
        raise TypeError("Type not known: {}".format(type(data)))

    def convert_to(self, data):
        """Convert to numpy array"""
        if isinstance(data, self.to_type):
            return data
        if isinstance(data, self.from_type):
            return np.array(data) if self.dim_array == 0 else np.array([data])
        raise TypeError("Type not known: {}".format(type(data)))
class PyTorchListConverter(TypeConverter):
r"""Pytorch - list converter
Convert lists/tuples to pytorch tensors. Currently, it converts it first to a numpy array and then the other type.
"""
def __init__(self, convention=0):
"""Initialize the converter.
Args:
convention (int): convention to follow if 1D array. 0 to left it untouched, 1 to get column vector (i.e.
shape=(-1,1)), 2 to get row vector (i.e. shape=(1,-1)).
"""
super(PyTorchListConverter, self).__init__(from_type=(tuple, list), to_type=torch.Tensor)
# check convention
if not isinstance(convention, int):
raise TypeError("Expecting an integer for the convention {0,1,2}")
if convention < 0 or convention > 2:
raise ValueError("Expecting the convention to belong to {0,1,2}")
self.convention = convention
def convert_from(self, data):
"""Convert to list"""
if isinstance(data, self.from_type):
return list(data)
elif isinstance(data, self.to_type):
data = data.numpy() # convert to numpy first
if len(data.shape) == 2 and (data.shape[0] == 1 or data.shape[1] == 1):
return data.ravel().tolist() # flatten data
return data.tolist()
else:
raise TypeError("Type not known: | |
!= len(eids):
msg = 'eids=%s len(eids)=%s delta_eid=%s must be continuous' % (
eids, len(eids), delta_eid)
raise RuntimeError(msg)
#list_fields += eids
list_fields += [eids[0], 'THRU', eids[-1]]
return list_fields
def repr_fields(self):
    """Gets the fields of the card for printing (same as raw_fields here)."""
    return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
    """
    The writer method used by BDF.write_card()

    Parameters
    ----------
    size : int; default=8
        the size of the card (8/16)
    is_double : bool; default=False
        use the large-field double-precision format (only if size != 8)

    Returns
    -------
    msg : str
        the formatted card, prefixed by any comment
    """
    card = self.raw_fields()
    if size == 8:
        return self.comment + print_card_8(card)
    if is_double:
        return self.comment + print_card_double(card)
    return self.comment + print_card_16(card)
#def PLOAD4_func(self, sid, eids, pressures,
#g1=None, g34=None, cid=0, nvector=None, surf_or_line='SURF',
#line_load_dir='NORM', comment=''):
#"""
#Creates a PLOAD4 card
#Solid Format
#============
#Defines a pressure load on a face of a CHEXA, CPENTA, or CTETRA element.
#+--------+-----+-----+----+----+------+------+------+-------+
#| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
#+========+=====+=====+====+====+======+======+======+=======+
#| PLOAD4 | SID | EID | P1 | P2 | P3 | P4 | G1 | G3/G4 |
#+--------+-----+-----+----+----+------+------+------+-------+
#| | CID | N1 | N2 | N3 | SORL | LDIR | | |
#+--------+-----+-----+----+----+------+------+------+-------+
#Shell Format
#============
#Defines a pressure load on a face of a CTRIA3, CTRIA6, CTRIAR,
#CQUAD4, CQUAD8, or CQUADR element.
#+--------+-----+-----+----+----+------+------+------+-------+
#| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
#+========+=====+=====+====+====+======+======+======+=======+
#| PLOAD4 | SID | EID | P1 | P2 | P3 | P4 | THRU | EID2 |
#+--------+-----+-----+----+----+------+------+------+-------+
#| | CID | N1 | N2 | N3 | SORL | LDIR | | |
#+--------+-----+-----+----+----+------+------+------+-------+
#.. warning:: NX does not support SORL and LDIR, MSC does
#"""
#if g34 is None:
#return PLOAD4Solid(
#sid, eids, pressures,
#g1=None, g34=None, cid=0, nvector=None, surf_or_line='SURF',
#line_load_dir='NORM', comment='')
#return PLOAD4Shell(
#sid, eids, pressures, cid=0, nvector=None, surf_or_line='SURF',
#line_load_dir='NORM', comment='')
#class PLOAD4Shell(PLOAD4):
#def __init__(self, sid, eids, pressures, g1=None, g34=None, cid=0,
#nvector=None, surf_or_line='SURF',
#line_load_dir='NORM', comment=''):
#PLOAD4.__init__(self, sid, eids, pressures, g1=None, g34=None,
#cid=0, nvector=None,
#surf_or_line='SURF',
#line_load_dir='NORM',
#comment='')
#class PLOAD4Shell(PLOAD4):
#def __init__(self, sid, eids, pressures, g1=None, g34=None, cid=0,
#nvector=None, surf_or_line='SURF',
#line_load_dir='NORM', comment=''):
#PLOAD4.__init__(self, sid, eids, pressures, g1=g1, g34=g34,
#cid=cid, nvector=nvector,
#surf_or_line=surf_or_line,
#line_load_dir=line_load_dir,
#comment=comment)
class PLOAD4(Load):
"""
``Solid Format``
Defines a pressure load on a face of a CHEXA, CPENTA, or CTETRA element.
+--------+-----+-----+----+----+------+------+------+-------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+=====+=====+====+====+======+======+======+=======+
| PLOAD4 | SID | EID | P1 | P2 | P3 | P4 | G1 | G3/G4 |
+--------+-----+-----+----+----+------+------+------+-------+
| | CID | N1 | N2 | N3 | SORL | LDIR | | |
+--------+-----+-----+----+----+------+------+------+-------+
``Shell Format``
Defines a pressure load on a face of a CTRIA3, CTRIA6, CTRIAR,
CQUAD4, CQUAD8, or CQUADR element.
+--------+-----+-----+----+----+------+------+------+-------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+=====+=====+====+====+======+======+======+=======+
| PLOAD4 | SID | EID | P1 | P2 | P3 | P4 | THRU | EID2 |
+--------+-----+-----+----+----+------+------+------+-------+
| | CID | N1 | N2 | N3 | SORL | LDIR | | |
+--------+-----+-----+----+----+------+------+------+-------+
.. warning:: NX does not support SORL and LDIR, MSC does
"""
type = 'PLOAD4'
_properties = ['node_ids', 'element_ids']
@classmethod
def _init_from_empty(cls):
    """Create a minimal, valid PLOAD4 with placeholder values
    (presumably used by the card-testing framework - confirm)."""
    sid = 1
    eids = [1]
    pressures = [1.]
    g1 = None
    g34 = None
    return PLOAD4(sid, eids, pressures, g1, g34,
                  cid=0,
                  surf_or_line='SURF')
def __init__(self, sid, eids, pressures, g1, g34,
             cid=0, nvector=None, surf_or_line='SURF',
             line_load_dir='NORM', comment=''):
    """
    Creates a PLOAD4 card

    Parameters
    ----------
    sid : int
        the load id
    eids : List[int, ...]
        shells : the range of element ids; must be sequential
        solids : must be length 1
    pressures : List[float, float, float, float] / float
        float : turned into a list of length 4
        List[float] :
            tri : must be length 4 (the last value should be the same as the 0th value)
            quad : must be length 4
    g1 : int/None
        only used for solid elements
    g34 : int / None
        only used for solid elements
    cid : int; default=0
        the coordinate system for nvector
    nvector : (3, ) float ndarray
        blank : load acts normal to the face
        float : the local pressure vector
    surf_or_line : str; default='SURF'
        SURF : surface load
        LINE : line load (only defined for QUADR, TRIAR)
        not supported
    line_load_dir : str; default='NORM'
        direction of the line load (see surf_or_line); {X, Y, Z, TANG, NORM}
        not supported
    comment : str; default=''
        a comment for the card

    TODO: fix the way "pressures" works
    """
    # A missing direction vector is stored as the zero vector (meaning:
    # the load acts normal to the face).
    if nvector is None:
        nvector = np.zeros(3, dtype='float64')
    else:
        nvector = np.asarray(nvector, dtype='float64')

    if comment:
        self.comment = comment
    # Normalize scalar inputs into the list forms documented above.
    if isinstance(eids, integer_types):
        eids = [eids]
    if isinstance(pressures, float_types):
        pressures = [pressures] * 4

    # TODO: handle default pressure as input
    self.sid = sid

    # these can be greater than 1 if it's a shell (not a solid)
    self.eids = eids
    self.pressures = np.asarray(pressures, dtype='float64')
    if surf_or_line == 'SURF':
        # Blank (NaN) pressures default to the first pressure p1.
        inan = np.isnan(self.pressures)
        self.pressures[inan] = pressures[0]

    #: used for solid element only
    self.g1 = g1
    #: g3/g4 - different depending on CHEXA/CPENTA or CTETRA
    self.g34 = g34

    #: Coordinate system identification number. See Remark 2.
    #: (Integer >= 0;Default=0)
    self.cid = cid
    self.nvector = nvector

    # flag with values of SURF/LINE
    self.surf_or_line = surf_or_line

    # Line load direction
    #
    #   1. X, Y, Z : line load in x/y/z in the element coordinate
    #      system
    #   2. TANG : line load is tangent to the edge pointing
    #      from G1 to G2
    #   3. NORM : line load is in the mean plane, normal to the
    #      edge and pointing outwards from the element
    #
    #   if cid=N123 = 0: line_load_dir_default=NORM
    self.line_load_dir = line_load_dir

    #self.eid_ref = None
    # Cross-reference slots, filled in later (presumably by cross_reference
    # methods - confirm).
    self.g1_ref = None
    self.g34_ref = None
    self.cid_ref = None
    self.eids_ref = None
def validate(self):
    """Sanity-check the card's fields; raises RuntimeError on bad flags."""
    if self.surf_or_line not in ['SURF', 'LINE']:
        raise RuntimeError('PLOAD4; sid=%s surf_or_line=%r' % (self.sid, self.surf_or_line))
    if self.line_load_dir not in ['LINE', 'X', 'Y', 'Z', 'TANG', 'NORM']:
        raise RuntimeError(self.line_load_dir)
    # NOTE(review): asserts are stripped under `python -O`; if g1/g34 == 0
    # must always be rejected, consider raising instead - confirm.
    assert self.g1 != 0, str(self)
    assert self.g34 != 0, str(self)
@classmethod
def add_card(cls, card, comment=''):
    """
    Adds a PLOAD4 card from ``BDF.add_card(...)``

    Parameters
    ----------
    card : BDFCard()
        a BDFCard object
    comment : str; default=''
        a comment for the card
    """
    sid = integer(card, 1, 'sid')
    eid = integer(card, 2, 'eid')
    p1 = double_or_blank(card, 3, 'p1', 0.0)
    # p2-p4 may be blank (None); __init__ fills NaNs from p1 for SURF loads.
    pressures = [
        p1,
        double_or_blank(card, 4, 'p2'),
        double_or_blank(card, 5, 'p3'),
        double_or_blank(card, 6, 'p4')]

    eids = [eid]
    # Field 7 is either G1 (solid form) or the literal 'THRU' (shell range).
    g1_thru = integer_string_or_blank(card, 7, 'g1/THRU')
    if g1_thru == 'THRU' and integer_or_blank(card, 8, 'eid2'):
        # alternate form
        eid2 = integer(card, 8, 'eid2')
        if eid2:
            eids = list(unique(
                expand_thru([eid, 'THRU', eid2], set_fields=False, sort_fields=False)
            ))
        g1 = None
        g34 = None
    else:
        # standard form
        eids = [eid]
        g1 = integer_or_blank(card, 7, 'g1')
        g34 = integer_or_blank(card, 8, 'g34')

    # If both (CID, N1, n2, N3) and LDIR are blank, then the default is
    # LDIR=NORM.
    cid = integer_or_blank(card, 9, 'cid')
    n1 = double_or_blank(card, 10, 'N1', 0.)
    n2 = double_or_blank(card, 11, 'N2', 0.)
    n3 = double_or_blank(card, 12, 'N3', 0.)

    nvector = array([n1, n2, n3])

    surf_or_line = string_or_blank(card, 13, 'sorl', 'SURF')
    line_load_dir = string_or_blank(card, 14, 'ldir', 'NORM')
    assert len(card) <= 15, f'len(PLOAD4 card) = {len(card):d}\ncard={card}'
    return PLOAD4(sid, eids, pressures, g1, g34, cid, nvector,
                  surf_or_line, line_load_dir, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a PLOAD4 card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
sid = data[0]
eid = data[1]
pressures = data[2]
g1 = data[3]
g34 = data[4]
if g1 == 0:
g1 = None
if g34 == 0:
g34 = None
cid = data[5]
nvector = data[6]
if cid == 0 and nvector == [0., 0., 0.]:
# these are apparently the secret defaults
# it just means to use the normal vector
cid = None
nvector = None
surf_or_line = data[7]
eids = [eid]
if data[7] is None:
surf_or_line = 'SURF'
assert data[8] is None, data
line_load_dir = 'NORM'
| |
<gh_stars>10-100
"""
This module contains the classes :class:`hyvr.input.option_parsing.Section` and
:class:`hyvr.input.option_parsing.Option` to simplify parsing and validating
input-files (``*.ini``).
Every HyVR option should have a corresponding instance of an
:class:`~hyvr.input.option_parsing.Option`, which is defined in
:mod:`hyvr.input.options` in the options lists. These option lists
are then assigned to :class:`~hyvr.input.option_parsing.Section` instances in
:func:`hyvr.input.parameters.parse_inifile`.
If you add functionality to HyVR, you most likely only have to add an option to
the option lists, normally you don't have to change anything in here (at least
if you're lucky, since this is a very recursive mess).
:Author: <NAME>
"""
import sys
from copy import deepcopy
__all__ = ["Section", "Option", "MissingSectionError", "MissingOptionError", "ShapeError", "assert_exists"]
class Section():
    """
    A ``Section`` instance corresponds to a section in an ini-file.

    A section is mainly a wrapper for a list of options, and therefore only
    has a name and a list of options. Upon parsing a section, it checks
    whether all given options in the inifile are actually expected, and then
    delegates the parsing of single options to the
    :class:`~hyvr.input.option_parsing.Option` class.
    """

    def __init__(self, name, options):
        """
        Parameters
        ----------
        name : str
            Name of the section
        options : list
            List of Options
        """
        self.name = name
        # deep-copy so parsing one section can never mutate the shared
        # option definitions used elsewhere
        self.options = deepcopy(options)
        self.optionnames = [opt.name for opt in options]

    def parse(self, section_dict):
        """
        Parses and validates options of the section given a dictionary
        containing key-value pairs of the options. If options are not
        present, default values might be set.

        Parameters
        ----------
        section_dict : dictionary
            Dictionary of section values. This can for example be obtained
            using ``configparser``::

                p = configparser.ConfigParser()
                p.read(filename)
                section_dict = dict(p[section_name])

        Returns
        -------
        section_dict : dictionary
            The same dictionary with parsed and validated values
        """
        # stored on the instance because Option.parse() reads sibling
        # values (e.g. shape references) through ``section.dict``
        self.dict = section_dict
        for key in section_dict:
            if key not in self.optionnames:
                print("Warning: Unknown option: {:s} in section {:s}".format(
                    key, self.name), file=sys.stderr
                )
        for opt, opt_name in zip(self.options, self.optionnames):
            self.dict[opt_name] = opt.parse(self)
        return self.dict

    def __repr__(self):
        names = ''.join(name + ',' for name in self.optionnames)
        return "Section(name={:s},options=".format(self.name) + names + ')'
class Option():
"""
An ``Option`` instance is basically a parser for a specific option in a
ini-file.
An ``Option`` is typically part of a ``Section``, and normally
:func:`~hyvr.input.option_parsing.Option.parse` is called by the ``Section``
it belongs to.
The main tasks of an ``Option`` instance is to parse an option and make sure
it has the right type, and in the case of lists, the right shape.
Below is a description of its capabilities, note especially the ``shape``
parameter.
Parameters
----------
name : string
name of the option
dtype : type
type of the value of the option, e.g. float, str, int, or list.
If dtype is list, every entry of the list must have the same type.
optional : bool, optional (default: False)
whether the option is optional or not
default : optional, (default: None)
if optional=True, this default value will be used if this option is not
given.
shape : int or string or list/tuple of ints and/or strings, optional (only
required for lists, default: None)
If dtype is ``list``, this is the shape of the list.
There are several possibilities how to use this option:
* if ``shape=n`` where n is a nonnegative integer, the value must be
a list with length ``n``.
* if ``shape=-1`` the value can have an arbitrary shape.
* if ``shape="option1"``, the value of this option must have the same
shape as "option1". This is especially useful if the shape of
"option1" is set to -1.
* if ``shape=[2, 3]``, the value must be a list of lists, where the
outermost list has length 2 and the inner lists all have length 3.
This also works for more than 2 dimensions.
* if ``shape=[2, -1, 3]``, the value must be a list of lists of lists.
The outermost list must again have length 2, the innermost lists must
have length 3, and the lists at the intermediate level can have any
length (even different lengths).
* if ``shape=[2, "option1", 3]``, the values must again be a list of
lists similar to above, but now the lists at the intermediate level
must have the same length as "option1".
* if ``shape=[2, [1, 2], [[3], [3, 3]]]``, the value must be a list
of lists of lists. The outermost list must again have length 2. The
two lists it contains have length 1 and length 2. The innermost lists
all must have length 3.
It's also possible to only give the innermost value(s), e.g. for a list
with ``shape=[2, 3]`` only the value ``18``. This will then be expanded
to ``[[18, 18, 18], [18, 18, 18]]``. Similarly, ``[18, 19, 20]`` would
be expanded to ``[[18, 19, 20], [18, 19, 20]]``.
This expansion obviously doesn't work if ``shape=-1``.
If ``shape=[2, -1, 3]``, expansion is possible if the given value is
e.g. ``[[1, 2, 3], [1, 2, 3], [1, 2, 3]]``, but not if only ``[1, 2, 3]``
is given, since the length of the second dimension must be determined
from the given value.
datatype: int, float or string, optional (only required for lists, default: None)
Type of the innermost elements of the option in case the option is a list.
validation_func: function of one argument that returns a boolean, optional.
Returns true if the value (for lists or lists of lists this applies to
all values) is valid.
alternatives: list of strings
List of alternative names for this option. If the option is not found
in the given dictionary while parsing, these alternatives are used if
they exist. If an alternative is found (searching happens in the
supplied order), the option is stored under it's standard name.
"""
def __init__(self, name, dtype, optional=False, default=None, shape=-1,
datatype=None, validation_func=lambda x: True, alternatives=[]):
self.name = name
self.dtype = dtype
self.optional = optional
self.default = default
self.shape = shape
self.datatype = datatype
self.validation_func = validation_func
if not isinstance(alternatives, list):
alternatives = [alternatives]
self.alternatives = alternatives
# make sure we have shape and datatype for lists
if self.dtype == list:
if self.shape is None:
raise ValueError('Option ' + self.name + ' has type list, but no shape was given.')
if self.datatype is None:
raise ValueError('Option ' + self.name + ' has type list, but no datatype for its elements was given.')
def __repr__(self):
return "Option(name={:s}, dtype={:s}, optional={:s}, default={:s}, shape={:s}, datatype={:})".format(
self.name, str(self.dtype), str(self.optional), str(self.default), str(self.shape), str(self.datatype)
)
def parse(self, section):
"""
Parses the option based on it's attributes.
Parameters
----------
section : ``Section`` instance
The section it belongs to.
Returns
-------
value : self.type
The parsed value.
Raises
------
ShapeError
If the parsed list does not have the right shape.
ValueError
Other errors, description in text.
"""
# try to find alternatives if they exist
alternatives = deepcopy(self.alternatives)
while len(alternatives) != 0 and self.name not in section.dict:
other_name = alternatives.pop(0)
if other_name in section.dict:
section.dict[self.name] = section.dict[other_name]
del section.dict[other_name]
break
if not self.optional:
assert_exists(self.name, section.dict, section.name)
if self.name not in section.dict:
return self.default
else:
if self.dtype != list:
if self.dtype == bool:
# this is necessary since ``bool("False")`` returns ``True``.
value = parse_bool(section, self.name)
else:
value = self.dtype(section.dict[self.name])
if not self.validation_func(value):
raise ValueError('Invalid input for option ' + self.name +
' in section ' + section.name)
return value
else:
value = parse_list(section.dict[self.name], self.datatype)
# value validation
if not all_true(self.validation_func, value):
raise ValueError('Invalid input for option ' + self.name +
' in section ' + section.name)
shape = deepcopy(self.shape)
# now we need to get the correct shape
if shape == -1:
# we don't care for the shape of this
if not isinstance(value, list):
value = [value]
return value
if isinstance(shape, str):
# in this case we simply use the shape of the option with this name
if shape not in section.dict:
raise ValueError(self.name + ' in ' + section.name + ' has an invalid ' +\
'shape because the options whose shape it should have ' +\
'does not exist. Check | |
other._succ)
def __ne__(self, other):
r"""
Tests difference.
TEST::
sage: iet.RauzyDiagram('a b','b a') != iet.RauzyDiagram('a b c','c b a')
True
sage: r = iet.RauzyDiagram('a b c','c b a')
sage: r1 = iet.RauzyDiagram('a c b','c b a', alphabet='abc')
sage: r2 = iet.RauzyDiagram('a b c','c a b', alphabet='abc')
sage: r != r1
False
sage: r != r2
False
sage: r1 != r2
False
"""
return (
type(self) is not type(other) or
self._edge_types != other._edge_types or
self._succ.keys()[0] not in other._succ)
def vertices(self):
r"""
Returns a list of the vertices.
EXAMPLES::
sage: r = iet.RauzyDiagram('a b','b a')
sage: for p in r.vertices(): print p
a b
b a
"""
return [self._vertex_to_permutation(x) for x in self._succ.keys()]
def vertex_iterator(self):
r"""
Returns an iterator over the vertices
EXAMPLES::
sage: r = iet.RauzyDiagram('a b','b a')
sage: for p in r.vertex_iterator(): print p
a b
b a
::
sage: r = iet.RauzyDiagram('a b c d','d c b a')
sage: from itertools import ifilter
sage: r_1n = ifilter(lambda x: x.is_cylindric(), r)
sage: for p in r_1n: print p
a b c d
d c b a
"""
from itertools import imap
return imap(
lambda x: self._vertex_to_permutation(x),
self._succ.keys())
def edges(self,labels=True):
r"""
Returns a list of the edges.
EXAMPLES::
sage: r = iet.RauzyDiagram('a b','b a')
sage: len(r.edges())
2
"""
return list(self.edge_iterator())
def edge_iterator(self):
r"""
Returns an iterator over the edges of the graph.
EXAMPLES::
sage: p = iet.Permutation('a b','b a')
sage: r = p.rauzy_diagram()
sage: for e in r.edge_iterator():
....: print e[0].str(sep='/'), '-->', e[1].str(sep='/')
a b/b a --> a b/b a
a b/b a --> a b/b a
"""
for x in self._succ.keys():
for i,y in enumerate(self._succ[x]):
if y is not None:
yield(
(self._vertex_to_permutation(x),
self._vertex_to_permutation(y),
i))
def edge_types_index(self, data):
r"""
Try to convert the data as an edge type.
INPUT:
- ``data`` - a string
OUTPUT:
integer
EXAMPLES:
For a standard Rauzy diagram (only right induction) the 0 index
corresponds to the 'top' induction and the index 1 corresponds to the
'bottom' one::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram()
sage: r.edge_types_index('top')
0
sage: r[p][0] == p.rauzy_move('top')
True
sage: r.edge_types_index('bottom')
1
sage: r[p][1] == p.rauzy_move('bottom')
True
The special operations (inversion and symmetry) always appears after the
different Rauzy inductions::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram(symmetric=True)
sage: r.edge_types_index('symmetric')
2
sage: r[p][2] == p.symmetric()
True
This function always try to resolve conflictuous name. If it's
impossible a ValueError is raised::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram(left_induction=True)
sage: r.edge_types_index('top')
Traceback (most recent call last):
...
ValueError: left and right inductions must be differentiated
sage: r.edge_types_index('top_right')
0
sage: r[p][0] == p.rauzy_move(0)
True
sage: r.edge_types_index('bottom_left')
3
sage: r[p][3] == p.rauzy_move('bottom', 'left')
True
::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram(left_right_inversion=True,top_bottom_inversion=True)
sage: r.edge_types_index('inversion')
Traceback (most recent call last):
...
ValueError: left-right and top-bottom inversions must be differentiated
sage: r.edge_types_index('lr_inverse')
2
sage: p.lr_inverse() == r[p][2]
True
sage: r.edge_types_index('tb_inverse')
3
sage: p.tb_inverse() == r[p][3]
True
Short names are accepted::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram(right_induction='top',top_bottom_inversion=True)
sage: r.edge_types_index('top_rauzy_move')
0
sage: r.edge_types_index('t')
0
sage: r.edge_types_index('tb')
1
sage: r.edge_types_index('inversion')
1
sage: r.edge_types_index('inverse')
1
sage: r.edge_types_index('i')
1
"""
if not isinstance(data,str):
raise ValueError("the edge type must be a string")
if 'top_rauzy_move'.startswith(data) or 't_rauzy_move'.startswith(data):
if 'lt_rauzy' in self._index:
if 'rt_rauzy' in self._index:
raise ValueError("left and right inductions must "
"be differentiated")
return self._index['lt_rauzy']
if 'rt_rauzy' in self._index:
return self._index['rt_rauzy']
raise ValueError("no top induction in this Rauzy diagram")
if ('bottom_rauzy_move'.startswith(data) or
'b_rauzy_move'.startswith(data)):
if 'lb_rauzy' in self._index:
if 'rb_rauzy' in self._index:
raise ValueError("left and right inductions must "
"be differentiated")
return self._index['lb_rauzy']
if 'rb_rauzy' in self._index:
return self._index['rb_rauzy']
raise ValueError("no bottom Rauzy induction in this diagram")
if ('left_rauzy_move'.startswith(data) or
'l_rauzy_move'.startswith(data)):
if 'lt_rauzy' in self._index:
if 'lb_rauzy' in self._index:
raise ValueError("top and bottom inductions must be differentiated")
return self._index['lt_rauzy']
if 'lb_rauzy' in self._index:
return self._index('lb_rauzy')
raise ValueError("no left Rauzy induction in this diagram")
if ('lt_rauzy_move'.startswith(data) or
'tl_rauzy_move'.startswith(data) or
'left_top_rauzy_move'.startswith(data) or
'top_left_rauzy_move'.startswith(data)):
if not 'lt_rauzy' in self._index:
raise ValueError("no top-left Rauzy induction in this diagram")
else:
return self._index['lt_rauzy']
if ('lb_rauzy_move'.startswith(data) or
'bl_rauzy_move'.startswith(data) or
'left_bottom_rauzy_move'.startswith(data) or
'bottom_left_rauzy_move'.startswith(data)):
if not 'lb_rauzy' in self._index:
raise ValueError("no bottom-left Rauzy induction in this diagram")
else:
return self._index['lb_rauzy']
if 'right'.startswith(data):
raise ValueError("ambiguity with your edge name: %s" % (data))
if ('rt_rauzy_move'.startswith(data) or
'tr_rauzy_move'.startswith(data) or
'right_top_rauzy_move'.startswith(data) or
'top_right_rauzy_move'.startswith(data)):
if not 'rt_rauzy' in self._index:
raise ValueError("no top-right Rauzy induction in this diagram")
else:
return self._index['rt_rauzy']
if ('rb_rauzy_move'.startswith(data) or
'br_rauzy_move'.startswith(data) or
'right_bottom_rauzy_move'.startswith(data) or
'bottom_right_rauzy_move'.startswith(data)):
if not 'rb_rauzy' in self._index:
raise ValueError("no bottom-right Rauzy induction in this diagram")
else:
return self._index['rb_rauzy']
if 'symmetric'.startswith(data):
if not 'symmetric' in self._index:
raise ValueError("no symmetric in this diagram")
else:
return self._index['symmetric']
if 'inversion'.startswith(data) or data == 'inverse':
if 'lr_inverse' in self._index:
if 'tb_inverse' in self._index:
raise ValueError("left-right and top-bottom inversions must be differentiated")
return self._index['lr_inverse']
if 'tb_inverse' in self._index:
return self._index['tb_inverse']
raise ValueError("no inversion in this diagram")
if ('lr_inversion'.startswith(data) or
data == 'lr_inverse' or
'left_right_inversion'.startswith(data) or
data == 'left_right_inverse'):
if not 'lr_inverse' in self._index:
raise ValueError("no left-right inversion in this diagram")
else:
return self._index['lr_inverse']
if ('tb_inversion'.startswith(data) or
data == 'tb_inverse' or
'top_bottom_inversion'.startswith(data)
or data == 'top_bottom_inverse'):
if not 'tb_inverse' in self._index:
raise ValueError("no top-bottom inversion in this diagram")
else:
return self._index['tb_inverse']
raise ValueError("this edge type does not exist: %s" % (data))
    def edge_types(self):
        r"""
        Print information about edges.

        EXAMPLES::

            sage: r = iet.RauzyDiagram('a b', 'b a')
            sage: r.edge_types()
            0: rauzy_move(0, -1)
            1: rauzy_move(1, -1)

        ::

            sage: r = iet.RauzyDiagram('a b', 'b a', left_induction=True)
            sage: r.edge_types()
            0: rauzy_move(0, -1)
            1: rauzy_move(1, -1)
            2: rauzy_move(0, 0)
            3: rauzy_move(1, 0)

        ::

            sage: r = iet.RauzyDiagram('a b',' b a',symmetric=True)
            sage: r.edge_types()
            0: rauzy_move(0, -1)
            1: rauzy_move(1, -1)
            2: symmetric()
        """
        # each edge type is stored as a (operation_name, arguments) pair;
        # NOTE: Python 2 print statement (this module is Python-2-only code)
        for i,(edge_type,t) in enumerate(self._edge_types):
            print str(i) + ": " + edge_type + str(t)
def alphabet(self, data=None):
r"""
TESTS::
sage: r = iet.RauzyDiagram('a b','b a')
sage: r.alphabet() == Alphabet(['a','b'])
True
sage: r = iet.RauzyDiagram([0,1],[1,0])
sage: r.alphabet() == Alphabet([0,1])
True
"""
if data is None:
return self._element._alphabet
else:
self._element._set_alphabet(data)
def letters(self):
r"""
Returns the letters used by the RauzyDiagram.
EXAMPLES::
sage: r = iet.RauzyDiagram('a b','b a')
sage: r.alphabet()
{'a', 'b'}
sage: r.letters()
['a', 'b']
sage: r.alphabet('ABCDEF')
sage: r.alphabet()
{'A', 'B', 'C', 'D', 'E', 'F'}
sage: r.letters()
['A', 'B']
"""
return self._element.letters()
    def _vertex_to_permutation(self,data=None):
        r"""
        Converts the (vertex) data to a permutation.

        TESTS:

            sage: r = iet.RauzyDiagram('a b','b a') #indirect doctest
        """
        # load the vertex data into the cached element first (when given),
        # then hand back a copy so callers cannot mutate the cache
        if data is not None:
            self._set_element(data)
        return copy(self._element)
def edge_to_matrix(self, p=None, edge_type=None):
r"""
Return the corresponding matrix
INPUT:
- ``p`` - a permutation
- ``edge_type`` - 0 or 1 corresponding to the type of the edge
OUTPUT:
A matrix
EXAMPLES::
sage: p = iet.Permutation('a b c','c b a')
sage: d = p.rauzy_diagram()
sage: print d.edge_to_matrix(p,1)
[1 0 1]
[0 1 0]
[0 0 1]
"""
if p is None and edge_type is None:
return identity_matrix(self._n)
function_name = self._edge_types[edge_type][0] + '_matrix'
if not hasattr(self._element_class,function_name):
return identity_matrix(self._n)
arguments = self._edge_types[edge_type][1]
return getattr(p,function_name)(*arguments)
def edge_to_winner(self, p=None, edge_type=None):
r"""
Return the corresponding winner
TEST::
sage: r = iet.RauzyDiagram('a b','b a')
sage: r.edge_to_winner(None,None)
[]
"""
if p is None and edge_type is None:
return []
function_name = self._edge_types[edge_type][0] + '_winner'
if not hasattr(self._element_class, function_name):
return [None]
arguments = self._edge_types[edge_type][1]
return [getattr(p,function_name)(*arguments)]
def edge_to_loser(self, p=None, edge_type=None):
r"""
Return the corresponding loser
TEST::
sage: r = iet.RauzyDiagram('a b','b a')
sage: r.edge_to_loser(None,None)
[]
"""
if p is None and edge_type is None:
return []
function_name = self._edge_types[edge_type][0] + '_loser'
if not hasattr(self._element_class, function_name):
return [None]
arguments = self._edge_types[edge_type][1]
return [getattr(p,function_name)(*arguments)]
def _all_npath_extension(self, g, length=0):
r"""
Returns an iterator over all extension of fixed length of p.
INPUT:
- ``p`` - a path
- ``length`` - a non-negative integer
TESTS:
::
sage: p = iet.Permutation('a b','b a')
sage: r = p.rauzy_diagram()
sage: g0 = r.path(p)
sage: for g in r._all_npath_extension(g0,0):
....: print g
Path of length 0 in a Rauzy | |
thetas
theta_cand = cov_theta_cand.gen_cand(theta, m)
for i in range(setup.nexp):
# Find new candidate values for theta
theta_eval_mat[i][:] = theta[i][m-1].reshape(setup.ntemps * setup.ntheta[i], setup.p)
#theta_cand[i][:] = chol_sample_1per(theta[i][m-1], S[i])
theta_cand_mat[i][:] = theta_cand[i].reshape(setup.ntemps * setup.ntheta[i], setup.p)
# Check constraints
good_values_mat[i][:] = setup.checkConstraints(
tran_unif(theta_cand_mat[i], setup.bounds_mat, setup.bounds.keys()), setup.bounds,
)
good_values[i][:] = good_values_mat[i].reshape(setup.ntemps, setup.ntheta[i])
# Generate Predictions at new Theta values
theta_eval_mat[i][good_values_mat[i]] = theta_cand_mat[i][good_values_mat[i]]
pred_cand[i][:] = setup.models[i].eval(
tran_unif(theta_eval_mat[i], setup.bounds_mat, setup.bounds.keys()), pool=False
)#.reshape(setup.ntemps, setup.y_lens[i])
#marg_lik_cov_curr[i] = [None] * setup.ntemps
for t in range(setup.ntemps):
for j in range(setup.ntheta[i]):
#marg_lik_cov_curr[i][t][j] = setup.models[i].lik_cov_inv(np.exp(log_s2[i][0, t, setup.s2_ind[i]])[setup.s2_ind[i]==j])
llik_cand[i][t][j] = setup.models[i].llik(setup.ys[i][theta_which_mat[i][j]], pred_cand[i][t][theta_which_mat[i][j]], marg_lik_cov_curr[i][t][j])
#sse_cand[i][:] = ((pred_cand[i] - setup.ys[i])**2 @ s2_ind_mat[i]) / s2[i][m-1]
# Calculate log-probability of MCMC accept
alpha[i][:] = - np.inf
alpha[i][good_values[i]] = itl_mat[i][good_values[i]] * (
#- 0.5 * (sse_cand[i][good_values[i]] - sse_curr[i][good_values[i]])
llik_cand[i][good_values[i]] - llik_curr[i][good_values[i]]
+ mvnorm_logpdf_(theta_cand[i], theta0[m-1],
Sigma0_inv_curr, Sigma0_ldet_curr)[good_values[i]]
- mvnorm_logpdf_(theta[i][m-1], theta0[m-1],
Sigma0_inv_curr, Sigma0_ldet_curr)[good_values[i]]
)
# MCMC Accept
accept[i][:] = np.log(uniform(size = alpha[i].shape)) < alpha[i]
# Where accept, make changes
theta[i][m][accept[i]] = theta_cand[i][accept[i]].copy()
#ind = accept[i] @ theta_ind_mat[i].T
#pred_curr[i][ind] = pred_cand[i][ind].copy()
for t in range(setup.ntemps):
accept_t = np.where(accept[i][t])[0]
if accept_t.shape[0] > 0:
ind = np.hstack([theta_which_mat[i][j] for j in accept_t])
pred_curr[i][t][ind] = pred_cand[i][t][ind].copy()
#for j in np.where(accept[i][t])[0]:
# pred_curr[i][t][theta_which_mat[i][j]] = pred_cand[i][t][theta_which_mat[i][j]]
llik_curr[i][accept[i]] = llik_cand[i][accept[i]].copy()
count[i][accept[i]] += 1
cov_theta_cand.count_100[i][accept[i]] += 1
#count_100[i][accept[i]] += 1
cov_theta_cand.update_tau(m)
#if m>10000:
# print('help')
# # Adaptive Metropolis Update
# if m % 100 == 0 and m > 300:
# delta = min(0.1, 1/np.sqrt(m+1)*5)
# for i in range(setup.nexp):
# tau[i][count_100[i] < 23] -= delta
# tau[i][count_100[i] > 23] += delta
# count_100[i] *= 0
## Decorrelation Step
if False:#m % setup.decor == 0:
for i in range(setup.nexp):
for k in range(setup.p):
# Find new candidate values for theta
theta_cand[i][:] = theta[i][m].copy()
theta_eval_mat[i][:] = theta[i][m].reshape(setup.ntheta[i] * setup.ntemps, setup.p)
theta_cand[i][:,:,k] = initfunc(size = (setup.ntemps, setup.ntheta[i]))
theta_cand_mat[i][:] = theta_cand[i].reshape(setup.ntheta[i]*setup.ntemps, setup.p)
# Compute constraint flags
good_values_mat[i][:] = setup.checkConstraints(
tran(theta_cand_mat[i], setup.bounds_mat, setup.bounds.keys()), setup.bounds,
)
# Generate predictions at "good" candidate values
theta_eval_mat[i][good_values_mat[i]] = theta_cand_mat[i][good_values_mat[i]]
good_values[i][:] = good_values_mat[i].reshape(setup.ntemps, setup.ntheta[i])
pred_cand[i][:] = setup.models[i].eval(
tran(theta_eval_mat[i], setup.bounds_mat, setup.bounds.keys())
)
sse_cand[i][:] = ((pred_cand[i] - setup.ys[i])**2 @ s2_ind_mat[i]) / s2[i][m-1] ## check the [:] here !!!!! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Calculate log-probability of MCMC Accept
alpha[i][:] = - np.inf
alpha[i][good_values[i]] = (
- 0.5 * itl_mat[i][good_values[i]] * (
sse_cand[i][good_values[i]] - sse_curr[i][good_values[i]]
)
+ itl_mat[i][good_values[i]] * (
+ mvnorm_logpdf_(theta_cand[i], theta0[m-1],
Sigma0_inv_curr, Sigma0_ldet_curr)[good_values[i]]
- mvnorm_logpdf_(theta[i][m], theta0[m-1],
Sigma0_inv_curr, Sigma0_ldet_curr)[good_values[i]]
)
) ## THIS NEEDS SOMETHING FOR THE PROPOSAL~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MCMC Accept
accept[i][:] = (np.log(uniform(size = alpha[i].shape)) < alpha[i])
# Where accept, make changes
theta[i][m][accept[i]] = theta_cand[i][accept[i]].copy()
pred_curr[i][accept[i] @ s2_ind_mat[i].T] = \
pred_cand[i][accept[i] @ s2_ind_mat[i].T].copy()
sse_curr[i][accept[i]] = sse_cand[i][accept[i]].copy()
count_decor[i][accept[i], k] = count_decor[i][accept[i], k] + 1
#------------------------------------------------------------------------------------------
## update s2
for i in range(setup.nexp):
if setup.models[i].s2=='gibbs':
## gibbs update s2
dev_sq = (pred_curr[i] - setup.ys[i])**2 @ s2_ind_mat[i] # squared deviations
log_s2[i][m] = np.log(1 / np.random.gamma(
itl_mat[i] * (setup.ny_s2[i] / 2 + setup.ig_a[i] + 1) - 1,
1 / (itl_mat[i] * (setup.ig_b[i] + dev_sq / 2)),
))
for t in range(setup.ntemps):
s2_stretched = log_s2[i][m][t,setup.theta_ind[i]]
for j in range(setup.ntheta[i]):
marg_lik_cov_curr[i][t][j] = setup.models[i].lik_cov_inv(np.exp(s2_stretched[s2_which_mat[i][j]]))
llik_curr[i][t][j] = setup.models[i].llik(setup.ys[i][s2_which_mat[i][j]], pred_curr[i][t][s2_which_mat[i][j]], marg_lik_cov_curr[i][t][j])
elif setup.models[i].s2=='fix':
log_s2[i][m] = np.log(setup.sd_est[i]**2)
#for t in range(setup.ntemps):
# for j in range(setup.ntheta[i]):
# marg_lik_cov_curr[i][t][j] = setup.models[i].lik_cov_inv(np.exp(log_s2[i][m][t, setup.s2_ind[i]])[setup.s2_ind[i]==j])
# llik_curr[i][t][j] = setup.models[i].llik(setup.ys[i][setup.theta_ind[i]==j], pred_curr[i][t][setup.theta_ind[i]==j], marg_lik_cov_curr[i][t][j])
else: # this needs to be fixed ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# this needs to be fixed ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# this needs to be fixed ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# this needs to be fixed ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# this needs to be fixed ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# this needs to be fixed ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# this needs to be fixed ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## M-H update s2
# NOTE: there is something wrong with this...with no tempering, 10 kolski experiments,
# reasonable priors, s2 can diverge for some experiments (not a random walk, has weird patterns).
# This seems to be because of the joint update, but is strange. Could be that individual updates
# would make it go away, but it shouldn't be there anyway.
cov_ls2_cand[i].update(log_s2[i], m)
ls2_candi = cov_ls2_cand[i].gen_cand(log_s2[i], m)
llik_candi = np.zeros([setup.ntemps, setup.ntheta[i]])
marg_lik_cov_candi = [None] * setup.ntemps
for t in range(setup.ntemps):
marg_lik_cov_candi[t] = [None] * setup.ntheta[i]
for j in range(setup.ntheta[i]):
marg_lik_cov_candi[t][j] = setup.models[i].lik_cov_inv(np.exp(ls2_candi[t, setup.s2_ind[i]])[setup.s2_ind[i]==j])#s2[i][0, t, setup.s2_ind[i]])
llik_candi[t][j] = setup.models[i].llik(setup.ys[i][setup.theta_ind[i]==j], pred_curr[i][t][setup.theta_ind[i]==j], marg_lik_cov_candi[t][j])
# something wrong still, getting way too large of variance
#marg_lik_cov_candi[t] = setup.models[i].lik_cov_inv(np.exp(ls2_candi[t])[setup.s2_ind[i]])#s2[i][0, t, setup.s2_ind[i]])
#llik_candi[t] = setup.models[i].llik(setup.ys[i], pred_curr[i][t], marg_lik_cov_candi[t])
llik_diffi = (llik_candi - llik_curr[i])
alpha_s2 = setup.itl * (llik_diffi)
alpha_s2 += setup.itl * setup.s2_prior_kern[i](np.exp(ls2_candi), setup.ig_a[i], setup.ig_b[i]).sum(axis=1)#ldhc_kern(np.exp(ls2_cand[i])).sum(axis=1)#ldig_kern(np.exp(ls2_cand[i]),setup.ig_a[i],setup.ig_b[i]).sum(axis=1)
alpha_s2 += setup.itl * ls2_candi.sum(axis=1)
alpha_s2 -= setup.itl * setup.s2_prior_kern[i](np.exp(log_s2[i][m-1]), setup.ig_a[i], setup.ig_b[i]).sum(axis=1)#ldhc_kern(np.exp(log_s2[i][m-1])).sum(axis=1)#ldig_kern(np.exp(log_s2[i][m-1]),setup.ig_a[i],setup.ig_b[i]).sum(axis=1)
alpha_s2 -= setup.itl * log_s2[i][m-1].sum(axis=1)
runif = np.log(uniform(size=setup.ntemps))
for t in np.where(runif < alpha_s2)[0]:
count_s2[i, t] += 1
llik_curr[i][t] = llik_candi[t].copy()
log_s2[i][m][t] = ls2_candi[t].copy()
marg_lik_cov_curr[i][t] = marg_lik_cov_candi[t].copy()
cov_ls2_cand[i].count_100[t] += 1
cov_ls2_cand[i].update_tau(m)
if False:
## MH update s2
for i in range(setup.nexp):
cov_ls2_cand[i].update(log_s2[i], m)
llik_cand[i][:] = 0.
ls2_cand = [cov_ls2_cand[i].gen_cand(log_s2[i], m) for i in range(setup.nexp)]
marg_lik_cov_cand = [None] * setup.nexp
for i in range(setup.nexp):
marg_lik_cov_cand[i] = [None] * setup.ntemps
for t in range(setup.ntemps):
marg_lik_cov_cand[i][t] = [None] * setup.ntheta[i]
for j in range(setup.ntheta[i]):
marg_lik_cov_cand[i][t][j] = setup.models[i].lik_cov_inv(np.exp(ls2_cand[i][t, setup.s2_ind[i]])[setup.s2_ind[i]==j])#s2[i][0, t, setup.s2_ind[i]])
llik_cand[i][t][j] = setup.models[i].llik(setup.ys[i][setup.theta_ind[i]==j], pred_curr[i][t][setup.theta_ind[i]==j], marg_lik_cov_cand[i][t][j])
## joint update for ntheta[i] s2s
#llik_diff = (llik_cand.sum(axis=2) - llik_curr.sum(axis=2)) # should be summing over the nthera axis
alpha_s2[:] = - np.inf
#alpha_s2 = setup.itl * (llik_diff)
for i in range(setup.nexp): # this needs help...sum over ntheta axis
alpha_s2[i,:] = setup.itl * (llik_cand[i].sum(axis=1) - llik_curr[i].sum(axis=1))
alpha_s2[i,:] += setup.itl * setup.s2_prior_kern[i](np.exp(ls2_cand[i]), setup.ig_a[i], setup.ig_b[i]).sum(axis=1)
alpha_s2[i,:] += setup.itl * ls2_cand[i].sum(axis=1)
alpha_s2[i,:] -= setup.itl * setup.s2_prior_kern[i](np.exp(log_s2[i][m-1]), setup.ig_a[i], setup.ig_b[i]).sum(axis=1)
alpha_s2[i,:] -= setup.itl * log_s2[i][m-1].sum(axis=1)
runif = np.log(uniform(size=[setup.nexp, setup.ntemps]))
for i in range(setup.nexp):
for t in np.where(runif[i] < alpha_s2[i])[0]:
if np.any(ls2_cand[0][0] > np.log(100)) and t==0:
print('bad')
count_s2[i, t] += 1
llik_curr[i][t] = llik_cand[i][t].copy()
log_s2[i][m][t] = ls2_cand[i][t].copy()
marg_lik_cov_curr[i][t] = marg_lik_cov_cand[i][t].copy()
cov_ls2_cand[i].count_100[t] += 1
for i in range(setup.nexp):
cov_ls2_cand[i].update_tau(m)
# dev_sq[i][:] = (pred_curr[i] - setup.ys[i])**2 @ s2_ind_mat[i]
# s2[i][m] = 1 / np.random.gamma(
# (itl_mat[i] * setup.ny_s2[i] / 2 + setup.ig_a[i] + 1) - 1,
# 1 / (itl_mat[i] * (setup.ig_b[i] + dev_sq[i] / 2)),
# )
# sse_curr[i][:] = dev_sq[i] / s2[i][m]
## Gibbs update theta0
cc = np.linalg.inv(
np.einsum('t,tpq->tpq', ntheta * setup.itl, Sigma0_inv_curr) + theta0_prior_prec,
)
tbar *= 0.
for i in range(setup.nexp):
tbar += theta[i][m].sum(axis = 1)
tbar /= ntheta
dd = (
+ np.einsum('t,tl->tl', setup.itl, np.einsum('tlk,tk->tl', ntheta * Sigma0_inv_curr, tbar))
+ np.dot(theta0_prior_prec, theta0_prior_mean)
)
theta0[m][:] = chol_sample_1per_constraints(
np.einsum('tlk,tk->tl', cc, dd), cc,
setup.checkConstraints, setup.bounds_mat, setup.bounds.keys(), setup.bounds,
)
## Gibbs update Sigma0
mat *= 0.
for i in range(setup.nexp):
mat += np.einsum(
'tnp,tnq->tpq',
theta[i][m] - theta0[m].reshape(setup.ntemps, 1, setup.p),
theta[i][m] - theta0[m].reshape(setup.ntemps, 1, setup.p),
)
Sigma0_scales = Sigma0_prior_scale + np.einsum('t,tml->tml',setup.itl,mat)
for t in range(setup.ntemps):
Sigma0[m,t] = invwishart.rvs(df = Sigma0_dfs[t], scale = Sigma0_scales[t])
Sigma0_ldet_curr[:] = np.linalg.slogdet(Sigma0[m])[1]
Sigma0_inv_curr[:] = np.linalg.inv(Sigma0[m])
# better decorrelation step, joint
if m % setup.decor == 0:
for k in range(setup.p):
z = np.random.normal()*.1
theta0_cand = theta0[m].copy()
theta0_cand[:,k] += z
good_values_theta0 = setup.checkConstraints(
tran_unif(theta0_cand, setup.bounds_mat, setup.bounds.keys()), setup.bounds,
)
for i in range(setup.nexp):
# Find new candidate values for theta
theta_cand[i][:] = theta[i][m].copy()
theta_eval_mat[i][:] = theta[i][m].reshape(setup.ntheta[i] * setup.ntemps, setup.p)
theta_cand[i][:,:,k] += z
theta_cand_mat[i][:] = theta_cand[i].reshape(setup.ntheta[i]*setup.ntemps, setup.p)
# Compute constraint flags
good_values_mat[i][:] = setup.checkConstraints(
tran_unif(theta_cand_mat[i], setup.bounds_mat, setup.bounds.keys()), setup.bounds,
)
# Generate predictions at "good" candidate values
theta_eval_mat[i][good_values_mat[i]] = theta_cand_mat[i][good_values_mat[i]]
good_values[i][:] = (good_values_mat[i].reshape(setup.ntemps, setup.ntheta[i]).T * good_values_theta0).T
pred_cand[i][:] = setup.models[i].eval(
tran_unif(theta_eval_mat[i], setup.bounds_mat, setup.bounds.keys()), pool=False
)#.reshape(setup.ntemps, setup.ntheta[i], setup.y_lens[i])
#sse_cand[i][:] = ((pred_cand[i] - setup.ys[i])**2 @ s2_ind_mat[i]) / s2[i][m]
for t in range(setup.ntemps):
for j in range(setup.ntheta[i]):
#llik_cand[i][t][j] = setup.models[i].llik(setup.ys[i][setup.theta_ind[i]==j], pred_cand[i][t][setup.theta_ind[i]==j], marg_lik_cov_curr[i][t][j])
llik_cand[i][t][j] = setup.models[i].llik(setup.ys[i][theta_which_mat[i][j]], pred_cand[i][t][theta_which_mat[i][j]], marg_lik_cov_curr[i][t][j])
alpha[i][:] = - np.inf
alpha[i][good_values[i]] = (
itl_mat[i][good_values[i]] * (
llik_cand[i][good_values[i]] - llik_curr[i][good_values[i]]
)
+ itl_mat[i][good_values[i]] * (
+ mvnorm_logpdf_(theta_cand[i], theta0_cand,
Sigma0_inv_curr, Sigma0_ldet_curr)[good_values[i]]
- mvnorm_logpdf_(theta[i][m], theta0[m],
Sigma0_inv_curr, Sigma0_ldet_curr)[good_values[i]]
)
)
# now sum over alpha (for each temperature), add alpha for theta0 to prior, accept or reject
#alpha_tot = mvnorm_logpdf_(theta0_cand, theta0_prior_mean.reshape(setup.ntemps,setup.p), theta0_prior_prec, theta0_prior_ldet)*itl + sum(alpha)
alpha_tot = sum(alpha).T -0.5 * setup.itl * np.diag((theta0_cand - theta0_prior_mean) @ | |
#!/usr/bin/env python
#
# Copyright 2016 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A client that manages Android compute engine instances.
** AndroidComputeClient **
AndroidComputeClient derives from ComputeClient. It manages a Google
compute engine project that is setup for running Android instances.
It knows how to create android GCE images and instances.
** Class hierarchy **
base_cloud_client.BaseCloudApiClient
^
|
gcompute_client.ComputeClient
^
|
gcompute_client.AndroidComputeClient
"""
import getpass
import logging
import os
import uuid
from acloud import errors
from acloud.internal import constants
from acloud.internal.lib import gcompute_client
from acloud.internal.lib import utils
from acloud.public import config
logger = logging.getLogger(__name__)
_ZONE = "zone"
_VERSION = "version"
class AndroidComputeClient(gcompute_client.ComputeClient):
"""Client that manages Anadroid Virtual Device."""
IMAGE_NAME_FMT = "img-{uuid}-{build_id}-{build_target}"
DATA_DISK_NAME_FMT = "data-{instance}"
BOOT_COMPLETED_MSG = "VIRTUAL_DEVICE_BOOT_COMPLETED"
BOOT_STARTED_MSG = "VIRTUAL_DEVICE_BOOT_STARTED"
BOOT_CHECK_INTERVAL_SECS = 10
OPERATION_TIMEOUT_SECS = 20 * 60 # Override parent value, 20 mins
NAME_LENGTH_LIMIT = 63
# If the generated name ends with '-', replace it with REPLACER.
REPLACER = "e"
    def __init__(self, acloud_config, oauth2_credentials):
        """Initialize the client from an AcloudConfig.

        Copies zone/machine/network/display settings off the config object,
        registers a per-instance ssh key in the instance metadata, and builds
        the dict used for reporting (zone + acloud version).

        Args:
            acloud_config: An AcloudConfig object.
            oauth2_credentials: An oauth2client.OAuth2Credentials instance.
        """
        super(AndroidComputeClient, self).__init__(acloud_config,
                                                   oauth2_credentials)
        self._zone = acloud_config.zone
        self._machine_type = acloud_config.machine_type
        self._min_machine_size = acloud_config.min_machine_size
        self._network = acloud_config.network
        self._orientation = acloud_config.orientation
        self._resolution = acloud_config.resolution
        # Copy so later per-instance edits don't mutate the shared config dict.
        self._metadata = acloud_config.metadata_variable.copy()
        self._ssh_public_key_path = acloud_config.ssh_public_key_path
        self._launch_args = acloud_config.launch_args
        self._instance_name_pattern = acloud_config.instance_name_pattern
        self._AddPerInstanceSshkey()
        self._dict_report = {_ZONE: self._zone,
                             _VERSION: config.GetVersion()}

    # TODO(147047953): New args to control zone metrics check.
def _VerifyZoneByQuota(self):
"""Verify the zone must have enough quota to create instance.
Returns:
Boolean, True if zone have enough quota to create instance.
Raises:
errors.CheckGCEZonesQuotaError: the zone doesn't have enough quota.
"""
if self.EnoughMetricsInZone(self._zone):
return True
raise errors.CheckGCEZonesQuotaError(
"There is no enough quota in zone: %s" % self._zone)
def _AddPerInstanceSshkey(self):
"""Add per-instance ssh key.
Assign the ssh publick key to instacne then use ssh command to
control remote instance via the ssh publick key. Added sshkey for two
users. One is vsoc01, another is current user.
"""
if self._ssh_public_key_path:
rsa = self._LoadSshPublicKey(self._ssh_public_key_path)
logger.info("ssh_public_key_path is specified in config: %s, "
"will add the key to the instance.",
self._ssh_public_key_path)
self._metadata["sshKeys"] = "{0}:{2}\n{1}:{2}".format(getpass.getuser(),
constants.GCE_USER,
rsa)
else:
logger.warning(
"ssh_public_key_path is not specified in config, "
"only project-wide key will be effective.")
@classmethod
def _FormalizeName(cls, name):
"""Formalize the name to comply with RFC1035.
The name must be 1-63 characters long and match the regular expression
[a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a
lowercase letter, and all following characters must be a dash,
lowercase letter, or digit, except the last character, which cannot be
a dash.
Args:
name: A string.
Returns:
name: A string that complies with RFC1035.
"""
name = name.replace("_", "-").lower()
name = name[:cls.NAME_LENGTH_LIMIT]
if name[-1] == "-":
name = name[:-1] + cls.REPLACER
return name
def _CheckMachineSize(self):
"""Check machine size.
Check if the desired machine type |self._machine_type| meets
the requirement of minimum machine size specified as
|self._min_machine_size|.
Raises:
errors.DriverError: if check fails.
"""
if self.CompareMachineSize(self._machine_type, self._min_machine_size,
self._zone) < 0:
raise errors.DriverError(
"%s does not meet the minimum required machine size %s" %
(self._machine_type, self._min_machine_size))
@classmethod
def GenerateImageName(cls, build_target=None, build_id=None):
"""Generate an image name given build_target, build_id.
Args:
build_target: Target name, e.g. "aosp_cf_x86_phone-userdebug"
build_id: Build id, a string, e.g. "2263051", "P2804227"
Returns:
A string, representing image name.
"""
if not build_target and not build_id:
return "image-" + uuid.uuid4().hex
name = cls.IMAGE_NAME_FMT.format(
build_target=build_target,
build_id=build_id,
uuid=uuid.uuid4().hex[:8])
return cls._FormalizeName(name)
@classmethod
def GetDataDiskName(cls, instance):
"""Get data disk name for an instance.
Args:
instance: An instance_name.
Returns:
The corresponding data disk name.
"""
name = cls.DATA_DISK_NAME_FMT.format(instance=instance)
return cls._FormalizeName(name)
def GenerateInstanceName(self, build_target=None, build_id=None):
"""Generate an instance name given build_target, build_id.
Target is not used as instance name has a length limit.
Args:
build_target: Target name, e.g. "aosp_cf_x86_phone-userdebug"
build_id: Build id, a string, e.g. "2263051", "P2804227"
Returns:
A string, representing instance name.
"""
name = self._instance_name_pattern.format(build_target=build_target,
build_id=build_id,
uuid=uuid.uuid4().hex[:8])
return self._FormalizeName(name)
    def CreateDisk(self,
                   disk_name,
                   source_image,
                   size_gb,
                   zone=None,
                   source_project=None,
                   disk_type=gcompute_client.PersistentDiskType.STANDARD):
        """Create a gce disk.

        Validates that |disk_name| does not already exist and that
        |source_image| (when given) exists, then delegates to the parent
        ComputeClient implementation.

        Args:
            disk_name: String, name of disk.
            source_image: String, name to the image name.
            size_gb: Integer, size in gigabytes.
            zone: String, name of the zone, e.g. us-central1-b.
            source_project: String, required if the image is located in a different
                            project.
            disk_type: String, a value from PersistentDiskType, STANDARD
                       for regular hard disk or SSD for solid state disk.

        Raises:
            errors.DriverError: if the disk already exists, or the source
                image does not.
        """
        # NOTE(review): source_project and disk_type are accepted but never
        # forwarded to the parent CreateDisk call below — confirm whether
        # that is intentional or a dropped parameter.
        if self.CheckDiskExists(disk_name, self._zone):
            raise errors.DriverError(
                "Failed to create disk %s, already exists." % disk_name)
        if source_image and not self.CheckImageExists(source_image):
            raise errors.DriverError(
                "Failed to create disk %s, source image %s does not exist." %
                (disk_name, source_image))
        super(AndroidComputeClient, self).CreateDisk(
            disk_name,
            source_image=source_image,
            size_gb=size_gb,
            zone=zone or self._zone)
@staticmethod
def _LoadSshPublicKey(ssh_public_key_path):
"""Load the content of ssh public key from a file.
Args:
ssh_public_key_path: String, path to the public key file.
E.g. ~/.ssh/acloud_rsa.pub
Returns:
String, content of the file.
Raises:
errors.DriverError if the public key file does not exist
or the content is not valid.
"""
key_path = os.path.expanduser(ssh_public_key_path)
if not os.path.exists(key_path):
raise errors.DriverError(
"SSH public key file %s does not exist." % key_path)
with open(key_path) as f:
rsa = f.read()
rsa = rsa.strip() if rsa else rsa
utils.VerifyRsaPubKey(rsa)
return rsa
    # pylint: disable=too-many-locals, arguments-differ
    @utils.TimeExecute("Creating GCE Instance")
    def CreateInstance(self,
                       instance,
                       image_name,
                       machine_type=None,
                       metadata=None,
                       network=None,
                       zone=None,
                       disk_args=None,
                       image_project=None,
                       gpu=None,
                       extra_disk_name=None,
                       avd_spec=None,
                       extra_scopes=None,
                       tags=None):
        """Create a gce instance with a gce image.

        NOTE(review): the caller-supplied machine_type, metadata, network,
        zone and disk_args arguments are ignored — the body below overwrites
        them with the values configured on this client (and disk args derived
        from image_name). Confirm callers do not rely on passing them.
        avd_spec must not be None: avd_spec.avd_type and hw_property are read
        unconditionally.

        Args:
            instance: String, instance name.
            image_name: String, source image used to create this disk.
            machine_type: String, representing machine_type,
                          e.g. "n1-standard-1"
            metadata: Dict, maps a metadata name to its value.
            network: String, representing network name, e.g. "default"
            zone: String, representing zone name, e.g. "us-central1-f"
            disk_args: A list of extra disk args (strings), see _GetDiskArgs
                       for example, if None, will create a disk using the given
                       image.
            image_project: String, name of the project where the image
                           belongs. Assume the default project if None.
            gpu: String, type of gpu to attach. e.g. "nvidia-tesla-k80", if
                 None no gpus will be attached. For more details see:
                 https://cloud.google.com/compute/docs/gpus/add-gpus
            extra_disk_name: String,the name of the extra disk to attach.
            avd_spec: AVDSpec object that tells us what we're going to create.
            extra_scopes: List, extra scopes (strings) to be passed to the
                          instance.
            tags: A list of tags to associate with the instance. e.g.
                  ["http-server", "https-server"]
        """
        self._CheckMachineSize()
        # Disk args are always derived from the image; the parameter above is
        # effectively unused.
        disk_args = self._GetDiskArgs(instance, image_name)
        # Start from the client's configured metadata, not the caller's.
        metadata = self._metadata.copy()
        metadata["cfg_sta_display_resolution"] = self._resolution
        metadata["t_force_orientation"] = self._orientation
        metadata[constants.INS_KEY_AVD_TYPE] = avd_spec.avd_type
        # Use another METADATA_DISPLAY to record resolution which will be
        # retrieved in acloud list cmd. We try not to use cvd_01_x_res
        # since cvd_01_xxx metadata is going to deprecated by cuttlefish.
        metadata[constants.INS_KEY_DISPLAY] = ("%sx%s (%s)" % (
            avd_spec.hw_property[constants.HW_X_RES],
            avd_spec.hw_property[constants.HW_Y_RES],
            avd_spec.hw_property[constants.HW_ALIAS_DPI]))
        super(AndroidComputeClient, self).CreateInstance(
            instance, image_name, self._machine_type, metadata, self._network,
            self._zone, disk_args, image_project, gpu, extra_disk_name,
            extra_scopes=extra_scopes, tags=tags)
    def CheckBootFailure(self, serial_out, instance):
        """Determine if serial output has indicated any boot failure.

        No-op hook in this base class: subclasses override it to scan
        |serial_out| for device-specific failure markers (called from
        CheckBoot before the success markers are tested).

        Args:
            serial_out: string
            instance: string, instance name.

        Raises:
            Raises errors.DeviceBootError exception if a failure is detected.
        """
        pass
def CheckBoot(self, instance):
"""Check once to see if boot completes.
Args:
instance: string, instance name.
Returns:
True if the BOOT_COMPLETED_MSG or BOOT_STARTED_MSG appears in serial
port output, otherwise False.
"""
try:
serial_out = self.GetSerialPortOutput(instance=instance, port=1)
self.CheckBootFailure(serial_out, instance)
return ((self.BOOT_COMPLETED_MSG in serial_out)
or (self.BOOT_STARTED_MSG in serial_out))
except errors.HttpError as e:
if e.code == 400:
logger.debug("CheckBoot: Instance is not ready yet %s", str(e))
return False
raise
def WaitForBoot(self, instance, boot_timeout_secs=None):
"""Wait for boot to completes or hit timeout.
Args:
instance: string, instance name.
boot_timeout_secs: Integer, the maximum time in seconds used to
wait for the AVD to boot.
"""
boot_timeout_secs = boot_timeout_secs or constants.DEFAULT_CF_BOOT_TIMEOUT
logger.info("Waiting for instance to boot up %s for %s secs",
instance, boot_timeout_secs)
timeout_exception = errors.DeviceBootTimeoutError(
"Device %s did not finish on boot within timeout (%s secs)" %
(instance, boot_timeout_secs))
utils.PollAndWait(
| |
out
def _reencode(self, items):
"""Erase & rebuild the OSCMessage contents from the given
list of (typehint, value) tuples"""
self.clearData()
for item in items:
self.append(item[1], item[0])
def values(self):
"""Returns a list of the arguments appended so far
"""
return decodeOSC(self.getBinary())[2:]
def tags(self):
"""Returns a list of typetags of the appended arguments
"""
return list(self.typetags.lstrip(','))
def items(self):
"""Returns a list of (typetag, value) tuples for
the arguments appended so far
"""
out = []
values = list(self.values())
typetags = self.tags()
for i in range(len(values)):
out.append((typetags[i], values[i]))
return out
def __contains__(self, val):
"""Test if the given value appears in the OSCMessage's arguments
"""
return (val in list(self.values()))
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return list(self.values())[i]
def __delitem__(self, i):
"""Removes the indicated argument (or slice)
"""
items = list(self.items())
del items[i]
self._reencode(items)
def _buildItemList(self, values, typehint=None):
if isinstance(values, OSCMessage):
items = list(values.items())
elif isinstance(values,list):
items = []
for val in values:
if isinstance(val,tuple):
items.append(val[:2])
else:
items.append((typehint, val))
elif isinstance(values,tuple):
items = [values[:2]]
else:
items = [(typehint, values)]
return items
def __setitem__(self, i, val):
"""Set indicatated argument (or slice) to a new value.
'val' can be a single int/float/string, or a (typehint, value) tuple.
Or, if 'i' is a slice, a list of these or another OSCMessage.
"""
items = list(self.items())
new_items = self._buildItemList(val)
if not isinstance(i,slice):
if len(new_items) != 1:
raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
new_items = new_items[0]
# finally...
items[i] = new_items
self._reencode(items)
def setItem(self, i, val, typehint=None):
"""Set indicated argument to a new value (with typehint)
"""
items = list(self.items())
items[i] = (typehint, val)
self._reencode(items)
def copy(self):
"""Returns a deep copy of this OSCMessage
"""
msg = self.__class__(self.address)
msg.typetags = self.typetags
msg.message = self.message
return msg
def count(self, val):
"""Returns the number of times the given value occurs in the OSCMessage's arguments
"""
return list(self.values()).count(val)
def index(self, val):
"""Returns the index of the first occurence of the given value in the OSCMessage's arguments.
Raises ValueError if val isn't found
"""
return list(self.values()).index(val)
def extend(self, values):
"""Append the contents of 'values' to this OSCMessage.
'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
"""
items = list(self.items()) + self._buildItemList(values)
self._reencode(items)
def insert(self, i, val, typehint = None):
"""Insert given value (with optional typehint) into the OSCMessage
at the given index.
"""
items = list(self.items())
for item in reversed(self._buildItemList(val)):
items.insert(i, item)
self._reencode(items)
def popitem(self, i):
"""Delete the indicated argument from the OSCMessage, and return it
as a (typetag, value) tuple.
"""
items = list(self.items())
item = items.pop(i)
self._reencode(items)
return item
def pop(self, i):
"""Delete the indicated argument from the OSCMessage, and return it.
"""
return self.popitem(i)[1]
def reverse(self):
"""Reverses the arguments of the OSCMessage (in place)
"""
items = list(self.items())
items.reverse()
self._reencode(items)
def remove(self, val):
"""Removes the first argument with the given value from the OSCMessage.
Raises ValueError if val isn't found.
"""
items = list(self.items())
# this is not very efficient...
i = 0
for (t, v) in items:
if (v == val):
break
i += 1
else:
raise ValueError("'%s' not in OSCMessage" % str(m))
# but more efficient than first calling self.values().index(val),
# then calling self.items(), which would in turn call self.values() again...
del items[i]
self._reencode(items)
def __iter__(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(list(self.values()))
def __reversed__(self):
"""Returns a reverse iterator of the OSCMessage's arguments
"""
return reversed(list(self.values()))
def itervalues(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(list(self.values()))
def iteritems(self):
"""Returns an iterator of the OSCMessage's arguments as
(typetag, value) tuples
"""
return iter(list(self.items()))
def itertags(self):
"""Returns an iterator of the OSCMessage's arguments' typetags
"""
return iter(self.tags())
class OSCBundle(OSCMessage):
    """Builds a 'bundle' of OSC messages.

    OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
    An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
    (And yes, OSC-bundles may contain other OSC-bundles...)

    OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
      - if an item or items to be appended or inserted are not OSCMessage objects,
        OSCMessage objects are created to encapsulate the item(s)
      - an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
        The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
      - OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
        The default timetag value (0) means 'immediately'
    """
    def __init__(self, address="", time=0):
        """Instantiate a new OSCBundle.

        The default OSC-address for newly created OSCMessages
        can be specified with the 'address' argument
        The bundle's timetag can be set with the 'time' argument
        """
        super(OSCBundle, self).__init__(address)
        self.timetag = time
    def __str__(self):
        """Returns the Bundle's contents (and timetag, if nonzero) as a string.
        """
        if (self.timetag > 0.):
            out = "#bundle (%s) [" % self.getTimeTagStr()
        else:
            out = "#bundle ["
        if self.__len__():
            for val in list(self.values()):
                out += "%s, " % str(val)
            out = out[:-2]  # strip trailing space and comma
        return out + "]"
    def setTimeTag(self, time):
        """Set or change the OSCBundle's TimeTag
        In 'Python Time', that's floating seconds since the Epoch

        Negative values are silently ignored.
        """
        if time >= 0:
            self.timetag = time
    def getTimeTagStr(self):
        """Return the TimeTag as a human-readable string
        (HH:MM:SS.mmm, local time).
        """
        fract, secs = math.modf(self.timetag)
        out = time.ctime(secs)[11:19]
        out += ("%.3f" % fract)[1:]
        return out
    def append(self, argument, typehint = None):
        """Appends data to the bundle, creating an OSCMessage to encapsulate
        the provided argument unless this is already an OSCMessage.
        Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
        If 'argument' is an iterable, its elements will be encapsuated by a single OSCMessage.
        Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
          - if 'addr' appears in the dict, its value overrides the OSCBundle's address
          - if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
        """
        if isinstance(argument, OSCMessage):
            binary = OSCBlob(argument.getBinary())
        else:
            msg = OSCMessage(self.address)
            if isinstance(argument,dict):
                if 'addr' in argument:
                    msg.setAddress(argument['addr'])
                if 'args' in argument:
                    msg.append(argument['args'], typehint)
            else:
                msg.append(argument, typehint)
            binary = OSCBlob(msg.getBinary())
        # Each contained message is stored as a blob; 'b' tracks one entry.
        self.message += binary
        self.typetags += 'b'
    def getBinary(self):
        """Returns the binary representation of the message

        Layout per the OSC spec: "#bundle" string, 8-byte timetag, then the
        size-prefixed blobs of the contained messages.
        """
        binary = OSCString("#bundle")
        binary += OSCTimeTag(self.timetag)
        binary += self.message
        return binary
    def _reencapsulate(self, decoded):
        # Rebuild an OSCBundle/OSCMessage object tree from a decodeOSC()
        # result (recursively for nested bundles).
        if decoded[0] == "#bundle":
            msg = OSCBundle()
            msg.setTimeTag(decoded[1])
            for submsg in decoded[2:]:
                msg.append(self._reencapsulate(submsg))
        else:
            msg = OSCMessage(decoded[0])
            tags = decoded[1].lstrip(',')
            for i in range(len(tags)):
                msg.append(decoded[2+i], tags[i])
        return msg
    def values(self):
        """Returns a list of the OSCMessages appended so far
        """
        out = []
        for decoded in decodeOSC(self.getBinary())[2:]:
            out.append(self._reencapsulate(decoded))
        return out
    def __eq__(self, other):
        """Return True if two OSCBundles have the same timetag & content
        """
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 — confirm bundles are never used as dict
        # keys or set members.
        if not isinstance(other, self.__class__):
            return False
        return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)
    def copy(self):
        """Returns a deep copy of this OSCBundle
        """
        copy = super(OSCBundle, self).copy()
        copy.timetag = self.timetag
        return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
    """Convert a string into a zero-padded OSC String.
    The length of the resulting string is always a multiple of 4 bytes.
    The string ends with 1 to 4 zero-bytes ('\x00')
    """
    # +1 guarantees at least one terminating NUL before rounding up to 4.
    padded_len = int(math.ceil((len(next) + 1) / 4.0)) * 4
    return struct.pack(">%ds" % padded_len, str(next).encode('latin1'))
def OSCBlob(next):
    """Convert a string into an OSC Blob.
    An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
    The size is always a mutiple of 4 bytes.
    The blob ends with 0 to 3 zero-bytes ('\x00')
    """
    data = next
    if isinstance(data, str):
        data = data.encode('latin1')
    if not isinstance(data, bytes):
        # Unsupported payload type: encode as nothing.
        return b''
    padded_len = int(math.ceil(len(data) / 4.0)) * 4
    # Note: the padded length (not the raw length) is written as the prefix,
    # matching the original implementation.
    return struct.pack(">i%ds" % padded_len, padded_len, data)
def OSCArgument(next, typehint=None):
    """ Convert some Python types to their
    OSC binary representations, returning a
    (typetag, data) tuple.
    """
    def as_string():
        # Fallback representation: everything can be an OSC string.
        return ('s', OSCString(next))

    def packed(fmt, tag, cast):
        # Pack as a number, falling back to a string when the cast fails.
        try:
            return (tag, struct.pack(fmt, cast(next)))
        except ValueError:
            return as_string()

    if not typehint:
        if type(next) in FloatTypes:
            return ('f', struct.pack(">f", float(next)))
        if type(next) in IntTypes:
            return ('i', struct.pack(">i", int(next)))
        return as_string()
    if typehint == 'd':
        return packed(">d", 'd', float)
    if typehint == 'f':
        return packed(">f", 'f', float)
    if typehint == 'i':
        return packed(">i", 'i', int)
    return as_string()
def OSCTimeTag(time):
    """Convert a time in floating seconds to its
    OSC binary representation
    """
    if time <= 0:
        # Timetag (0, 1) is the OSC special value for "immediately".
        return struct.pack('>LL', 0, 1)
    fract, secs = math.modf(time)
    return struct.pack('>LL', int(secs - NTP_epoch),
                       int(fract * NTP_units_per_second))
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = data.find(b'\0')
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length].decode('latin1'), data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print("Error: too few bytes for int", data, len(data))
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
big = (int(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">LL", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(NTP_epoch + high) + float(low / NTP_units_per_second)
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print("Error: too few bytes for float", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
if(len(data)<8):
print("Error: too few bytes for double", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (float, rest)
def decodeOSC(data):
"""Converts a binary OSC message to a Python | |
- mu ** 2) # use simple trig identity
# Sort the projected radii and corresponding intensity spectra into ascending
# order (i.e. from disk center to the limb), which is equivalent to sorting
# MU in descending order.
isort = np.argsort(rmu)
rmu = rmu[isort] # reorder projected radii
nmu = np.size(mu) # number of radii
if nmu == 1:
if vsini != 0:
logger.warning(
"Vsini is non-zero, but only one projected radius (mu value) is set. No rotational broadening will be performed."
)
vsini = 0 # ignore vsini if only 1 mu
# Calculate projected radii for boundaries of disk integration annuli. The n+1
# boundaries are selected such that r(i+1) exactly bisects the area between
# rmu(i) and rmu(i+1). The in!=rmost boundary, r(0) is set to 0 (disk center)
# and the outermost boundary, r(nmu) is set to 1 (limb).
if nmu > 1 or vsini != 0: # really want disk integration
r = np.sqrt(
0.5 * (rmu[:-1] ** 2 + rmu[1:] ** 2)
) # area midpoints between rmu
r = np.concatenate(([0], r, [1]))
# Calculate integration weights for each disk integration annulus. The weight
# is just given by the relative area of each annulus, normalized such that
# the sum of all weights is unity. Weights for limb darkening are included
# explicitly in the intensity profiles, so they aren't needed here.
wt = r[1:] ** 2 - r[:-1] ** 2 # weights = relative areas
else:
wt = np.array([1.0]) # single mu value, full weight
# Generate index vectors for input and oversampled points. Note that the
# oversampled indicies are carefully chosen such that every "os" finely
# sampled points fit exactly into one input bin. This makes it simple to
# "integrate" the finely sampled points at the end of the routine.
npts = inten.shape[1] # number of points
xpix = np.arange(npts, dtype=float) # point indices
nfine = os * npts # number of oversampled points
xfine = (0.5 / os) * (
2 * np.arange(nfine, dtype=float) - os + 1
) # oversampled points indices
# Loop through annuli, constructing and convolving with rotation kernels.
yfine = np.empty(nfine) # init oversampled intensities
flux = np.zeros(nfine) # init flux vector
for imu in range(nmu): # loop thru integration annuli
# Use external cubic spline routine (adapted from Numerical Recipes) to make
# an oversampled version of the intensity profile for the current annulus.
ypix = inten[isort[imu]] # extract intensity profile
if os == 1:
# just copy (use) original profile
yfine = ypix
else:
# spline onto fine wavelength scale
yfine = interp1d(xpix, ypix, kind="cubic")(xfine)
# Construct the convolution kernel which describes the distribution of
# rotational velocities present in the current annulus. The distribution has
# been derived analytically for annuli of arbitrary thickness in a rigidly
# rotating star. The kernel is constructed in two pieces: o!= piece for
# radial velocities less than the maximum velocity along the inner edge of
# the annulus, and one piece for velocities greater than this limit.
if vsini > 0:
# nontrivial case
r1 = r[imu] # inner edge of annulus
r2 = r[imu + 1] # outer edge of annulus
dv = deltav / os # oversampled velocity spacing
maxv = vsini * r2 # maximum velocity in annulus
nrk = 2 * int(maxv / dv) + 3 ## oversampled kernel point
# velocity scale for kernel
v = dv * (np.arange(nrk, dtype=float) - ((nrk - 1) / 2))
rkern = np.zeros(nrk) # init rotational kernel
j1 = np.abs(v) < vsini * r1 # low velocity points
rkern[j1] = np.sqrt((vsini * r2) ** 2 - v[j1] ** 2) - np.sqrt(
(vsini * r1) ** 2 - v[j1] ** 2
) # generate distribution
j2 = (np.abs(v) >= vsini * r1) & (np.abs(v) <= vsini * r2)
rkern[j2] = np.sqrt(
(vsini * r2) ** 2 - v[j2] ** 2
) # generate distribution
rkern = rkern / np.sum(rkern) # normalize kernel
# Convolve the intensity profile with the rotational velocity kernel for this
# annulus. Pad each end of the profile with as many points as are in the
# convolution kernel. This reduces Fourier ringing. The convolution may also
# be do!= with a routi!= called "externally" from IDL, which efficiently
# shifts and adds.
if nrk > 3:
yfine = convolve(yfine, rkern, mode="nearest")
# Calculate projected sigma for radial and tangential velocity distributions.
muval = mu[isort[imu]] # current value of mu
sigma = os * vrt / np.sqrt(2) / deltav # standard deviation in points
sigr = sigma * muval # reduce by current mu value
sigt = sigma * np.sqrt(1.0 - muval ** 2) # reduce by np.sqrt(1-mu**2)
# Figure out how many points to use in macroturbulence kernel.
nmk = int(10 * sigma)
nmk = np.clip(nmk, 3, (nfine - 3) // 2)
# Construct radial macroturbulence kernel with a sigma of mu*VRT/np.sqrt(2).
if sigr > 0:
xarg = np.linspace(-nmk, nmk, 2 * nmk + 1) / sigr
xarg = np.clip(-0.5 * xarg ** 2, -20, None)
mrkern = np.exp(xarg) # compute the gaussian
mrkern = mrkern / np.sum(mrkern) # normalize the profile
else:
mrkern = np.zeros(2 * nmk + 1) # init with 0d0
mrkern[nmk] = 1.0 # delta function
# Construct tangential kernel with a sigma of np.sqrt(1-mu**2)*VRT/np.sqrt(2).
if sigt > 0:
xarg = np.linspace(-nmk, nmk, 2 * nmk + 1) / sigt
xarg = np.clip(-0.5 * xarg ** 2, -20, None)
mtkern = np.exp(xarg) # compute the gaussian
mtkern = mtkern / np.sum(mtkern) # normalize the profile
else:
mtkern = np.zeros(2 * nmk + 1) # init with 0d0
mtkern[nmk] = 1.0 # delta function
# Sum the radial and tangential components, weighted by surface area.
area_r = 0.5 # assume equal areas
area_t = 0.5 # ar+at must equal 1
mkern = area_r * mrkern + area_t * mtkern # add both components
# Convolve the total flux profiles, again padding the spectrum on both ends to
# protect against Fourier ringing.
yfine = convolve(
yfine, mkern, mode="nearest"
) # add the padding and convolve
# Add contribution from current annulus to the running total.
flux = flux + wt[imu] * yfine # add profile to running total
flux = np.reshape(flux, (npts, os)) # convert to an array
flux = np.pi * np.sum(flux, axis=1) / os # sum, normalize
return flux
def sequential_synthesize_segments(
self,
sme,
segments,
wmod,
smod,
cmod,
reuse_wavelength_grid,
dll_id=None,
):
for il in tqdm(segments, desc="Segments", leave=False):
wmod[il], smod[il], cmod[il] = self.synthesize_segment(
sme,
il,
reuse_wavelength_grid,
il != segments[0],
dll_id=dll_id,
)
return wmod, smod, cmod
    def get_dll_id(self, dll=None):
        """Return a stable id for *dll*, registering it if unseen.

        Maintains the module-level __DLL_IDS__ / __DLL_DICT__ caches so the
        (unpicklable) library object can be referred to by id from worker
        processes (see parallel_synthesize_segments).

        Args:
            dll: a library object or an already-registered id;
                 defaults to self.dll.

        Returns:
            The id mapped to the dll object (a uuid for new registrations).
        """
        if dll is None:
            dll = self.dll
        if dll in __DLL_IDS__:
            dll_id = __DLL_IDS__[dll]
        elif dll in __DLL_DICT__:
            # 'dll' is already an id: return it unchanged.
            dll_id = dll
        else:
            # First sighting: mint a fresh id and register both mappings.
            dll_id = uuid.uuid4()
            __DLL_DICT__[dll_id] = dll
            __DLL_IDS__[dll] = dll_id
        return dll_id
def get_dll(self, dll_id=None):
if dll_id is None:
dll_id = self.dll
if dll_id in __DLL_DICT__:
return __DLL_DICT__[dll_id]
else:
return dll_id
def parallel_synthesize_segments(
self,
sme,
segments,
wmod,
smod,
cmod,
reuse_wavelength_grid,
dll_id=None,
):
# Make sure the dll is recorded in the global variables
dll = self.get_dll(dll_id)
dll_id = self.get_dll_id(dll)
# We calculate the first segment sequentially
with tqdm(desc="Segments", total=len(segments), leave=False) as progress:
il = segments[0]
wmod[il], smod[il], cmod[il] = self.synthesize_segment(
sme, il, reuse_wavelength_grid, False, dll_id=dll_id
)
progress.update(1)
# and then all others in parrallel
# since we can keep the line opacities from the calculation of the first segment
# TODO: do the line opacities also in parallel?
# For multiple Processes we need to pickle all the components
# BUT we can not pickle the smelib, since it has pointers (in the state)
# Therefore we cheat by putting the library in a global variable
# but only with a unqiue id, that | |
#!/bin/python3
#*##############################| OREGON TRAIL by KLUTCH |################################
#?################################## DEVELOPMENT LIST ####################################
### TODO:
# Difficulty setting
## Easy, Normal, Hard
## Adjust modifiers and starting values for each difficulty level
# Travel cycles
## Random events
## Time passed
## Locations
## River crossings
## Terrain and weather variables
# Game conditionals
## Travel pace
## Travel distance
## Health depletion
## Food depletion
## Sickness and death
# Game menus:
## Title screen menu
## Settings menu
## Save and load game menu
# Interaction
## Indians
## Townsfolk
## Other travelers
## Trading
## Randomization
# Supplies and purchases
## Subtract balance on purchases
## Add to balance if money added
# Highscores
## Show "Original Top Ten" high scores list
## Show "Current Top Ten" high scores list
## Reset the "Current Top Ten" high scores list
## Add player name to current high scores list if player qualifies
# Saving and loading game progress
## Generate a file to save progress data using key value pairs
## Export save file as text file (or csv?) and convert to "read only"
## Export readable PDF with current and/or end of game stats
## Load saved game files to continure game
# Hunting Module
## Animal and terrain selection and randomization
## How much food produced from a hunt
## Length of hunt in days
# Game dialog and interactivity
## General info, in-game messages, and errors
## Text animation and loading spinners
## Facts about the Oregon Trail AND the original Oregon Trail games
#* COMPLETED:
#* [√] Get names of the wagon party
#* [√] Display wagon party menu with player name(s), health %, and current status
#* [√] Display "deceased" under players that perish during the game
#* [√] Display player name, health %, and health status
#? #######################################################################################
# IMPORTS
import sys # SYSTEM FUNCTIONS
import os # OS FUNCTIONS
import random # RANDOM NUMBERS
import datetime # TIME AND DATE
import time # TIMING FUNCTIONS
# GAME DIALOG
divider = '------'* 6
select_option = 'SELECT AN OPTION:'
help_option = '\nPress [?] for HELP'
quit_option = '\nPress [q] to QUIT'
continue_option = '\nPress [enter] to continue'
selection_error = '\nINVALID SELECTION! \nPress [enter]...'
when_to_start = 'WHEN DO YOU WANT TO START?'
ask_travel_pace = 'How fast do you want to travel?'
# Stopping points in the game
stops = (
'Independence, Missouri','Big Blue River Crossing','Fort Kearney',
'Chimney Rock','Fort Laramie','Independence Rock','South Pass',
'Fort Bridger','Green River Crossing','Soda Springs','Fort Hall',
'Snake River Crossing','Fort Boise','Grande Ronde in the Blue Mountains',
'Fort Walla Walla','The Dalles','Willamette Valley',)
# Months
months = (
'January','February','March','April','May','June','July',
'August','September','October','November','December',)
# Travel pace
travel_pace = (
'Steady','Strenuous','Grueling')
# Rations consumption
rations_consumption = (
'Filling','Meager','Bare Bones')
# Health status
health_status = (
'Good','Fair','Poor','Very Poor','Dying')
# Sickness
sickness = (
'is suffering from exhaustion.','is sick with typhoid fever.',
'has cholera.','has the measles.','has dysentery.','has a fever.')
# Other travelers and townspeople
people = (
'Zeke','Jed','Anna','Mary','Joey','Beth',
'John','Sara','Henry','Emily')
# Supply stops
supply_stops = (
'Matt\'s General Store','Fort Kearney','Fort Laramie',
'Fort Bridger','Fort Hall','Fort Boise','Fort Walla Wall')
# GLOBALS — module-level game state, mutated by the setup and travel functions
LEADER = '' # Leader name (set by getLeaderName)
LEADER_HEALTH = 100 # Leader health percentage (0-100)
LEADER_SICK = False # Leader sickness flag
PL_2 = '' # Person 2 name (set by getPerson2Name)
PL_2_HEALTH = 74 # Person 2 health — nonzero default; presumably a demo/test value, TODO confirm
PL_2_SICK = False # Person 2 sickness flag
PL_3 = '' # Person 3 name (set by getPerson3Name)
PL_3_HEALTH = 49 # Person 3 health — nonzero default; presumably a demo/test value, TODO confirm
PL_3_SICK = True # Person 3 sickness flag
PL_4 = '' # Person 4 name (set by getPerson4Name)
PL_4_HEALTH = 24 # Person 4 health — nonzero default; presumably a demo/test value, TODO confirm
PL_4_SICK = True # Person 4 sickness flag
PL_5 = '' # Person 5 name (set by getPerson5Name)
PL_5_HEALTH = 0 # Person 5 health
PL_5_SICK = False # Person 5 sickness flag
MONEY = 800 # Current balance
CLOTHES = 0 # Sets of clothes owned
CLOTHES_COST = 25 # Cost per set of clothes
FOOD = 0 # Pounds of food owned
FOOD_COST = 2 # Cost per pound of food
AMMO = 0 # Rounds of ammunition owned
AMMO_COST = 1 # Cost per round of ammunition
OXEN = 0 # Qty of oxen owned
OXEN_COST = 25 # Cost per ox
WHEELS = 0 # Qty of spare wagon wheels
WHEEL_COST = 10 # Cost per wagon wheel
AXLES = 0 # Qty of spare wagon axles
AXLE_COST = 25 # Cost per wagon axle
TONGUES = 0 # Qty of spare wagon tongues
TONGUE_COST = 50 # Cost per wagon tongue
MILES_TRAVELED = 0 # Miles traveled so far
TRAVEL_DAYS = 0 # Days spent traveling
REST_DAYS = 0 # Days spent resting
HUNT_DAYS= 0 # Days spent hunting
DIFFICULTY_LEVEL = 2 # 1=EASY, 2=NORMAL, 3=HARD (see chooseDifficulty)
# FUNCTIONS
def clearConsole(): # Clear the console
    """Clear the terminal screen.

    Uses 'cls' on Windows and 'clear' elsewhere; the previous version
    always ran 'clear', which does not exist on Windows.
    """
    os.system("cls" if os.name == "nt" else "clear")
def chooseDifficulty():
    """Prompt the player for a difficulty level and store it.

    Sets the DIFFICULTY_LEVEL global to 1 (EASY), 2 (NORMAL) or 3 (HARD).
    Invalid input re-displays the menu instead of crashing (the previous
    version raised NameError on any key other than 1/2/3 because
    `difficulty` was never bound), and answering 'n' to the confirmation
    prompt asks again.
    """
    global DIFFICULTY_LEVEL
    # Menu key -> (numeric level, label shown in the confirmation prompt)
    levels = {'1': (1, 'EASY'), '2': (2, 'NORMAL'), '3': (3, 'HARD')}
    while True:
        clearConsole()
        print('CHOOSE A DIFFICULTY LEVEL\n')
        print('-----' * 6) # Line break
        print('[1] EASY')
        print('[2] NORMAL')
        print('[3] HARD')
        print('-----' * 6) # Line break
        print('[?] SHOW HELP\n')
        print('SELECTION: ')
        selection = input()
        if selection not in levels:
            # TODO(review): '?' should probably call tooltips(); for now any
            # unrecognized key simply re-displays the menu.
            continue
        DIFFICULTY_LEVEL, difficulty = levels[selection]
        print('YOU CHOSE {}. ARE YOU SURE?'.format(difficulty))
        if input() != 'n':
            return
def tooltips():
    """Show the menu/navigation key-binding help screen, then wait for [enter]."""
    clearConsole()
    # NOTE: the help text is one literal; its indentation is part of the output.
    print('''\n
    [MENU AND NAVIGATION]
    ---------------------------------------
    THESE COMMANDS ARE COMMONLY USED WHILE
    NAVIGATING THE GAMES MENUS, SETTINGS,
    DIALOGUE SCREENS, AND WHEN THE USER
    NEEDS TO INPUT AND/OR CONFIRM DATA.
    ACTION(S)     |  [KEY]
    ----------------------------
    YES/SKIP/NEXT |  [ENTER]
    RETURN        |  [B]
    QUIT GAME     |  [Q]''')
    input()  # pause until the player presses enter
def welcomeMessage(): # Show welcome message
    """Print the game's introduction text and wait for the player to press enter."""
    print('''
    Welcome to The Oregon Trail!
    --------------------------------------------------------------------------
    You're about to begin a great adventure, traveling the Oregon Trail across
    the rugged landscape of North America. Your covered wagon, pulled by a
    team of oxen, will travel from Independence, Missouri, to the fertile
    Willamette Valley of the Oregon Territory--a journey of approximately
    2,000 miles.
    Before you set off on the trail, register your name, the names of the
    members of your wagon party, and your occupation. After that, you'll
    need to buy supplies and make other important decisions.
    --------------------------------------------------------------------------
    press ENTER to continue''')
    time.sleep(0.5)  # brief pause before accepting input
    input()
def getLeaderName(): # Get name of wagon party leader
    """Prompt for the wagon-party leader's name; re-ask while the player answers 'n'."""
    global LEADER, LEADER_HEALTH, LEADER_SICK
    while True:
        clearConsole()
        LEADER = input('ENTER THE NAME OF THE WAGON PARTY LEADER:\n')
        print('SAVE LEADER AS {}?'.format(LEADER))
        if input() != 'n':
            return
def getPerson2Name(): # Get the name of person 2
    """Prompt for wagon-party member #2's name; re-ask while the player answers 'n'."""
    global PL_2, PL_2_HEALTH, PL_2_SICK
    while True:
        clearConsole()
        print('\nLEADER: {}'.format(LEADER))
        PL_2 = input('\nENTER PERSON #2: ')
        print('\nSAVE PERSON #2 AS {}?'.format(PL_2))
        if input() != 'n':
            return
def getPerson3Name(): # Get the name of person 3
    """Prompt for wagon-party member #3's name; re-ask while the player answers 'n'."""
    global PL_3, PL_3_HEALTH, PL_3_SICK
    while True:
        clearConsole()
        print('\nLEADER: {}'.format(LEADER))
        print('PERSON #2: {}'.format(PL_2))
        PL_3 = input('\nENTER PERSON #3: ')
        print('\nSAVE PERSON #3 AS {}?'.format(PL_3))
        if input() != 'n':
            return
def getPerson4Name(): # Get the name of person 4
    """Prompt for wagon-party member #4's name; re-ask while the player answers 'n'."""
    global PL_4, PL_4_HEALTH, PL_4_SICK
    while True:
        clearConsole()
        print('\nLEADER: {}'.format(LEADER))
        print('PERSON #2: {}'.format(PL_2))
        print('PERSON #3: {}'.format(PL_3))
        PL_4 = input('\nENTER PERSON #4: ')
        print('\nSAVE PERSON #4 AS {}?'.format(PL_4))
        if input() != 'n':
            return
def getPerson5Name(): # Get the name of person 5
    """Prompt for wagon-party member #5's name; re-ask while the player answers 'n'."""
    global PL_5, PL_5_HEALTH, PL_5_SICK
    while True:
        clearConsole()
        print('\nLEADER: {}'.format(LEADER))
        print('PERSON #2: {}'.format(PL_2))
        print('PERSON #3: {}'.format(PL_3))
        print('PERSON #4: {}'.format(PL_4))
        PL_5 = input('\nENTER PERSON #5: ')
        print('\nSAVE PERSON #5 AS {}?'.format(PL_5))
        if input() != 'n':
            return
def show_names(): # Show the names and current status of the wagon party
global LEADER, LEADER_HEALTH, LEADER_SICK,\
PL_2, PL_2_HEALTH, PL_2_SICK, PL_3, PL_3_HEALTH, PL_3_SICK,\
PL_4, PL_4_HEALTH, PL_4_SICK, PL_5, PL_5_HEALTH, PL_5_SICK
clearConsole() # Clear console
print('\nWAGON PARTY:') # Title
print(divider) # Divider
# Wagon party leader
print('LEADER: ' +LEADER)
if LEADER_HEALTH >= 76 and LEADER_HEALTH <= 100:
print('HEALTH STATUS: Excellent')
print('HEALTH AMOUNT: ' +str(LEADER_HEALTH) +'%')
elif LEADER_HEALTH >= 51 and LEADER_HEALTH <= 75:
print('HEALTH STATUS: Good')
print('HEALTH AMOUNT: ' +str(LEADER_HEALTH) +'%')
elif LEADER_HEALTH >= 25 and LEADER_HEALTH <= 50:
print('HEALTH STATUS: Fair')
print('HEALTH AMOUNT: ' +str(LEADER_HEALTH) +'%')
elif LEADER_HEALTH < 25 and LEADER_HEALTH >= 1:
print('HEALTH STATUS: Poor')
print('HEALTH AMOUNT: ' +str(LEADER_HEALTH) +'%')
elif LEADER_HEALTH | |
with value 7.
"""
assert_bindings(
schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-3.xsd",
instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-3-4.xml",
class_name="Out",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_id_min_length_2_nistxml_sv_iv_list_id_min_length_3_5(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 7."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-3.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-3-5.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_min_length_1_nistxml_sv_iv_list_id_min_length_2_1(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 6."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-2-1.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_min_length_1_nistxml_sv_iv_list_id_min_length_2_2(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 6."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-2-2.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_min_length_1_nistxml_sv_iv_list_id_min_length_2_3(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 6."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-2-3.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_min_length_1_nistxml_sv_iv_list_id_min_length_2_4(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 6."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-2-4.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_min_length_1_nistxml_sv_iv_list_id_min_length_2_5(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 6."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-2-5.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_min_length_nistxml_sv_iv_list_id_min_length_1_1(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 5."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-1.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-1-1.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_min_length_nistxml_sv_iv_list_id_min_length_1_2(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 5."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-1.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-1-2.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_min_length_nistxml_sv_iv_list_id_min_length_1_3(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 5."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-1.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-1-3.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_min_length_nistxml_sv_iv_list_id_min_length_1_4(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 5."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-1.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-1-4.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_min_length_nistxml_sv_iv_list_id_min_length_1_5(mode, save_output, output_format):
    """Type list/ID is restricted by facet minLength with value 5."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-minLength-1.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-minLength-1-5.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_4_nistxml_sv_iv_list_id_max_length_5_1(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 10."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-5.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-5-1.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_4_nistxml_sv_iv_list_id_max_length_5_2(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 10."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-5.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-5-2.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_4_nistxml_sv_iv_list_id_max_length_5_3(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 10."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-5.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-5-3.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_4_nistxml_sv_iv_list_id_max_length_5_4(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 10."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-5.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-5-4.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_4_nistxml_sv_iv_list_id_max_length_5_5(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 10."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-5.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-5-5.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_3_nistxml_sv_iv_list_id_max_length_4_1(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 8."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-4.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-4-1.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_3_nistxml_sv_iv_list_id_max_length_4_2(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 8."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-4.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-4-2.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_3_nistxml_sv_iv_list_id_max_length_4_3(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 8."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-4.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-4-3.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_3_nistxml_sv_iv_list_id_max_length_4_4(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 8."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-4.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-4-4.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_3_nistxml_sv_iv_list_id_max_length_4_5(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 8."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-4.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-4-5.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_2_nistxml_sv_iv_list_id_max_length_3_1(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 7."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-3.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-3-1.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_2_nistxml_sv_iv_list_id_max_length_3_2(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 7."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-3.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-3-2.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_2_nistxml_sv_iv_list_id_max_length_3_3(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 7."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-3.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-3-3.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_2_nistxml_sv_iv_list_id_max_length_3_4(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 7."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-3.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-3-4.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_2_nistxml_sv_iv_list_id_max_length_3_5(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 7."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-3.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-3-5.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_1_nistxml_sv_iv_list_id_max_length_2_1(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 6."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-2-1.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_1_nistxml_sv_iv_list_id_max_length_2_2(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 6."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-2-2.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_1_nistxml_sv_iv_list_id_max_length_2_3(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 6."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-2-3.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_1_nistxml_sv_iv_list_id_max_length_2_4(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 6."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-2-4.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_1_nistxml_sv_iv_list_id_max_length_2_5(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 6."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-2-5.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_nistxml_sv_iv_list_id_max_length_1_1(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 5."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-1.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-1-1.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_nistxml_sv_iv_list_id_max_length_1_2(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 5."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-1.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-1-2.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_nistxml_sv_iv_list_id_max_length_1_3(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 5."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-1.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-1-3.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_nistxml_sv_iv_list_id_max_length_1_4(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 5."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-1.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-1-4.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_id_max_length_nistxml_sv_iv_list_id_max_length_1_5(mode, save_output, output_format):
    """Type list/ID is restricted by facet maxLength with value 5."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-1.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-1-5.xml",
        class_name="Out",
        version="1.1",
    )
def test_list_ncname_white_space_nistxml_sv_iv_list_ncname_white_space_1_1(mode, save_output, output_format):
    """Type list/NCName is restricted by facet whiteSpace with value collapse."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-whiteSpace-1.xsd",
        instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-whiteSpace-1-1.xml",
        class_name="NistschemaSvIvListNcnameWhiteSpace1",
        version="1.1",
    )
def test_list_ncname_white_space_nistxml_sv_iv_list_ncname_white_space_1_2(mode, save_output, output_format):
    """Type list/NCName is restricted by facet whiteSpace with value collapse."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-whiteSpace-1.xsd",
        instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-whiteSpace-1-2.xml",
        class_name="NistschemaSvIvListNcnameWhiteSpace1",
        version="1.1",
    )
def test_list_ncname_white_space_nistxml_sv_iv_list_ncname_white_space_1_3(mode, save_output, output_format):
    """Type list/NCName is restricted by facet whiteSpace with value collapse."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-whiteSpace-1.xsd",
        instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-whiteSpace-1-3.xml",
        class_name="NistschemaSvIvListNcnameWhiteSpace1",
        version="1.1",
    )
def test_list_ncname_white_space_nistxml_sv_iv_list_ncname_white_space_1_4(mode, save_output, output_format):
    """Type list/NCName is restricted by facet whiteSpace with value collapse."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-whiteSpace-1.xsd",
        instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-whiteSpace-1-4.xml",
        class_name="NistschemaSvIvListNcnameWhiteSpace1",
        version="1.1",
    )
def test_list_ncname_white_space_nistxml_sv_iv_list_ncname_white_space_1_5(mode, save_output, output_format):
    """Type list/NCName is restricted by facet whiteSpace with value collapse."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-whiteSpace-1.xsd",
        instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-whiteSpace-1-5.xml",
        class_name="NistschemaSvIvListNcnameWhiteSpace1",
        version="1.1",
    )
def test_list_ncname_enumeration_4_nistxml_sv_iv_list_ncname_enumeration_5_1(mode, save_output, output_format):
    """Type list/NCName is restricted by facet enumeration."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-enumeration-5.xsd",
        instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-enumeration-5-1.xml",
        class_name="NistschemaSvIvListNcnameEnumeration5",
        version="1.1",
    )
def test_list_ncname_enumeration_4_nistxml_sv_iv_list_ncname_enumeration_5_2(mode, save_output, output_format):
    """Type list/NCName is restricted by facet enumeration."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-enumeration-5.xsd",
        instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-enumeration-5-2.xml",
        class_name="NistschemaSvIvListNcnameEnumeration5",
        version="1.1",
    )
def test_list_ncname_enumeration_4_nistxml_sv_iv_list_ncname_enumeration_5_3(mode, save_output, output_format):
    """Type list/NCName is restricted by facet enumeration."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-enumeration-5.xsd",
        instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-enumeration-5-3.xml",
        class_name="NistschemaSvIvListNcnameEnumeration5",
        version="1.1",
    )
def test_list_ncname_enumeration_4_nistxml_sv_iv_list_ncname_enumeration_5_4(mode, save_output, output_format):
    """Type list/NCName is restricted by facet enumeration."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-enumeration-5.xsd",
        instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-enumeration-5-4.xml",
        class_name="NistschemaSvIvListNcnameEnumeration5",
        version="1.1",
    )
def test_list_ncname_enumeration_4_nistxml_sv_iv_list_ncname_enumeration_5_5(mode, save_output, output_format):
    """Type list/NCName is restricted by facet enumeration."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
        schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-enumeration-5.xsd",
        instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-enumeration-5-5.xml",
        class_name="NistschemaSvIvListNcnameEnumeration5",
        version="1.1",
    )
def test_list_ncname_enumeration_3_nistxml_sv_iv_list_ncname_enumeration_4_1(mode, save_output, output_format):
    """Type list/NCName is restricted by facet enumeration."""
    base = "nistData/list/NCName/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-NCName-enumeration-4.xsd",
        instance=base + "NISTXML-SV-IV-list-NCName-enumeration-4-1.xml",
        class_name="NistschemaSvIvListNcnameEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_ncname_enumeration_3_nistxml_sv_iv_list_ncname_enumeration_4_2(mode, save_output, output_format):
    """Type list/NCName is restricted by facet enumeration."""
    base = "nistData/list/NCName/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-NCName-enumeration-4.xsd",
        instance=base + "NISTXML-SV-IV-list-NCName-enumeration-4-2.xml",
        class_name="NistschemaSvIvListNcnameEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_ncname_enumeration_3_nistxml_sv_iv_list_ncname_enumeration_4_3(mode, save_output, output_format):
    """Type list/NCName is restricted by facet enumeration."""
    base = "nistData/list/NCName/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-NCName-enumeration-4.xsd",
        instance=base + "NISTXML-SV-IV-list-NCName-enumeration-4-3.xml",
        class_name="NistschemaSvIvListNcnameEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_ncname_enumeration_3_nistxml_sv_iv_list_ncname_enumeration_4_4(mode, save_output, output_format):
    """Type list/NCName is restricted by facet enumeration."""
    base = "nistData/list/NCName/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-NCName-enumeration-4.xsd",
        instance=base + "NISTXML-SV-IV-list-NCName-enumeration-4-4.xml",
        class_name="NistschemaSvIvListNcnameEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_ncname_enumeration_3_nistxml_sv_iv_list_ncname_enumeration_4_5(mode, save_output, output_format):
    """Type list/NCName is restricted by facet enumeration."""
    base = "nistData/list/NCName/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-NCName-enumeration-4.xsd",
        instance=base + "NISTXML-SV-IV-list-NCName-enumeration-4-5.xml",
        class_name="NistschemaSvIvListNcnameEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_list_ncname_enumeration_2_nistxml_sv_iv_list_ncname_enumeration_3_1(mode, save_output, output_format):
"""
Type list/NCName is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-enumeration-3.xsd",
instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-enumeration-3-1.xml",
class_name="NistschemaSvIvListNcnameEnumeration3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
| |
# <gh_stars>10-100  -- scraped-dataset artifact (GitHub star-count bucket), not Python source
import numpy as np
import pandas as pd
import torch
import torch.utils.data
import torchvision
import os.path
import time
import PIL
import matplotlib.pyplot as plt
import copy
# import skimage
# import skimage.io
# Per-channel normalization statistics (ImageNet) required by torchvision's
# pretrained resnet18; apply via transforms.Normalize(MEAN, STD).
MEAN = [0.485, 0.456, 0.406]  # expected by pretrained resnet18
STD = [0.229, 0.224, 0.225]  # expected by pretrained resnet18
class WhaleDataset(torch.utils.data.Dataset):
    """Labelled whale-image dataset for training.

    Parameters
    ----------
    data_frame: (pandas DataFrame) image file names in column 0 and the
        corresponding string labels in column 1.
    data_dir: (string) path to the data directory; images are read from
        ``data_dir/train``.
    transform: optional callable applied to each PIL image.

    __getitem__ returns
    -------------------
    image: PIL image (or whatever ``transform`` produces).
    label: (int) index of the image's label in ``self.categories``.

    Note
    ----
    1. To convert integer labels back to strings use the ``categories``
       attribute (sorted list of unique labels).
    2. Use torchvision ``transforms.ToTensor()`` to tensorize the images.
    """

    def __init__(self, data_frame, data_dir, transform=None):
        # Sorted unique labels; sorting makes label indices deterministic
        # across runs regardless of row order.
        self.categories = sorted(set(data_frame.iloc[:, 1]))
        self.data_frame = data_frame
        self.data_dir = data_dir
        self.transform = transform

    def __len__(self):
        return self.data_frame.shape[0]

    def __getitem__(self, index):
        image_name = os.path.join(self.data_dir, 'train',
                                  self.data_frame.iloc[index, 0])
        image = PIL.Image.open(image_name)
        # Some images are grayscale only; convert to RGB (replicates the
        # single channel three times) so pretrained RGB models accept them.
        if image.mode != 'RGB':
            image = image.convert('RGB')
        if self.transform is not None:  # PEP 8: identity test, not != None
            image = self.transform(image)
        label = self.categories.index(self.data_frame.iloc[index, 1])
        return image, label
class TestDataset(torch.utils.data.Dataset):
    """Unlabelled test-image dataset.

    Parameters
    ----------
    data_frame: (pandas DataFrame) image file names in the first column.
    data_dir: (string) path to the test-data directory.
    transform: optional callable applied to each PIL image.

    __getitem__ returns
    -------------------
    image: PIL image (or whatever ``transform`` produces).
    image_name: (string) the file name, so predictions can be matched back.

    Note
    ----
    ``data_dir`` should point directly at the test data; unlike
    ``WhaleDataset``, no ``'train'`` component is appended.
    """

    def __init__(self, data_frame, data_dir, transform=None):
        self.data_frame = data_frame
        self.data_dir = data_dir
        self.transform = transform

    def __len__(self):
        return self.data_frame.shape[0]

    def __getitem__(self, index):
        image_name = self.data_frame.iloc[index, 0]
        full_image_name = os.path.join(self.data_dir, image_name)
        image = PIL.Image.open(full_image_name)
        # Some images are grayscale only; convert to RGB (replicates the
        # single channel three times) so pretrained RGB models accept them.
        if image.mode != 'RGB':
            image = image.convert('RGB')
        if self.transform is not None:  # PEP 8: identity test, not != None
            image = self.transform(image)
        return image, image_name
def predict(model, test_dataloader, device, categories, verbose=True):
    """Predict top-5 whale IDs for every test image.

    Fixes: the original used Python-2 ``xrange`` and ``print`` statements,
    which are a NameError / SyntaxError under Python 3.

    Parameters
    ----------
    model: trained model returning one score per category.
    test_dataloader: pytorch DataLoader yielding (images, image_names).
    device: torch.device
    categories: list of category names used during training, i.e.
        ``training_dataset.categories``.
    verbose: print elapsed time when True.

    Returns
    -------
    df: pandas DataFrame with columns ['Image', 'Id'], where 'Id' is the
        space-separated top-5 predictions (trailing space preserved, as
        required by the submission format).
    """
    start_time = time.time()
    image_vals = []
    id_vals = []
    for images, image_names in test_dataloader:
        images = images.to(device)
        out = model(images)
        _, preds = torch.topk(out, k=5, dim=1)
        preds = preds.tolist()
        # image_names is a tuple whose length equals the (possibly smaller
        # final) batch size, so iterate over it rather than a fixed size.
        for j in range(len(image_names)):
            image_vals.append(image_names[j])
            id_vals.append(''.join(categories[i] + ' ' for i in preds[j]))
    df = pd.DataFrame({'Image': image_vals, 'Id': id_vals})
    # Make sure 'Image' is column 0 and 'Id' is column 1.
    df = df.reindex(columns=['Image', 'Id'])
    if verbose:
        print('Elapsed time: {:.4f}'.format(time.time() - start_time))
    return df
def predict_v2(model, test_dataloader, device, categories, verbose=True):
    """Predict top-20 categories with normalized probabilities per image.

    Fixes: the original used Python-2 ``xrange`` and ``print`` statements,
    which are a NameError / SyntaxError under Python 3.

    Parameters
    ----------
    model: trained model returning log-scores (they are exponentiated and
        L1-normalized here to obtain probabilities).
    test_dataloader: pytorch DataLoader yielding (images, image_names).
    device: torch.device
    categories: list of category names (must have at least 20 entries).
    verbose: print elapsed time when True.

    Returns
    -------
    df_Id: DataFrame ['Image', 'Id'] with a list of the top-20 category
        names per image.
    df_prob: DataFrame ['Image', 'Probability'] with the matching
        probabilities.
    """
    start_time = time.time()
    image_vals = []
    id_vals = []
    prob_vals = []
    for images, image_names in test_dataloader:
        images = images.to(device)
        out = model(images)
        # Softmax via explicit exp + L1 normalization.
        out = torch.exp(out)
        norm = torch.norm(out, p=1, dim=1, keepdim=True)
        out = out / norm
        pred_val, pred_ind = torch.topk(out, k=20, dim=1)
        pred_val = pred_val.tolist()
        pred_ind = pred_ind.tolist()
        # image_names is a tuple whose length equals the (possibly smaller
        # final) batch size, so iterate over it rather than a fixed size.
        for j in range(len(image_names)):
            image_vals.append(image_names[j])
            id_vals.append([categories[i] for i in pred_ind[j]])
            prob_vals.append(list(pred_val[j]))
    df_Id = pd.DataFrame({'Image': image_vals, 'Id': id_vals})
    df_prob = pd.DataFrame({'Image': image_vals, 'Probability': prob_vals})
    # Make sure 'Image' is column 0 in both frames.
    df_Id = df_Id.reindex(columns=['Image', 'Id'])
    df_prob = df_prob.reindex(columns=['Image', 'Probability'])
    if verbose:
        print('Elapsed time: {:.4f}'.format(time.time() - start_time))
    return df_Id, df_prob
def _val_loop(model, dataloader, criterion, device):
    """Run one validation pass over ``dataloader``.

    Improvement: the whole pass is wrapped in ``torch.no_grad()`` — no
    parameters are updated here, so building the autograd graph only
    wasted memory and time. Returned values are unchanged.

    Parameters
    ----------
    model: the pytorch model to evaluate.
    dataloader: pytorch DataLoader (must expose ``.sampler``).
    criterion: the pytorch loss function.
    device: torch.device

    Returns
    -------
    loss: (float) mean loss over all samples.
    acc: (float) accuracy over all samples.
    """
    size = float(len(dataloader.sampler))
    running_loss = 0.0
    running_corrects = 0
    with torch.no_grad():
        for images, labels in dataloader:
            images = images.to(device)
            labels = labels.to(device)
            out = model(images)
            preds = torch.argmax(out, 1)
            loss = criterion(out, labels)
            # loss.item() is the batch mean; weight by batch size so the
            # final division by ``size`` yields the per-sample mean.
            running_loss += loss.item() * images.size(0)
            running_corrects += int(torch.sum(preds == labels))
    loss = running_loss / size
    acc = float(running_corrects) / size
    return loss, acc
def _train_loop(model, dataloader, criterion, optimizer, device):
    """Run one training epoch over ``dataloader``.

    Fix: the original docstring was a copy-paste of ``_val_loop``'s and
    wrongly described this as the validation loop.

    Parameters
    ----------
    model: the pytorch model to be trained.
    dataloader: pytorch DataLoader (must expose ``.sampler``).
    criterion: the pytorch loss function.
    optimizer: pytorch Optimizer instance.
    device: torch.device

    Returns
    -------
    loss: (float) mean loss over all samples.
    acc: (float) accuracy over all samples.
    """
    size = float(len(dataloader.sampler))
    running_loss = 0.0
    running_corrects = 0
    for images, labels in dataloader:
        images = images.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        # Explicitly enable grad tracking in case a caller disabled it
        # globally (e.g. inside an outer no_grad block).
        with torch.set_grad_enabled(True):
            out = model(images)
            preds = torch.argmax(out, 1)
            loss = criterion(out, labels)
            loss.backward()
            optimizer.step()
        # loss.item() is the batch mean; weight by batch size so the final
        # division by ``size`` yields the per-sample mean.
        running_loss += loss.item() * images.size(0)
        running_corrects += int(torch.sum(preds == labels))
    loss = running_loss / size
    acc = float(running_corrects) / size
    return loss, acc
def find_learning_rate(model, dataloader, criterion, optimizer, device,
                       lr_vals=None, verbose=True):
    """Train while sweeping the learning rate to find a good initial value.

    This is the LR-range-test trick from the fast.ai MOOC. Fixes: Python-2
    ``print`` statements (SyntaxError under Python 3) and ``lr_vals == None``
    (PEP 8: identity comparison; ``==`` would also misbehave for arrays).

    Parameters
    ----------
    model: the pytorch model to be trained.
    dataloader: pytorch DataLoader instance.
    criterion: the pytorch loss function.
    optimizer: pytorch Optimizer instance.
    device: torch.device
    lr_vals: list of learning-rate values; if None a default geometric
        ramp 1e-4 * 1.2**k, k = 0..17, is used.
    verbose: (default True) whether to print status updates.

    Returns
    -------
    model: model restored to the weights with the best training accuracy.
    lr_vals: the learning-rate values actually used.
    loss_vals: list of loss values, one per learning rate.
    acc_vals: list of accuracy values, one per learning rate.
    """
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    if lr_vals is None:
        lr_vals = list(0.0001 * 1.2 ** np.array(range(18)))
    loss_vals = []
    acc_vals = []
    t_start = time.time()
    for lr in lr_vals:
        # Set the learning rate on every parameter group of the optimizer.
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        model.train()  # set model to training mode
        loss, acc = _train_loop(model, dataloader, criterion, optimizer, device)
        loss_vals.append(loss)
        acc_vals.append(acc)
        if verbose:
            print('Elapsed time: {:.4f} lr: {:.4e} Loss: {:.4f} Acc: {:.4f}'.format(
                time.time() - t_start, lr, loss, acc))
        if acc > best_acc:
            best_acc = acc
            best_model_wts = copy.deepcopy(model.state_dict())
    if verbose:
        print('Total time {:.4f}'.format(time.time() - t_start))
    # Restore the best-performing weights before returning (in-place on the
    # caller's model; also returned for convenience).
    model.load_state_dict(best_model_wts)
    return model, lr_vals, loss_vals, acc_vals
def train_with_restart(model,dataloader_dict,criterion,optimizer,device,\
T_max=10,num_epochs=20,verbose=True,use_val=True,\
save_prog=True):
'''Train the model with a cosine-shaped learning rate annealer and restarts
after T_max epochs. This is a trick I picked up from the fast.ai MOOC.
Parameters
----------
model: the pytorch model to be trained
dataloader_dict: dictionary with keys 'train' and 'val' (optional)
containing pytorch DataLoader instances.
criterion: The pytorch criterion for the loss (loss-function)
optimizer: pytorch Optimizer instance.
device: torch.device
T_max: (default = 10) learning rate is reset to initial value after T_max
epochs.
num_epochs: (default = 20)
verbose: (default + True) whether to print status updates
use_val: (default = True) set to False if no validation Dataloader is
passed.
save_prog: (default=True) periodically save the model to the file
train_with_restart_progress.pt
Returns
-------
best_model: best model (according to validation accuracy or training
accuracy if use_val is False)
loss: dict with loss values
acc: dict with accuracy values
Note
----
Check whether the behavior corresponds to model being passed "as copy" or
"by reference"
'''
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,T_max)
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
| |
m.b1007
<= 0.690184503917672)
# ---------------------------------------------------------------------------
# Machine-generated Pyomo constraint fragment (disjunctive / big-M style).
# Recurring pattern, per unit-selection binary m.b10NN with upper bound UB:
#   * "x  - UB*b <= 0"       : on-branch disaggregated variable is 0 if b = 0
#   * "x' + UB*b <= UB"      : off-branch copy is 0 if b = 1
#   * "x_tot - x - x' == 0"  : recombine the disaggregated copies
#   * constraints dividing by (0.001 + 0.999*b) are a perturbed convex-hull
#     form of y <= c*log(x + 1), active only when b = 1.
# Do not hand-edit individual coefficients; regenerate the model instead.
# ---------------------------------------------------------------------------
m.e1050 = Constraint(expr= m.x804 + 0.690184503917672 * m.b1008
    <= 0.690184503917672)
m.e1051 = Constraint(expr= m.x865 - 0.367386387824208 * m.b1006 <= 0)
m.e1052 = Constraint(expr= m.x866 - 0.367386387824208 * m.b1007 <= 0)
m.e1053 = Constraint(expr= m.x867 - 0.367386387824208 * m.b1008 <= 0)
m.e1054 = Constraint(expr= m.x868 + 0.367386387824208 * m.b1006
    <= 0.367386387824208)
m.e1055 = Constraint(expr= m.x869 + 0.367386387824208 * m.b1007
    <= 0.367386387824208)
m.e1056 = Constraint(expr= m.x870 + 0.367386387824208 * m.b1008
    <= 0.367386387824208)
# Hull-reformulated y <= 0.65*log(x + 1) conversion, gated by b1009..b1011.
m.e1057 = Constraint(expr= (m.x871 / (0.001 + 0.999 * m.b1009) - 0.65 * log(
    m.x805 / (0.001 + 0.999 * m.b1009) + 1)) * (0.001 + 0.999 * m.b1009) <= 0)
m.e1058 = Constraint(expr= (m.x872 / (0.001 + 0.999 * m.b1010) - 0.65 * log(
    m.x806 / (0.001 + 0.999 * m.b1010) + 1)) * (0.001 + 0.999 * m.b1010) <= 0)
m.e1059 = Constraint(expr= (m.x873 / (0.001 + 0.999 * m.b1011) - 0.65 * log(
    m.x807 / (0.001 + 0.999 * m.b1011) + 1)) * (0.001 + 0.999 * m.b1011) <= 0)
m.e1060 = Constraint(expr= (m.x871 / (0.001 + 0.999 * m.b1009) - 0.65 * log(
    m.x817 / (0.001 + 0.999 * m.b1009) + 1)) * (0.001 + 0.999 * m.b1009) <= 0)
m.e1061 = Constraint(expr= (m.x872 / (0.001 + 0.999 * m.b1010) - 0.65 * log(
    m.x818 / (0.001 + 0.999 * m.b1010) + 1)) * (0.001 + 0.999 * m.b1010) <= 0)
m.e1062 = Constraint(expr= (m.x873 / (0.001 + 0.999 * m.b1011) - 0.65 * log(
    m.x819 / (0.001 + 0.999 * m.b1011) + 1)) * (0.001 + 0.999 * m.b1011) <= 0)
# Off-branch disaggregated variables fixed to zero.
m.e1063 = Constraint(expr= m.x808 == 0)
m.e1064 = Constraint(expr= m.x809 == 0)
m.e1065 = Constraint(expr= m.x810 == 0)
m.e1066 = Constraint(expr= m.x820 == 0)
m.e1067 = Constraint(expr= m.x821 == 0)
m.e1068 = Constraint(expr= m.x822 == 0)
m.e1069 = Constraint(expr= m.x874 == 0)
m.e1070 = Constraint(expr= m.x875 == 0)
m.e1071 = Constraint(expr= m.x876 == 0)
# Recombination of disaggregated copies into the original flow variables.
m.e1072 = Constraint(expr= m.x337 - m.x805 - m.x808 == 0)
m.e1073 = Constraint(expr= m.x338 - m.x806 - m.x809 == 0)
m.e1074 = Constraint(expr= m.x339 - m.x807 - m.x810 == 0)
m.e1075 = Constraint(expr= m.x346 - m.x817 - m.x820 == 0)
m.e1076 = Constraint(expr= m.x347 - m.x818 - m.x821 == 0)
m.e1077 = Constraint(expr= m.x348 - m.x819 - m.x822 == 0)
m.e1078 = Constraint(expr= m.x373 - m.x871 - m.x874 == 0)
m.e1079 = Constraint(expr= m.x374 - m.x872 - m.x875 == 0)
m.e1080 = Constraint(expr= m.x375 - m.x873 - m.x876 == 0)
# On/off bounds tied to binaries b1009..b1011.
m.e1081 = Constraint(expr= m.x805 - 0.690184503917672 * m.b1009 <= 0)
m.e1082 = Constraint(expr= m.x806 - 0.690184503917672 * m.b1010 <= 0)
m.e1083 = Constraint(expr= m.x807 - 0.690184503917672 * m.b1011 <= 0)
m.e1084 = Constraint(expr= m.x808 + 0.690184503917672 * m.b1009
    <= 0.690184503917672)
m.e1085 = Constraint(expr= m.x809 + 0.690184503917672 * m.b1010
    <= 0.690184503917672)
m.e1086 = Constraint(expr= m.x810 + 0.690184503917672 * m.b1011
    <= 0.690184503917672)
m.e1087 = Constraint(expr= m.x817 - 38.5 * m.b1009 <= 0)
m.e1088 = Constraint(expr= m.x818 - 38.5 * m.b1010 <= 0)
m.e1089 = Constraint(expr= m.x819 - 38.5 * m.b1011 <= 0)
m.e1090 = Constraint(expr= m.x820 + 38.5 * m.b1009 <= 38.5)
m.e1091 = Constraint(expr= m.x821 + 38.5 * m.b1010 <= 38.5)
m.e1092 = Constraint(expr= m.x822 + 38.5 * m.b1011 <= 38.5)
m.e1093 = Constraint(expr= m.x871 - 2.3895954367396 * m.b1009 <= 0)
m.e1094 = Constraint(expr= m.x872 - 2.3895954367396 * m.b1010 <= 0)
m.e1095 = Constraint(expr= m.x873 - 2.3895954367396 * m.b1011 <= 0)
m.e1096 = Constraint(expr= m.x874 + 2.3895954367396 * m.b1009
    <= 2.3895954367396)
m.e1097 = Constraint(expr= m.x875 + 2.3895954367396 * m.b1010
    <= 2.3895954367396)
m.e1098 = Constraint(expr= m.x876 + 2.3895954367396 * m.b1011
    <= 2.3895954367396)
# Pass-through unit (output equals input) gated by b1012..b1014.
m.e1099 = Constraint(expr= -m.x823 + m.x877 == 0)
m.e1100 = Constraint(expr= -m.x824 + m.x878 == 0)
m.e1101 = Constraint(expr= -m.x825 + m.x879 == 0)
m.e1102 = Constraint(expr= m.x826 == 0)
m.e1103 = Constraint(expr= m.x827 == 0)
m.e1104 = Constraint(expr= m.x828 == 0)
m.e1105 = Constraint(expr= m.x880 == 0)
m.e1106 = Constraint(expr= m.x881 == 0)
m.e1107 = Constraint(expr= m.x882 == 0)
m.e1108 = Constraint(expr= m.x349 - m.x823 - m.x826 == 0)
m.e1109 = Constraint(expr= m.x350 - m.x824 - m.x827 == 0)
m.e1110 = Constraint(expr= m.x351 - m.x825 - m.x828 == 0)
m.e1111 = Constraint(expr= m.x376 - m.x877 - m.x880 == 0)
m.e1112 = Constraint(expr= m.x377 - m.x878 - m.x881 == 0)
m.e1113 = Constraint(expr= m.x378 - m.x879 - m.x882 == 0)
m.e1114 = Constraint(expr= m.x823 - 9 * m.b1012 <= 0)
m.e1115 = Constraint(expr= m.x824 - 9 * m.b1013 <= 0)
m.e1116 = Constraint(expr= m.x825 - 9 * m.b1014 <= 0)
m.e1117 = Constraint(expr= m.x826 + 9 * m.b1012 <= 9)
m.e1118 = Constraint(expr= m.x827 + 9 * m.b1013 <= 9)
m.e1119 = Constraint(expr= m.x828 + 9 * m.b1014 <= 9)
m.e1120 = Constraint(expr= m.x877 - 9 * m.b1012 <= 0)
m.e1121 = Constraint(expr= m.x878 - 9 * m.b1013 <= 0)
m.e1122 = Constraint(expr= m.x879 - 9 * m.b1014 <= 0)
m.e1123 = Constraint(expr= m.x880 + 9 * m.b1012 <= 9)
m.e1124 = Constraint(expr= m.x881 + 9 * m.b1013 <= 9)
m.e1125 = Constraint(expr= m.x882 + 9 * m.b1014 <= 9)
# Second pass-through unit, gated by b1015..b1017.
m.e1126 = Constraint(expr= -m.x829 + m.x883 == 0)
m.e1127 = Constraint(expr= -m.x830 + m.x884 == 0)
m.e1128 = Constraint(expr= -m.x831 + m.x885 == 0)
m.e1129 = Constraint(expr= m.x832 == 0)
m.e1130 = Constraint(expr= m.x833 == 0)
m.e1131 = Constraint(expr= m.x834 == 0)
m.e1132 = Constraint(expr= m.x886 == 0)
m.e1133 = Constraint(expr= m.x887 == 0)
m.e1134 = Constraint(expr= m.x888 == 0)
m.e1135 = Constraint(expr= m.x352 - m.x829 - m.x832 == 0)
m.e1136 = Constraint(expr= m.x353 - m.x830 - m.x833 == 0)
m.e1137 = Constraint(expr= m.x354 - m.x831 - m.x834 == 0)
m.e1138 = Constraint(expr= m.x379 - m.x883 - m.x886 == 0)
m.e1139 = Constraint(expr= m.x380 - m.x884 - m.x887 == 0)
m.e1140 = Constraint(expr= m.x381 - m.x885 - m.x888 == 0)
m.e1141 = Constraint(expr= m.x829 - 9 * m.b1015 <= 0)
m.e1142 = Constraint(expr= m.x830 - 9 * m.b1016 <= 0)
m.e1143 = Constraint(expr= m.x831 - 9 * m.b1017 <= 0)
m.e1144 = Constraint(expr= m.x832 + 9 * m.b1015 <= 9)
m.e1145 = Constraint(expr= m.x833 + 9 * m.b1016 <= 9)
m.e1146 = Constraint(expr= m.x834 + 9 * m.b1017 <= 9)
m.e1147 = Constraint(expr= m.x883 - 9 * m.b1015 <= 0)
m.e1148 = Constraint(expr= m.x884 - 9 * m.b1016 <= 0)
m.e1149 = Constraint(expr= m.x885 - 9 * m.b1017 <= 0)
m.e1150 = Constraint(expr= m.x886 + 9 * m.b1015 <= 9)
m.e1151 = Constraint(expr= m.x887 + 9 * m.b1016 <= 9)
m.e1152 = Constraint(expr= m.x888 + 9 * m.b1017 <= 9)
# Hull-reformulated y <= 0.75*log(x + 1) conversion, gated by b1018..b1020.
m.e1153 = Constraint(expr= (m.x889 / (0.001 + 0.999 * m.b1018) - 0.75 * log(
    m.x835 / (0.001 + 0.999 * m.b1018) + 1)) * (0.001 + 0.999 * m.b1018) <= 0)
m.e1154 = Constraint(expr= (m.x890 / (0.001 + 0.999 * m.b1019) - 0.75 * log(
    m.x836 / (0.001 + 0.999 * m.b1019) + 1)) * (0.001 + 0.999 * m.b1019) <= 0)
m.e1155 = Constraint(expr= (m.x891 / (0.001 + 0.999 * m.b1020) - 0.75 * log(
    m.x837 / (0.001 + 0.999 * m.b1020) + 1)) * (0.001 + 0.999 * m.b1020) <= 0)
m.e1156 = Constraint(expr= m.x838 == 0)
m.e1157 = Constraint(expr= m.x839 == 0)
m.e1158 = Constraint(expr= m.x840 == 0)
m.e1159 = Constraint(expr= m.x892 == 0)
m.e1160 = Constraint(expr= m.x893 == 0)
m.e1161 = Constraint(expr= m.x894 == 0)
m.e1162 = Constraint(expr= m.x355 - m.x835 - m.x838 == 0)
m.e1163 = Constraint(expr= m.x356 - m.x836 - m.x839 == 0)
m.e1164 = Constraint(expr= m.x357 - m.x837 - m.x840 == 0)
m.e1165 = Constraint(expr= m.x382 - m.x889 - m.x892 == 0)
m.e1166 = Constraint(expr= m.x383 - m.x890 - m.x893 == 0)
m.e1167 = Constraint(expr= m.x384 - m.x891 - m.x894 == 0)
m.e1168 = Constraint(expr= m.x835 - 3.04984759446376 * m.b1018 <= 0)
m.e1169 = Constraint(expr= m.x836 - 3.04984759446376 * m.b1019 <= 0)
m.e1170 = Constraint(expr= m.x837 - 3.04984759446376 * m.b1020 <= 0)
m.e1171 = Constraint(expr= m.x838 + 3.04984759446376 * m.b1018
    <= 3.04984759446376)
m.e1172 = Constraint(expr= m.x839 + 3.04984759446376 * m.b1019
    <= 3.04984759446376)
m.e1173 = Constraint(expr= m.x840 + 3.04984759446376 * m.b1020
    <= 3.04984759446376)
m.e1174 = Constraint(expr= m.x889 - 1.04900943706034 * m.b1018 <= 0)
m.e1175 = Constraint(expr= m.x890 - 1.04900943706034 * m.b1019 <= 0)
m.e1176 = Constraint(expr= m.x891 - 1.04900943706034 * m.b1020 <= 0)
m.e1177 = Constraint(expr= m.x892 + 1.04900943706034 * m.b1018
    <= 1.04900943706034)
m.e1178 = Constraint(expr= m.x893 + 1.04900943706034 * m.b1019
    <= 1.04900943706034)
m.e1179 = Constraint(expr= m.x894 + 1.04900943706034 * m.b1020
    <= 1.04900943706034)
# Hull-reformulated y <= 0.8*log(x + 1) conversion, gated by b1021.
m.e1180 = Constraint(expr= (m.x895 / (0.001 + 0.999 * m.b1021) - 0.8 * log(
    m.x841 / (0.001 + 0.999 * m.b1021) + 1)) * (0.001 + 0.999 * m.b1021) <= 0)
m.e1181 = Constraint(expr= (m.x896 / (0.001 + 0.999 * | |
# <reponame>linuxaddict89/luminos  -- scraped-dataset artifact, not Python source
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2019 <NAME> (The Compiler) <<EMAIL>>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Custom useful data types.
Module attributes:
_UNSET: Used as default argument in the constructor so default can be None.
"""
import operator
import collections.abc
import enum
import attr
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QTimer
from luminos.utils import log, qtutils, utils
# Unique sentinel used as the "no default given" marker so that None remains
# a legal default value (see module docstring).
_UNSET = object()
class NeighborList(collections.abc.Sequence):
    """A list of items which saves its current position.

    Consistency fix: ``_snap_in`` previously mixed access via the ``items``
    property with direct ``self._items`` access; it now uses the private
    attribute throughout, like the rest of the class (behavior unchanged —
    the property returns the same list).

    Class attributes:
        Modes: Different modes, see constructor documentation.

    Attributes:
        fuzzyval: The value which is currently set but not in the list.
        _idx: The current position in the list.
        _items: A list of all items, accessed through item property.
        _mode: The current mode.
    """

    Modes = enum.Enum("Modes", ["edge", "exception"])

    def __init__(self, items=None, default=_UNSET, mode=Modes.exception):
        """Constructor.

        Args:
            items: The list of items to iterate in.
            default: The initially selected value.
            mode: Behavior when the first/last item is reached.
                  Modes.edge: Go to the first/last item.
                  Modes.exception: Raise an IndexError.
        """
        if not isinstance(mode, self.Modes):
            raise TypeError("Mode {} is not a Modes member!".format(mode))
        if items is None:
            self._items = []
        else:
            self._items = list(items)
        self._default = default
        if default is not _UNSET:
            self._idx = self._items.index(default)
        else:
            self._idx = None
        self._mode = mode
        self.fuzzyval = None

    def __getitem__(self, key):
        return self._items[key]

    def __len__(self):
        return len(self._items)

    def __repr__(self):
        return utils.get_repr(
            self,
            items=self._items,
            mode=self._mode,
            idx=self._idx,
            fuzzyval=self.fuzzyval,
        )

    def _snap_in(self, offset):
        """Set the current item to the closest item to self.fuzzyval.

        Args:
            offset: negative to get the next smaller item, positive for the
                    next bigger one.

        Return:
            True if the value snapped in (changed),
            False when the value already was in the list.
        """
        op = operator.le if offset < 0 else operator.ge
        items = [
            (idx, e) for (idx, e) in enumerate(self._items) if op(e, self.fuzzyval)
        ]
        if items:
            item = min(items, key=lambda tpl: abs(self.fuzzyval - tpl[1]))
        else:
            # No candidate on the requested side: fall back to the smallest
            # (going down) or largest (going up) item overall.
            sorted_items = sorted(
                ((idx, e) for (idx, e) in enumerate(self._items)), key=lambda e: e[1]
            )
            idx = 0 if offset < 0 else -1
            item = sorted_items[idx]
        self._idx = item[0]
        return self.fuzzyval not in self._items

    def _get_new_item(self, offset):
        """Logic for getitem to get the item at offset.

        Args:
            offset: The offset of the current item, relative to the last one.

        Return:
            The new item.
        """
        try:
            if self._idx + offset >= 0:
                new = self._items[self._idx + offset]
            else:
                # Negative indices would silently wrap around; treat them as
                # walking off the start of the list instead.
                raise IndexError
        except IndexError:
            if self._mode == self.Modes.edge:
                assert offset != 0
                if offset > 0:
                    new = self.lastitem()
                else:
                    new = self.firstitem()
            elif self._mode == self.Modes.exception:  # pragma: no branch
                raise
        else:
            self._idx += offset
        return new

    @property
    def items(self):
        """Getter for items, which should not be set."""
        return self._items

    def getitem(self, offset):
        """Get the item with a relative position.

        Args:
            offset: The offset of the current item, relative to the last one.

        Return:
            The new item.
        """
        log.misc.debug(
            "{} items, idx {}, offset {}".format(len(self._items), self._idx, offset)
        )
        if not self._items:
            raise IndexError("No items found!")
        if self.fuzzyval is not None:
            # Value has been set to something not in the list, so we snap in to
            # the closest value in the right direction and count this as one
            # step towards offset.
            snapped = self._snap_in(offset)
            if snapped and offset > 0:
                offset -= 1
            elif snapped:
                offset += 1
            self.fuzzyval = None
        return self._get_new_item(offset)

    def curitem(self):
        """Get the current item in the list."""
        if self._idx is not None:
            return self._items[self._idx]
        else:
            raise IndexError("No current item!")

    def nextitem(self):
        """Get the next item in the list."""
        return self.getitem(1)

    def previtem(self):
        """Get the previous item in the list."""
        return self.getitem(-1)

    def firstitem(self):
        """Get the first item in the list."""
        if not self._items:
            raise IndexError("No items found!")
        self._idx = 0
        return self.curitem()

    def lastitem(self):
        """Get the last item in the list."""
        if not self._items:
            raise IndexError("No items found!")
        self._idx = len(self._items) - 1
        return self.curitem()

    def reset(self):
        """Reset the position to the default."""
        if self._default is _UNSET:
            raise ValueError("No default set!")
        self._idx = self._items.index(self._default)
        return self.curitem()
# The mode of a Question.
PromptMode = enum.Enum("PromptMode", ["yesno", "text", "user_pwd", "alert", "download"])


class WindowState(object):
    """Plain record of a window's geometry and maximized flag."""

    # Class-level defaults; __init__ always shadows them on the instance.
    width = 0
    height = 0
    maximized = False

    def __init__(self, width, height, maximized=False):
        super().__init__()
        self.width = width
        self.height = height
        self.maximized = maximized
class ClickTarget(enum.Enum):

    """How to open a clicked link."""

    normal = 0  #: Open the link in the current tab
    tab = 1  #: Open the link in a new foreground tab
    tab_bg = 2  #: Open the link in a new background tab
    window = 3  #: Open the link in a new window
    hover = 4  #: Only hover over the link


class KeyMode(enum.Enum):

    """Key input modes."""

    normal = 1  #: Normal mode (no mode was entered)
    hint = 2  #: Hint mode (showing labels for links)
    command = 3  #: Command mode (after pressing the colon key)
    yesno = 4  #: Yes/No prompts
    prompt = 5  #: Text prompts
    insert = 6  #: Insert mode (passing through most keys)
    passthrough = 7  #: Passthrough mode (passing through all keys)
    caret = 8  #: Caret mode (moving cursor with keys)
    set_mark = 9
    jump_mark = 10
    record_macro = 11
    run_macro = 12


class Exit(enum.IntEnum):

    """Exit statuses for errors. Needs to be an int for sys.exit."""

    ok = 0
    reserved = 1
    exception = 2
    err_ipc = 3
    err_init = 4


# Load status of a tab
LoadStatus = enum.Enum(
    "LoadStatus", ["none", "success", "success_https", "error", "warn", "loading"]
)

# Page-load lifecycle events.
LoadEvent = enum.Enum(
    "LoadEvent", ["FINISHED", "STARTED", "BEFORE_LOAD"]
)

# Backend of a tab
Backend = enum.Enum("Backend", ["QtWebKit", "QtWebEngine"])


class JsWorld(enum.Enum):

    """World/context to run JavaScript code in."""

    main = 1  #: Same world as the web page's JavaScript.
    application = 2  #: Application world, used by qutebrowser internally.
    user = 3  #: User world, currently not used.
    jseval = 4  #: World used for the jseval-command.


# Log level of a JS message. This needs to match up with the keys allowed for
# the content.javascript.log setting.
JsLogLevel = enum.Enum("JsLogLevel", ["unknown", "info", "warning", "error"])

MessageLevel = enum.Enum("MessageLevel", ["error", "warning", "info"])

IgnoreCase = enum.Enum("IgnoreCase", ["smart", "never", "always"])


class CommandValue(enum.Enum):

    """Special values which are injected when running a command handler."""

    count = 1
    win_id = 2
    cur_tab = 3
    count_tab = 4
class Question(QObject):
"""A question asked to the user, e.g. via the status bar.
Note the creator is responsible for cleaning up the question after it
doesn't need it anymore, e.g. via connecting Question.completed to
Question.deleteLater.
Attributes:
mode: A PromptMode enum member.
yesno: A question which can be answered with yes/no.
text: A question which requires a free text answer.
user_pwd: A question for a username and password.
default: The default value.
For yesno, None (no default), True or False.
For text, a default text as string.
For user_pwd, a default username as string.
title: The question title to show.
text: The prompt text to display to the user.
url: Any URL referenced in prompts.
answer: The value the user entered (as password for user_pwd).
is_aborted: Whether the question was aborted.
interrupted: Whether the question was interrupted by another one.
Signals:
answered: Emitted when the question has been answered by the user.
arg: The answer to the question.
cancelled: Emitted when the question has been cancelled by the user.
aborted: Emitted when the question was aborted programmatically.
In this case, cancelled is not emitted.
answered_yes: Convenience signal emitted when a yesno question was
answered with yes.
answered_no: Convenience signal emitted when a yesno question was
| |
'lines': Id,
'branch': Branch,
'dcache': DcacheAccess,
'counts': Counts
}
# Decoder element classes that additionally need the picture's pair
# attributes (picPairs) — see find_colour_decoder below
indexed_decoder_element_classes = {
    'indexedCounts' : Counts
    }
def find_colour_decoder(stripSpace, decoderName, dataName, picPairs):
    """Make a colour decoder from some picture file blob attributes.

    Returns None when decoderName matches no known decoder family.
    """
    if decoderName == 'frame':
        return FrameColours.decoder(Counts, stripSpace, dataName)
    # Plain two-dimensional decoders
    element_class = decoder_element_classes.get(decoderName)
    if element_class is not None:
        return TwoDColours.decoder(element_class, dataName)
    # Decoders indexed via the picture's pair attributes
    indexed_class = indexed_decoder_element_classes.get(decoderName)
    if indexed_class is not None:
        return TwoDColours.indexed_decoder(indexed_class, dataName, picPairs)
    return None
class IdedObj(object):
    """An object identified by an Id carrying paired data.
    The super class for Inst and Line"""

    def __init__(self, id, pairs=None):
        # pairs=None instead of a mutable default: each object gets its
        # own dict unless the caller supplies one
        self.id = id
        self.pairs = {} if pairs is None else pairs

    def __cmp__(self, right):
        # Python 2 comparison protocol: order objects by their id
        return cmp(self.id, right.id)

    def table_line(self):
        """Represent the object as a list of table row data"""
        return []

    # FIXME, add a table column titles?

    def __repr__(self):
        return ' '.join(self.table_line())
class Inst(IdedObj):
    """A non-fault instruction"""

    def __init__(self, id, disassembly, addr, pairs=None):
        # pairs=None avoids a shared mutable default dict
        if pairs is None:
            pairs = {}
        super(Inst,self).__init__(id, pairs)
        # 'nextAddr' is lifted out of the pair data; note this removes the
        # key from the caller's dict, as the original did
        if 'nextAddr' in pairs:
            self.nextAddr = int(pairs['nextAddr'], 0)
            del pairs['nextAddr']
        else:
            self.nextAddr = None
        self.disassembly = disassembly
        self.addr = addr

    def table_line(self):
        """Represent the instruction as a list of table row data"""
        if self.nextAddr is not None:
            addrStr = '0x%x->0x%x' % (self.addr, self.nextAddr)
        else:
            addrStr = '0x%x' % self.addr
        ret = [addrStr, self.disassembly]
        # items() (not the Py2-only iteritems()) behaves identically here
        # under Python 2 and also works under Python 3
        for name, value in self.pairs.items():
            ret.append("%s=%s" % (name, str(value)))
        return ret
class InstFault(IdedObj):
    """A fault instruction"""

    def __init__(self, id, fault, addr, pairs=None):
        # pairs=None avoids a shared mutable default dict
        super(InstFault,self).__init__(id, {} if pairs is None else pairs)
        self.fault = fault
        self.addr = addr

    def table_line(self):
        """Represent the fault as a list of table row data"""
        ret = ["0x%x" % self.addr, self.fault]
        # Fix: the original iterated bare dict keys and passed two
        # arguments to list.append (a TypeError when pairs is non-empty);
        # format key=value entries like Inst.table_line does
        for name, value in self.pairs.items():
            ret.append("%s=%s" % (name, str(value)))
        return ret
class Line(IdedObj):
    """A fetched line"""

    def __init__(self, id, vaddr, paddr, size, pairs=None):
        # pairs=None avoids a shared mutable default dict
        super(Line,self).__init__(id, {} if pairs is None else pairs)
        self.vaddr = vaddr
        self.paddr = paddr
        self.size = size

    def table_line(self):
        """Represent the line as a list of table row data"""
        ret = ["0x%x/0x%x" % (self.vaddr, self.paddr), "%d" % self.size]
        # Fix: the original iterated bare dict keys and passed two
        # arguments to list.append (a TypeError when pairs is non-empty)
        for name, value in self.pairs.items():
            ret.append("%s=%s" % (name, str(value)))
        return ret
class LineFault(IdedObj):
    """A faulting line"""

    def __init__(self, id, fault, vaddr, pairs=None):
        # pairs=None avoids a shared mutable default dict
        super(LineFault,self).__init__(id, {} if pairs is None else pairs)
        self.vaddr = vaddr
        self.fault = fault

    def table_line(self):
        """Represent the faulting line as a list of table row data"""
        ret = ["0x%x" % self.vaddr, self.fault]
        # Fix: the original iterated bare dict keys and passed two
        # arguments to list.append (a TypeError when pairs is non-empty)
        for name, value in self.pairs.items():
            ret.append("%s=%s" % (name, str(value)))
        return ret
class BlobEvent(object):
    """Time event for a single blob"""

    def __init__(self, unit, time, pairs=None):
        # blob's unit name
        self.unit = unit
        self.time = time
        # dict of picChar (blob name) to visual data
        self.visuals = {}
        # Miscellaneous unparsed MinorTrace line data.  pairs=None instead
        # of a mutable default gives each event its own dict.
        self.pairs = {} if pairs is None else pairs
        # Non-MinorTrace debug printout for this unit at this time
        self.comments = []

    def find_ided_objects(self, model, picChar, includeInstLines):
        """Find instructions/lines mentioned in the blob's event
        data"""
        ret = []
        if picChar in self.visuals:
            # An explicit loop instead of map(): identical under Python 2
            # (where map is eager) and not silently lazy under Python 3
            for data in self.visuals[picChar].elems():
                instId = data.get_inst()
                lineId = data.get_line()
                if instId is not None:
                    inst = model.find_inst(instId)
                    line = model.find_line(instId)
                    if inst is not None:
                        ret.append(inst)
                    if includeInstLines and line is not None:
                        ret.append(line)
                elif lineId is not None:
                    line = model.find_line(lineId)
                    if line is not None:
                        ret.append(line)
        return sorted(ret)
class BlobModel(object):
"""Model bringing together blob definitions and parsed events"""
def __init__(self, unitNamePrefix=''):
self.blobs = []
self.unitNameToBlobs = {}
self.unitEvents = {}
self.clear_events()
self.picSize = Point(20,10)
self.lastTime = 0
self.unitNamePrefix = unitNamePrefix
def clear_events(self):
"""Drop all events and times"""
self.lastTime = 0
self.times = []
self.insts = {}
self.lines = {}
self.numEvents = 0
for unit, events in self.unitEvents.iteritems():
self.unitEvents[unit] = []
def add_blob(self, blob):
"""Add a parsed blob to the model"""
self.blobs.append(blob)
if blob.unit not in self.unitNameToBlobs:
self.unitNameToBlobs[blob.unit] = []
self.unitNameToBlobs[blob.unit].append(blob)
def add_inst(self, inst):
"""Add a MinorInst instruction definition to the model"""
# Is this a non micro-op instruction. Microops (usually) get their
# fetchSeqNum == 0 varient stored first
macroop_key = (inst.id.fetchSeqNum, 0)
full_key = (inst.id.fetchSeqNum, inst.id.execSeqNum)
if inst.id.execSeqNum != 0 and macroop_key not in self.insts:
self.insts[macroop_key] = inst
self.insts[full_key] = inst
def find_inst(self, id):
"""Find an instruction either as a microop or macroop"""
macroop_key = (id.fetchSeqNum, 0)
full_key = (id.fetchSeqNum, id.execSeqNum)
if full_key in self.insts:
return self.insts[full_key]
elif macroop_key in self.insts:
return self.insts[macroop_key]
else:
return None
    def add_line(self, line):
        """Add a MinorLine line to the model, keyed by its lineSeqNum."""
        self.lines[line.id.lineSeqNum] = line
def add_unit_event(self, event):
"""Add a single event to the model. This must be an event at a
time >= the current maximum time"""
if event.unit in self.unitEvents:
events = self.unitEvents[event.unit]
if len(events) > 0 and events[len(events)-1].time > event.time:
print "Bad event ordering"
events.append(event)
self.numEvents += 1
self.lastTime = max(self.lastTime, event.time)
def extract_times(self):
"""Extract a list of all the times from the seen events. Call after
reading events to give a safe index list to use for time indices"""
times = {}
for unitEvents in self.unitEvents.itervalues():
for event in unitEvents:
times[event.time] = 1
self.times = times.keys()
self.times.sort()
def find_line(self, id):
"""Find a line by id"""
key = id.lineSeqNum
return self.lines.get(key, None)
def find_event_bisection(self, unit, time, events,
lower_index, upper_index):
"""Find an event by binary search on time indices"""
while lower_index <= upper_index:
pivot = (upper_index + lower_index) / 2
pivotEvent = events[pivot]
event_equal = (pivotEvent.time == time or
(pivotEvent.time < time and
(pivot == len(events) - 1 or
events[pivot + 1].time > time)))
if event_equal:
return pivotEvent
elif time > pivotEvent.time:
if pivot == upper_index:
return None
else:
lower_index = pivot + 1
elif time < pivotEvent.time:
if pivot == lower_index:
return None
else:
upper_index = pivot - 1
else:
return None
return None
def find_unit_event_by_time(self, unit, time):
"""Find the last event for the given unit at time <= time"""
if unit in self.unitEvents:
events = self.unitEvents[unit]
ret = self.find_event_bisection(unit, time, events,
0, len(events)-1)
return ret
else:
return None
def find_time_index(self, time):
"""Find a time index close to the given time (where
times[return] <= time and times[return+1] > time"""
ret = 0
lastIndex = len(self.times) - 1
while ret < lastIndex and self.times[ret + 1] <= time:
ret += 1
return ret
def add_minor_inst(self, rest):
"""Parse and add a MinorInst line to the model"""
pairs = parse.parse_pairs(rest)
other_pairs = dict(pairs)
id = Id().from_string(pairs['id'])
del other_pairs['id']
addr = int(pairs['addr'], 0)
del other_pairs['addr']
if 'inst' in other_pairs:
del other_pairs['inst']
# Collapse unnecessary spaces in disassembly
disassembly = re.sub(' *', ' ',
re.sub('^ *', '', pairs['inst']))
inst = Inst(id, disassembly, addr, other_pairs)
self.add_inst(inst)
elif 'fault' in other_pairs:
del other_pairs['fault']
inst = InstFault(id, pairs['fault'], addr, other_pairs)
self.add_inst(inst)
def add_minor_line(self, rest):
"""Parse and add a MinorLine line to the model"""
pairs = parse.parse_pairs(rest)
other_pairs = dict(pairs)
id = Id().from_string(pairs['id'])
del other_pairs['id']
vaddr = int(pairs['vaddr'], 0)
del other_pairs['vaddr']
if 'paddr' in other_pairs:
del other_pairs['paddr']
del other_pairs['size']
paddr = int(pairs['paddr'], 0)
size = int(pairs['size'], 0)
self.add_line(Line(id,
vaddr, paddr, size, other_pairs))
elif 'fault' in other_pairs:
del other_pairs['fault']
self.add_line(LineFault(id, pairs['fault'], vaddr, other_pairs))
def load_events(self, file, startTime=0, endTime=None):
"""Load an event file and add everything to this model"""
def update_comments(comments, time):
# Add a list of comments to an existing event, if there is one at
# the given time, or create a new, correctly-timed, event from
# the last event and attach the comments to that
for commentUnit, commentRest in comments:
event = self.find_unit_event_by_time(commentUnit, time)
# Find an event to which this comment can be attached
if event is None:
# No older event, make a new empty one
event = BlobEvent(commentUnit, time, {})
self.add_unit_event(event)
elif event.time != time:
# Copy the old event and make a new one with the right
# time and comment
newEvent = BlobEvent(commentUnit, time, event.pairs)
newEvent.visuals = dict(event.visuals)
event = newEvent
self.add_unit_event(event)
event.comments.append(commentRest)
self.clear_events()
# A negative time will *always* be different from an event time
time = -1
time_events = {}
last_time_lines = {}
minor_trace_line_count = 0
comments = []
default_colour = [[colours.unknownColour]]
next_progress_print_event_count = 1000
if not os.access(file, os.R_OK):
print 'Can\'t open file', file
exit(1)
else:
print 'Opening file', file
f = open(file)
start_wall_time = wall_time()
# Skip leading events
still_skipping = True
l = f.readline()
while l and still_skipping:
match = re.match('^\s*(\d+):', l)
if match is not None:
event_time = match.groups()
if int(event_time[0]) >= startTime:
still_skipping = False
else:
l = f.readline()
else:
l = f.readline()
match_line_re = re.compile(
'^\s*(\d+):\s*([\w\.]+):\s*(Minor\w+:)?\s*(.*)$')
# Parse each line of the events file, accumulating comments to be
# attached to MinorTrace events when the time changes
reached_end_time = False
while not reached_end_time and l:
match = match_line_re.match(l)
if | |
#! /usr/local/bin/python
import sys
from numpy import *
import numpy.linalg as LA
from decimal import *
from copy import deepcopy
from itertools import permutations
from ClassesFunctions import *
# S.J. 07/09/2018 - changes to use the functions defined in ClassesFunctions file to avoid redundancy
vertexOrder = []  # module-level vertex ordering (populated elsewhere — confirm)
class Base:
    # One RNA residue.  These are class-level defaults; initialize()
    # sets the per-instance values.
    index = None #nucleotide Index (1-based residue number)
    indexBP = None #base paired to (residue number; 0 when unpaired)
    nt = None #NT value (nucleotide letter)
    active = None # set False once the residue is assigned to a helix
    helixNumber = 0 #what helix is this a part of?
    def initialize(self,indexv,ntv,indexBPv):
        # Store residue number, nucleotide letter and pairing partner
        self.index = int(indexv)
        self.indexBP = int(indexBPv)
        self.nt = str(ntv)
        self.active = True
class Loop:
    # A loop region; start/end are residue numbers filled in by
    # countHelices.
    start = None
    end = None
    def __init__(self):
        pass
class Helix:
    # A helix (graph vertex); start/end are residue numbers on the 5'
    # strand, flag is 'L' when the helix closes a loop.
    start = None
    end = None
    flag = "" #ARE YOU A LOOP? ('L' marks a loop-closing helix)
    Loop = None
    connected = None
    edges = 0
    def __init__(self):
        # per-instance list (a class-level list would be shared)
        self.connected = []
# S.J. 02/11/2018 - to keep track of edges
class Edge:
    # A connection between two helices: Helix1/Helix2 are helix numbers,
    # start/end the strand residue range (see RNAInfo.printEdges).
    Helix1 = None
    Helix2 = None
    start = None
    end = None
    def __init__(self):
        pass
class RNAInfo:
    """Container for an RNA secondary structure: residues, helices,
    loops, edges and the derived graph matrices.

    Index 0 of Bases/Loops/Helices/Edges is a placeholder so residue and
    helix numbering can be 1-based.

    Fixes vs. the original:
    - adjMatrix/degMatrix/laplacian are now per-instance (they were
      class-level mutables shared by every instance)
    - clear() actually resets the instance (it previously assigned to
      locals, a no-op)
    """
    Bases = None
    Loops = None
    Helices = None
    Edges = None # S.J. 02/11/2018 to keep track of edges
    numVert = None

    def __init__(self):
        self.Bases = [0]
        self.Loops = [0]
        self.Helices = [0]
        self.Edges = [0] # S.J. 02/11/2018 to keep track of edges
        self.numVert = 0
        # Per-instance matrices: previously these lived on the class and
        # were silently shared (and mutated) across instances
        self.adjMatrix = []
        self.degMatrix = []
        self.laplacian = None

    def makeMatrices(self):
        """Reset adjacency and degree matrices to n x n zero matrices,
        where n is the number of helices (vertices)."""
        n = len(self.Helices) - 1
        self.adjMatrix = [[0] * n for _ in range(n)]
        self.degMatrix = [[0] * n for _ in range(n)]
        self.laplacian = None

    def addBase(self,baseA):
        """Append a Base record (list is 1-based; slot 0 is a placeholder)."""
        self.Bases.append(baseA)

    def printOut(self,whichBase=1000):
        """Print bases, helices and loops.  whichBase==1000 is the
        'print everything' sentinel kept from the original interface."""
        if whichBase == 1000:
            for i in range(1,len(self.Bases)):
                print ("%d\t%d\t%s\t%d" %(self.Bases[i].index,self.Bases[i].indexBP,self.Bases[i].nt,self.Bases[i].helixNumber))
            for i in range(1,len(self.Helices)):
                print ("for helix %d: start=%d, end=%d, flag=%s" %(i,self.Helices[i].start,self.Helices[i].end,self.Helices[i].flag))
            for i in range(1,len(self.Loops)):
                print ("for loop %d: start=%d, end=%d" %(i,self.Loops[i].start,self.Loops[i].end))

    def printConnections(self):
        for i in range(1,len(self.Helices)):
            print ("helix %d is connected to: %s and has %d edges." %(i,str(self.Helices[i].connected),self.Helices[i].edges))

    def printAdj(self):
        print ("Adjacency Matrix:")
        for i in self.adjMatrix:
            print (i)

    def printDeg(self):
        print ("Degree Matrix:")
        for i in self.degMatrix:
            print (i)

    def printLpl(self):
        print ("Laplacian Matrix:")
        for i in self.laplacian:
            print (i)

    def printHelices(self):
        for i in range(1,len(self.Helices)):
            print ("Vertex %d: first strand: %d %d second strand: %d %d" %(i,self.Helices[i].start,self.Helices[i].end,self.Bases[self.Helices[i].end].indexBP,self.Bases[self.Helices[i].start].indexBP))

    # S.J. 02/11/2018 - to print edges information
    def printEdges(self):
        for i in range(1,len(self.Edges)):
            print ("Edge: helix 1: %d helix 2: %d strand: %d %d" %(self.Edges[i].Helix1,self.Edges[i].Helix2,self.Edges[i].start,self.Edges[i].end))

    def printOrder(self):
        """Print helix numbers (0-based) in 5'->3' order of first
        occurrence."""
        order = []
        prevHelix = 0
        for i in range(1,len(self.Bases)):
            currHelix = self.Bases[i].helixNumber
            if currHelix != 0 and currHelix != prevHelix:
                prevHelix = currHelix
                if currHelix != 0:
                    order.append(currHelix-1)
        print ("5'-" + str(order) + "-3'")

    def clear(self):
        """Reset this instance.  (The original assigned to local names,
        which had no effect on the object.)"""
        self.Bases = None
        self.Loops = None
        self.Helices = None
        self.numVert = None
        self.adjMatrix = []
        self.degMatrix = []
        self.laplacian = None
### makeMatrices ####
#####################
def makeMatrices(RNA):
    """(Re)build RNA's adjacency and degree matrices as n x n zero
    matrices, where n is the number of helices.

    Bug fix: the original assigned to an undefined ``self`` and raised
    NameError on every call; the matrices are reset on the RNA argument
    instead.
    """
    RNA.adjMatrix = []
    RNA.degMatrix = []
    RNA.laplacian = None
    n = len(RNA.Helices) - 1
    for _ in range(n):
        RNA.adjMatrix.append([0] * n)
        RNA.degMatrix.append([0] * n)
#Translate information from the CT file into an RNA class
def getCTInfo(arg):
    """Parse a CT file into an RNAInfo.

    Header lines before residue '1' are skipped; parsing stops at the
    first line with fewer than two fields.  Column 1 is taken as the
    nucleotide and column 4 as the pairing partner (CT column layout —
    confirm against the files in use).
    """
    RNA = RNAInfo()
    # 'with' guarantees the file is closed even if parsing raises
    with open(arg) as f:
        line = f.readline()
        while line.split()[0] != '1':
            line = f.readline()
        while len(line.split()) > 1:
            cols = line.split()
            oneBase = Base()
            oneBase.initialize(cols[0], cols[1], cols[4])
            RNA.addBase(oneBase)
            line = f.readline()
    return RNA
##Translate information from BPSEQ file into an RNA class
def getBPSEQInfo(arg):
    """Parse a BPSEQ file (columns: index, base, pairing partner) into an
    RNAInfo, skipping any header lines before residue '1'."""
    RNA = RNAInfo()
    # 'with' guarantees the file is closed even if parsing raises
    with open(arg) as f:
        line = f.readline()
        while line.split()[0] != '1':
            line = f.readline()
        while len(line.split()) > 1:
            cols = line.split()
            oneBase = Base()
            oneBase.initialize(cols[0], cols[1], cols[2])
            RNA.addBase(oneBase)
            line = f.readline()
    return RNA
##Translate information from an adjacency matrix into an RNA class - S.J. 07/05/2018
def getAdjMatInfo(arg):
    """Parse a whitespace-separated adjacency matrix file into an RNAInfo.

    One Helix is appended per row; the degree matrix is diagonal with
    each row's sum as the diagonal entry.
    """
    RNA = RNAInfo()
    # 'with' guarantees the file is closed even if parsing raises
    with open(arg) as f:
        lines = f.readlines()
    n = len(lines)
    for i in range(n):
        RNA.Helices.append(Helix())
        row = [float(x) for x in lines[i].split()]
        RNA.adjMatrix.append(row)
        # Degree matrix row: zeros except the diagonal entry (row sum)
        RNA.degMatrix.append([0.0000] * i + [sum(row)] + [0.0000] * (n - i - 1))
    return RNA
## Translate information from a dot bracket notation into an RNA class - S.J. 07/25/2018
# first line of a dot bracket notation should contain the sequence, and the second line should contain the dot bracket notation
def getDotBracketInfo(arg):
    """Build an RNAInfo from a dot-bracket file.

    The sequence line (starting with A/G/C/U) and the dot-bracket line
    (starting with '.', '(' or ')') are detected by first character; if
    no sequence is present the nucleotide defaults to "N".
    """
    f = open(arg)
    lines = f.readlines()
    f.close()
    found_seq = False
    found_dotb = False
    for raw in lines:
        token = raw.strip().split()[0]
        first = token[0]
        if not found_seq and first in "AGCU":
            # base identity of each residue
            sequence = list(token)
            found_seq = True
        elif not found_dotb and first in ".()":
            dotb = list(token)
            found_dotb = True
    RNA = RNAInfo()
    # base_pair[i] holds the 1-based partner residue number, 0 if unpaired
    base_pair = [0] * len(dotb)
    stack_bp = []
    for i in range(len(dotb)):
        if dotb[i] == "(":
            # opening bracket: remember the position
            stack_bp.append(i)
        elif dotb[i] == ")":
            # closing bracket: pair with the most recent open position
            i_bp = stack_bp.pop()
            base_pair[i] = i_bp + 1
            base_pair[i_bp] = i + 1
    # add bases to RNA
    for i in range(len(dotb)):
        oneBase = Base()
        nt = sequence[i] if found_seq else "N"
        oneBase.initialize(i + 1, nt, base_pair[i])
        RNA.addBase(oneBase)
    return RNA
#Determine whether or not there are pseudoknots in the structure
def pseudoKnots(RNA):
    """Return True iff two base pairs interleave (j lies between i and
    i's partner while j's partner lies beyond it)."""
    bases = RNA.Bases
    for i in range(1, len(bases) - 1):
        bp_i = bases[i].indexBP
        if bp_i <= 0:
            continue
        for j in range(i + 1, len(bases)):
            bp_j = bases[j].indexBP
            if bp_j > 0 and j < bp_i < bp_j:
                return True
    return False
### countHelices ####
#####################
#This method counts the number of helices and loops
def countHelices(RNA):
    # Walk 5'->3' numbering helices; a base and its partner are marked
    # inactive once assigned so the complementary strand is not re-counted.
    nHelix = 1
    i = 1
    #find the first.
    while (RNA.Bases[i].indexBP==0):
        i += 1
    # The first paired base starts helix 1 (both partners get the number)
    RNA.Bases[i].helixNumber = nHelix
    RNA.Bases[i].active = False
    RNA.Bases[RNA.Bases[i].indexBP].helixNumber = nHelix
    RNA.Bases[RNA.Bases[i].indexBP].active = False
    RNA.Helices.append(Helix())
    RNA.Helices[nHelix].start = i;
    RNA.Helices[nHelix].end = i;
    i+=1
    for j in range(i,len(RNA.Bases)):
        if(RNA.Bases[j].indexBP>0 and RNA.Bases[j].active == True):
            # A break in pairing continuity (partner not adjacent to the
            # previous base's partner) starts a new helix
            if RNA.Bases[j].indexBP+1 != RNA.Bases[j-1].indexBP:
                nHelix += 1
                RNA.Helices.append(Helix())
                RNA.Helices[nHelix].start = j;
                RNA.Helices[nHelix].end = j;
            RNA.Bases[j].helixNumber = nHelix
            RNA.Bases[j].active = False
            RNA.Bases[RNA.Bases[j].indexBP].helixNumber = nHelix
            RNA.Bases[RNA.Bases[j].indexBP].active = False
            RNA.Helices[nHelix].end = j;
        else:
            if RNA.Bases[j].indexBP==0:
                RNA.Bases[j].helixNumber = 0
    # Flag loop-closing helices.  clearPath comes from ClassesFunctions;
    # presumably it reports an unbroken (unpaired) stretch between the
    # helix end and its partner — confirm against that module.
    for i in range(1,len(RNA.Helices)):
        helixEnd = RNA.Helices[i].end
        if clearPath(RNA,helixEnd,RNA.Bases[helixEnd].indexBP):
            loop = Loop()
            loop.start = RNA.Helices[i].start
            loop.end = RNA.Bases[RNA.Helices[i].start].indexBP
            RNA.Loops.append(loop)
            RNA.Helices[i].flag = 'L'
            RNA.Helices[i].Loop = loop
### changeHelices ####
#####################
#Combines helices if they are only separated by one unpaired NT
def changeHelices(RNA):
    # Pass 1: collect indices of helices to merge with their successor
    changes = []
    for i in range(1,len(RNA.Helices)-1):
        #never do this to loops
        if RNA.Helices[i].flag == 'L' and RNA.Helices[i+1].flag == 'L':
            pass
        else:
            # 5'/3' strand endpoints of helix i and helix i+1
            helix2fiveStart = RNA.Helices[i+1].start
            helix2fiveEnd = RNA.Helices[i+1].end
            helix2threeEnd = RNA.Bases[RNA.Helices[i+1].start].indexBP
            helix2threeStart = RNA.Bases[RNA.Helices[i+1].end].indexBP
            helix1fiveEnd = RNA.Helices[i].end
            helix1fiveStart = RNA.Helices[i].start
            helix1threeStart = RNA.Bases[RNA.Helices[i].end].indexBP
            helix1threeEnd = RNA.Bases[RNA.Helices[i].start].indexBP
            # if statement added by S.J. 01/30/2018 to not combine helices that don't have base pairs in the correct order
            if helix2threeEnd > helix1threeStart: # 3' of helix 1 starts before 3' of helix 2 ends, therefore cannot be combined
                continue
            # Gap sizes between the two helices on each strand
            Total5P = abs(helix2fiveStart - helix1fiveEnd)-1
            Total3P = abs(helix1threeStart - helix2threeEnd)-1
            if ((abs(Total5P + Total3P) < 2) or (abs(Total5P) == 1 and abs(Total3P) == 1)):
                changes.append(i)
    # Pass 2: merge each flagged helix with its successor, renumbering
    # bases and shifting the remaining change indices down
    for i in changes: #change bases
        j = 1
        ##Base Change
        while(RNA.Bases[j].helixNumber <=i):
            j += 1
        for k in range(j,len(RNA.Bases)):
            if RNA.Bases[k].helixNumber != 0 and RNA.Bases[k].helixNumber>i:
                RNA.Bases[k].helixNumber -= 1
        RNA.Helices[i].end = RNA.Helices[i+1].end
        # The merged helix inherits a loop flag from its successor
        if RNA.Helices[i+1].flag == 'L':
            RNA.Helices[i].flag = 'L'
            RNA.Helices[i].Loop = RNA.Helices[i+1].Loop
            RNA.Helices[i].Loop.start = RNA.Helices[i].start
            RNA.Helices[i].Loop.end = RNA.Helices[i].end
        del RNA.Helices[i+1]
        for m in range(0,len(changes)):
            if changes[m] > i:
                changes[m] -= 1
    # Pass 3: report and dissolve single base-pair helices
    singleHelices = []
    for i in range(1,len(RNA.Helices)):
        if RNA.Helices[i].start == RNA.Helices[i].end:
            singleHelices.append(i)
            fivePrime = RNA.Helices[i].start
            threePrime = RNA.Bases[fivePrime].indexBP
            print ("Helix %d is a single base-pair helix with 5' = %d and 3' = %d!" %(i,fivePrime,threePrime))
    for i in singleHelices:
        fivePrime = RNA.Helices[i].start
        threePrime = RNA.Bases[fivePrime].indexBP
        RNA.Bases[fivePrime].indexBP = 0
        RNA.Bases[fivePrime].helixNumber = 0
        RNA.Bases[threePrime].indexBP = 0
        RNA.Bases[threePrime].helixNumber = 0
| |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests for the HTMLTALParser code generator.
"""
import pprint
import unittest
from zope.tal import htmltalparser, taldefs
class TestCaseBase(unittest.TestCase):
    """Shared harness for HTMLTALParser tests.

    Subclasses may set prologue/epilogue to wrap each source snippet;
    expected programs are merged with initial_program/final_program
    before comparison.
    """

    prologue = ""
    epilogue = ""
    initial_program = [('version', taldefs.TAL_VERSION), ('mode', 'html')]
    final_program = []

    def _merge(self, p1, p2):
        """Concatenate two programs, fusing an adjacent rawtext pair at
        the seam so expectations match the parser's output."""
        if p1 and p2:
            op1, args1 = p1[-1]
            op2, args2 = p2[0]
            if op1.startswith('rawtext') and op2.startswith('rawtext'):
                return (p1[:-1]
                        + [rawtext(args1[0] + args2[0])]
                        + p2[1:])
        return p1 + p2

    def _run_check(self, source, program, macros=None):
        """Parse source and assert the generated program and macros.

        macros defaults to an empty dict; None is used instead of a
        mutable default argument to avoid accidental sharing.
        """
        if macros is None:
            macros = {}
        parser = htmltalparser.HTMLTALParser()
        parser.parseString(self.prologue + source + self.epilogue)
        got_program, got_macros = parser.getCode()
        program = self._merge(self.initial_program, program)
        program = self._merge(program, self.final_program)
        self.assertEqual(got_program, program,
                         "Program:\n" + pprint.pformat(got_program)
                         + "\nExpected:\n" + pprint.pformat(program))
        self.assertEqual(got_macros, macros,
                         "Macros:\n" + pprint.pformat(got_macros)
                         + "\nExpected:\n" + pprint.pformat(macros))

    def _should_error(self, source, exc=taldefs.TALError):
        """Assert that parsing source raises exc."""
        def parse(self=self, source=source):
            parser = htmltalparser.HTMLTALParser()
            parser.parseString(self.prologue + source + self.epilogue)
        self.assertRaises(exc, parse)
def rawtext(s):
    """Compile raw text to the appropriate instruction."""
    newline_at = s.rfind("\n")
    if newline_at < 0:
        # single-line text: record its total length
        return ("rawtextOffset", (s, len(s)))
    # multi-line text: record the column after the last newline
    return ("rawtextColumn", (s, len(s) - (newline_at + 1)))
class HTMLTALParserTestCases(TestCaseBase):
    """Plain-HTML parsing tests: each case feeds HTML through
    HTMLTALParser via _run_check and compares the generated program
    against the expected opcode list."""

    def test_code_simple_identity(self):
        # attribute quoting is normalized, implied end tags are added
        self._run_check("""<html a='b' b="c" c=d><title>My Title</html>""", [
            rawtext('<html a="b" b="c" c="d">'
                    '<title>My Title</title></html>'),
            ])

    def test_code_implied_list_closings(self):
        self._run_check("""<ul><li><p><p><li></ul>""", [
            rawtext('<ul><li><p></p><p></p></li><li></li></ul>'),
            ])
        self._run_check("""<dl><dt><dt><dd><dd><ol><li><li></ol></dl>""", [
            rawtext('<dl><dt></dt><dt></dt><dd></dd>'
                    '<dd><ol><li></li><li></li></ol></dd></dl>'),
            ])

    def test_code_implied_table_closings(self):
        self._run_check(
            """<p>text <table><tr><th>head\t<tr><td>cell\t"""
            """<table><tr><td>cell \n \t \n<tr>""", [
                rawtext(
                    '<p>text</p> <table><tr><th>head</th>'
                    '</tr>\t<tr><td>cell\t<table><tr><td>cell</td>'
                    '</tr> \n \t \n<tr></tr></table></td></tr></table>'), ])
        self._run_check(
            """<table><tr><td>cell """
            """<table><tr><td>cell </table></table>""",
            [rawtext('<table><tr><td>cell <table><tr><td>cell</td></tr>'
                     ' </table></td></tr></table>'),
             ])

    def test_code_bad_nesting(self):
        # mis-nested tags must raise NestingError
        def check(self=self):
            self._run_check("<a><b></a></b>", [])
        self.assertRaises(htmltalparser.NestingError, check)

    def test_code_attr_syntax(self):
        # all attribute spellings normalize to the same output
        output = [
            rawtext('<a b="v" c="v" d="v" e></a>'),
            ]
        self._run_check("""<a b='v' c="v" d=v e>""", output)
        self._run_check("""<a b = 'v' c = "v" d = v e>""", output)
        self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
        self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)

    def test_code_attr_values(self):
        # embedded whitespace in attribute values is preserved
        self._run_check(
            """<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""", [
                rawtext('<a b="xxx\n\txxx" c="yyy\t\nyyy" d="\txyz\n"></a>')])
        self._run_check("""<a b='' c="">""", [
            rawtext('<a b="" c=""></a>'),
            ])

    def test_code_attr_entity_replacement(self):
        # we expect entities *not* to be replaced by HTMLParser!
        self._run_check("""<a b='&><"''>""", [
            rawtext('<a b="&><"\'"></a>'),
            ])
        self._run_check("""<a b='\"'>""", [
            rawtext('<a b="""></a>'),
            ])
        self._run_check("""<a b='&'>""", [
            rawtext('<a b="&"></a>'),
            ])
        self._run_check("""<a b='<'>""", [
            rawtext('<a b="<"></a>'),
            ])

    def test_code_attr_funky_names(self):
        self._run_check("""<a a.b='v' c:d=v e-f=v>""", [
            rawtext('<a a.b="v" c:d="v" e-f="v"></a>'),
            ])

    def test_code_pcdata_entityref(self):
        self._run_check(""" """, [
            rawtext(' '),
            ])

    def test_code_short_endtags(self):
        self._run_check("""<html><img/></html>""", [
            rawtext('<html><img /></html>'),
            ])
class METALGeneratorTestCases(TestCaseBase):
    """METAL macro tests: define-macro, use-macro and slot handling.
    Expected values are the opcode programs the parser should emit."""

    def test_null(self):
        self._run_check("", [])

    def test_define_macro(self):
        # the macro body is both emitted inline and recorded in macros
        macro = self.initial_program + [
            ('startTag', ('p', [('metal:define-macro', 'M', 'metal')])),
            rawtext('booh</p>'),
            ]
        program = [
            ('setPosition', (1, 0)),
            ('defineMacro', ('M', macro)),
            ]
        macros = {'M': macro}
        self._run_check('<p metal:define-macro="M">booh</p>', program, macros)

    def test_use_macro(self):
        self._run_check('<p metal:use-macro="M">booh</p>', [
            ('setPosition', (1, 0)),
            ('useMacro',
             ('M', '$M$', {},
              [('startTag', ('p', [('metal:use-macro', 'M', 'metal')])),
               rawtext('booh</p>')])),
            ])

    def test_define_slot(self):
        # a defineSlot op is nested inside the recorded macro body
        macro = self.initial_program + [
            ('startTag', ('p', [('metal:define-macro', 'M', 'metal')])),
            rawtext('foo'),
            ('setPosition', (1, 29)),
            ('defineSlot', ('S',
                            [('startTag', ('span', [('metal:define-slot',
                                                     'S',
                                                     'metal')])),
                             rawtext('spam</span>')])),
            rawtext('bar</p>'),
            ]
        program = [('setPosition', (1, 0)),
                   ('defineMacro', ('M', macro))]
        macros = {'M': macro}
        self._run_check('<p metal:define-macro="M">foo'
                        '<span metal:define-slot="S">spam</span>bar</p>',
                        program, macros)

    def test_fill_slot(self):
        # the slot filler appears both in the slot map and in the body
        self._run_check(
            '<p metal:use-macro="M">foo'
            '<span metal:fill-slot="S">spam</span>bar</p>', [
                ('setPosition', (1, 0)),
                ('useMacro',
                 ('M', '$M$',
                  {'S': [('startTag', ('span',
                                       [('metal:fill-slot', 'S', 'metal')])),
                         rawtext('spam</span>')]},
                  [('startTag', ('p', [('metal:use-macro', 'M', 'metal')])),
                   rawtext('foo'),
                   ('setPosition', (1, 26)),
                   ('fillSlot', ('S', [
                       ('startTag', (
                           'span',
                           [('metal:fill-slot', 'S', 'metal')])),
                       rawtext('spam</span>')])),
                   rawtext('bar</p>')])),
            ])
class TALGeneratorTestCases(TestCaseBase):
def test_null(self):
self._run_check("", [])
def test_define_1(self):
self._run_check("<p tal:define='xyzzy string:spam'></p>", [
('setPosition', (1, 0)),
('beginScope', {'tal:define': 'xyzzy string:spam'}),
('setLocal', ('xyzzy', '$string:spam$')),
('startTag', ('p', [('tal:define', 'xyzzy string:spam', 'tal')])),
('endScope', ()),
rawtext('</p>'),
])
def test_define_2(self):
self._run_check("<p tal:define='local xyzzy string:spam'></p>", [
('setPosition', (1, 0)),
('beginScope', {'tal:define': 'local xyzzy string:spam'}),
('setLocal', ('xyzzy', '$string:spam$')),
('startTag', ('p',
[('tal:define', 'local xyzzy string:spam', 'tal')])),
('endScope', ()),
rawtext('</p>'),
])
def test_define_3(self):
self._run_check("<p tal:define='global xyzzy string:spam'></p>", [
('setPosition', (1, 0)),
('beginScope', {'tal:define': 'global xyzzy string:spam'}),
('setGlobal', ('xyzzy', '$string:spam$')),
('startTag', ('p',
[('tal:define',
'global xyzzy string:spam',
'tal')])),
('endScope', ()),
rawtext('</p>'),
])
def test_define_4(self):
self._run_check("<p tal:define='x string:spam; y x'></p>", [
('setPosition', (1, 0)),
('beginScope', {'tal:define': 'x string:spam; y x'}),
('setLocal', ('x', '$string:spam$')),
('setLocal', ('y', '$x$')),
('startTag', ('p', [('tal:define', 'x string:spam; y x', 'tal')])),
('endScope', ()),
rawtext('</p>'),
])
def test_define_5(self):
self._run_check("<p tal:define='x string:;;;;; y x'></p>", [
('setPosition', (1, 0)),
('beginScope', {'tal:define': 'x string:;;;;; y x'}),
('setLocal', ('x', '$string:;;$')),
('setLocal', ('y', '$x$')),
('startTag', ('p', [('tal:define', 'x string:;;;;; y x', 'tal')])),
('endScope', ()),
rawtext('</p>'),
])
def test_define_6(self):
self._run_check(
"<p tal:define='x string:spam; global y x; local z y'></p>", [
('setPosition', (1, 0)),
('beginScope',
{'tal:define': 'x string:spam; global y x; local z y'}),
('setLocal', ('x', '$string:spam$')),
('setGlobal', ('y', '$x$')),
('setLocal', ('z', '$y$')),
('startTag', ('p',
[('tal:define',
'x string:spam; global y x; local z y',
'tal')])),
('endScope', ()),
rawtext('</p>'),
])
def test_condition(self):
self._run_check(
"<p><span tal:condition='python:1'><b>foo</b></span></p>", [
rawtext('<p>'),
('setPosition', (1, 3)),
('beginScope', {'tal:condition': 'python:1'}),
('condition', ('$python:1$',
[('startTag', ('span', [('tal:condition',
'python:1',
'tal')])),
rawtext('<b>foo</b></span>')])),
('endScope', ()),
rawtext('</p>'),
])
def test_content_1(self):
self._run_check("<p tal:content='string:foo'>bar</p>", [
('setPosition', (1, 0)),
('beginScope', {'tal:content': 'string:foo'}),
('startTag', ('p', [('tal:content', 'string:foo', 'tal')])),
('insertText', ('$string:foo$', [rawtext('bar')])),
('endScope', ()),
rawtext('</p>'),
])
def test_content_2(self):
self._run_check("<p tal:content='text string:foo'>bar</p>", [
('setPosition', (1, 0)),
('beginScope', {'tal:content': 'text string:foo'}),
('startTag', ('p', [('tal:content', 'text string:foo', 'tal')])),
('insertText', ('$string:foo$', [rawtext('bar')])),
('endScope', ()),
rawtext('</p>'),
])
def test_content_3(self):
self._run_check("<p tal:content='structure string:<br>'>bar</p>", [
('setPosition', (1, 0)),
('beginScope', {'tal:content': 'structure string:<br>'}),
('startTag', ('p',
[('tal:content', 'structure string:<br>', 'tal')])),
('insertStructure',
('$string:<br>$', {}, [rawtext('bar')])),
('endScope', ()),
rawtext('</p>'),
])
def test_replace_1(self):
    """tal:replace swaps the whole element for the expression result via 'optTag'."""
    self._run_check("<p tal:replace='string:foo'>bar</p>", [
        ('setPosition', (1, 0)),
        ('beginScope', {'tal:replace': 'string:foo'}),
        ('optTag',
         ('p',
          '',
          None,
          0,
          [('startTag', ('p', [('tal:replace', 'string:foo', 'tal')]))],
          [('insertText', ('$string:foo$', [('rawtextOffset',
                                             ('bar', 3))]))])),
        ('endScope', ()),
    ])
def test_replace_2(self):
    """Explicit 'text' type in tal:replace compiles the same as the default type."""
    self._run_check("<p tal:replace='text string:foo'>bar</p>", [
        ('setPosition', (1, 0)),
        ('beginScope', {'tal:replace': 'text string:foo'}),
        ('optTag',
         ('p',
          '',
          None,
          0,
          [('startTag', ('p', [('tal:replace',
                                'text string:foo',
                                'tal')]))],
          [('insertText', ('$string:foo$', [('rawtextOffset',
                                             ('bar', 3))]))])),
        ('endScope', ()),
    ])
def test_replace_3(self):
    """'structure' type in tal:replace emits 'insertStructure' within the optTag."""
    self._run_check(
        "<p tal:replace='structure string:<br>'>bar</p>", [
            ('setPosition', (1, 0)),
            ('beginScope',
             {'tal:replace': 'structure string:<br>'}),
            ('optTag',
             ('p',
              '',
              None,
              0,
              [('startTag', (
                  'p', [(
                      'tal:replace',
                      'structure string:<br>',
                      'tal')]))],
              [('insertStructure', ('$string:<br>$', {}, [
                  ('rawtextOffset', ('bar', 3))]))])),
            ('endScope', ()), ])
def test_repeat(self):
    """tal:repeat compiles to a 'loop' event containing the repeated subtree."""
    self._run_check(
        "<p tal:repeat='x python:(1,2,3)'>"
        "<span tal:replace='x'>dummy</span></p>", [
            ('setPosition', (1, 0)),
            ('beginScope', {'tal:repeat': 'x python:(1,2,3)'}),
            ('loop', ('x', '$python:(1,2,3)$',
                      [('startTag', ('p',
                                     [('tal:repeat',
                                       'x python:(1,2,3)', 'tal')])),
                       ('setPosition', (1, 33)),
                       ('beginScope', {'tal:replace': 'x'}),
                       ('optTag',
                        ('span',
                         '',
                         None,
                         0,
                         [('startTag',
                           ('span', [('tal:replace', 'x', 'tal')]))],
                         [('insertText',
                           ('$x$',
                            [('rawtextOffset', ('dummy', 5))]))])),
                       ('endScope', ()),
                       rawtext('</p>')])),
            ('endScope', ()),
        ])
def test_script_1(self):
    """tal:script on a normal element emits 'evaluateCode' for the element body."""
    self._run_check('<p tal:script="text/server-python">code</p>', [
        ('setPosition', (1, 0)),
        ('beginScope', {'tal:script': 'text/server-python'}),
        ('startTag', ('p',
                      [('tal:script', 'text/server-python', 'tal')])),
        ('evaluateCode', ('text/server-python',
                          [('rawtextOffset', ('code', 4))])),
        ('endScope', ()),
        rawtext('</p>'),
    ])
def test_script_2(self):
    """script attribute on a tal:block element: the tag itself is optional (optTag)."""
    self._run_check(
        '<tal:block script="text/server-python">code</tal:block>', [
            ('setPosition', (1, 0)),
            ('beginScope', {'script': 'text/server-python'}),
            ('optTag',
             ('tal:block',
              None,
              'tal',
              0,
              [('startTag',
                ('tal:block', [('script', 'text/server-python', 'tal')]))],
              [('evaluateCode',
                ('text/server-python',
                 [('rawtextOffset', ('code', 4))]))])),
            ('endScope', ())
        ])
def test_script_3(self):
    """<script> with a server-side type is compiled to 'evaluateCode'."""
    self._run_check('<script type="text/server-python">code</script>', [
        ('setPosition', (1, 0)),
        ('beginScope', {}),
        ('optTag',
         ('script',
          '',
          None,
          0,
          [('rawtextOffset', ('<script>', 8))],
          [('evaluateCode',
            ('text/server-python', [('rawtextOffset', ('code', 4))]))])),
        ('endScope', ())
    ])
def test_script_4(self):
    """Client-side <script> (text/javascript) is passed through as raw text."""
    self._run_check('<script type="text/javascript">code</script>', [
        ('rawtextOffset',
         ('<script type="text/javascript">code</script>', 44))
    ])
def test_script_5(self):
    """Markup-like strings inside a client-side script stay untouched raw text."""
    self._run_check(
        """<script type="text/javascript">var foo = '<div></div>';</script>""", [  # noqa: E501 line too long
            ('rawtextOffset',
             ("""<script type="text/javascript">var foo = '<div></div>';</script>""",  # noqa: E501 line too long
              64))])
def test_attributes_1(self):
    """tal:attributes: existing attrs get 'replace' entries, new ones 'insert' entries."""
    self._run_check(
        "<a href='foo' name='bar' tal:attributes="
        "'href string:http://www.zope.org; x string:y'>"
        "link</a>", [
            ('setPosition', (1, 0)),
            ('beginScope',
             {'tal:attributes':
              'href string:http://www.zope.org; x string:y',
              'name': 'bar', 'href': 'foo'}),
            ('startTag',
             ('a',
              [('href', 'foo', 'replace', '$string:http://www.zope.org$',
                0, None),
               ('name', 'name="bar"'),
               ('tal:attributes',
                'href string:http://www.zope.org; x string:y', 'tal'),
               ('x', None, 'insert', '$string:y$', 0, None)])),
            ('endScope', ()),
            rawtext('link</a>'),
        ])
def test_attributes_2(self):
    """tal:attributes combined with structure tal:replace feeds the attr dict to insertStructure."""
    self._run_check(
        "<p tal:replace='structure string:<img>' "
        "tal:attributes='src string:foo.png'>duh</p>", [
            ('setPosition', (1, 0)),
            ('beginScope',
             {'tal:attributes': 'src string:foo.png',
              'tal:replace': 'structure string:<img>'}),
            ('optTag',
             ('p',
              '',
              None,
              0,
              [('startTag',
                ('p',
                 [('tal:replace', 'structure string:<img>', 'tal'),
                  ('tal:attributes', 'src string:foo.png', 'tal')]))],
              [('insertStructure',
                ('$string:<img>$',
                 {'src': ('$string:foo.png$', False, None)},
                 [('rawtextOffset', ('duh', 3))]))])),
            ('endScope', ())])
def test_on_error_1(self):
    """tal:on-error with tal:content: 'onError' carries the normal and fallback programs."""
    self._run_check(
        "<p tal:on-error='string:error' "
        "tal:content='notHere'>okay</p>", [
            ('setPosition', (1, 0)),
            ('beginScope',
             {'tal:content': 'notHere', 'tal:on-error': 'string:error'}),
            ('onError',
             ([('startTag', ('p',
                             [('tal:on-error', 'string:error', 'tal'),
                              ('tal:content', 'notHere', 'tal')])),
               ('insertText', ('$notHere$', [rawtext('okay')])),
               rawtext('</p>')],
              [('startTag', ('p',
                             [('tal:on-error', 'string:error', 'tal'),
                              ('tal:content', 'notHere', 'tal')])),
               ('insertText', ('$string:error$', [])),
               rawtext('</p>')])),
            ('endScope', ()),
        ])
def test_on_error_2(self):
self._run_check(
"<p tal:on-error='string:error' "
"tal:replace='notHere'>okay</p>", [
('setPosition', (1, 0)),
('beginScope',
{'tal:replace': 'notHere', 'tal:on-error': 'string:error'}),
('onError',
([('optTag',
('p',
'',
None,
0,
[('startTag',
| |
# File: results/display_results.py
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
sys.path.append('..')
sys.path.append('.')
import argparse
from pathlib import Path
import glob
import os.path
from configparser import ConfigParser
import pandas as pd
import json
import yaml
import tqdm
import numpy as np
import math
pd.options.display.max_rows = 999
pd.options.display.width = 0
def main():
    """CLI entry point: load run results, aggregate, and display or save them.

    Flow: parse args -> optionally clear the per-run results cache ->
    build the results DataFrame -> aggregate per --agg -> print (plain or
    LaTeX) or drop into a debugger -> optionally save winning test configs.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--clear-cache', action='store_true', default=False)
    ap.add_argument('--agg', choices=['mean', 'std', 'min', 'count', 'last', 'none', "enum"], default='min')
    ap.add_argument('--show', action='append', default=[])
    ap.add_argument('--remove-outliers', action='store_true', default=False)
    ap.add_argument('--interactive', action='store_true', default=False)
    ap.add_argument('--save-test-configs', type=Path, help='directory where to put configuration files for winning hyperparameters')
    ap.add_argument('--latex', action='store_true', default=False)
    ap.add_argument('log_path')
    args = ap.parse_args()
    if args.clear_cache:
        # Destructive: confirm before forcing a full re-parse of every run.
        ans = input("You are about to clear the results cache. Are you sure? [y]/n: ") or "y"
        if ans != "y":
            args.clear_cache = False
    else:
        sys.stderr.write('warning: using cached results when available\n')
    df_data = get_results(Path(args.log_path), args.clear_cache)
    if args.remove_outliers:
        # Runs with perplexity >= 1000 are treated as diverged and dropped.
        print("removing", (df_data.total_pp >= 1000).sum(), " entries")
        df_data = df_data.loc[df_data.total_pp < 1000]
    df_grouped = get_df_grouped(df_data, args.agg)
    if args.interactive:
        breakpoint()
    elif args.latex:
        display_latex_results(df_grouped)
    else:
        display_results(df_grouped, args.show)
    if args.save_test_configs:
        args.save_test_configs.mkdir(exist_ok=True)
        # NOTE(review): df_to_numeric is not defined in this file's visible
        # scope - confirm it is imported or defined elsewhere.
        flat_df = df_to_numeric(df_grouped.reset_index(drop=True))
        # Only dev-set rows are used to select hyperparameters for test runs.
        save_test_configs(flat_df.loc[flat_df.data.str.contains("dev")],
                          args.save_test_configs)
def display_latex_results(df):
    """Print one LaTeX table row per (architecture, nhid, trainer) combination."""
    df = df.reset_index(drop=False)
    for arch in df.architecture.unique():
        for hidden in df.nhid.unique():
            for trainer in df.weights_trainer.unique():
                values = collect_latex_row(df, arch, hidden, trainer)
                cells = " & ".join(f"${v:.3g}$" for v in values)
                print(arch, hidden, trainer, "&", cells, r'\\')
def collect_latex_row(df, arch, h, trainer):
    """Gather the metric triples for the four benchmark (data, lang_switch) settings."""
    settings = (("news_test", '10000'), ("news_test", '100000'),
                ("domain_test", '10000'), ("domain_test", '20000'))
    values = []
    for data, lang_switch in settings:
        values += get_latex_values(df, arch, h, trainer, data, lang_switch)
    return values
def get_latex_values(df, arch, h, trainer, data, lang_switch):
    """Look up the unique run matching the given configuration; return its three metrics."""
    mask = ((df.architecture == arch) & (df.nhid == h)
            & (df.weights_trainer == trainer) & (df.data == data)
            & (df.lang_switch == lang_switch))
    matches = df.loc[mask]
    # Exactly one run must match, otherwise the table would be ambiguous.
    assert len(matches) == 1
    run = matches.iloc[0]
    return run.total_pp, run.surprisal_intensity, run.surprisal_duration
def get_results(log_path=None, clear_cache=False):
    """Load every run's config and parsed measures into one DataFrame.

    The frame is indexed by the run directory's basename; the full path is
    preserved in the 'orig_path' column.
    """
    df_configs = read_configs(log_path)
    df_data = add_results(df_configs, clear_cache)
    df_data = df_data.rename_axis('path').reset_index()
    # Bug fix: attribute assignment (df.orig_path = ...) does NOT create a
    # new column - it only sets an instance attribute that is silently lost.
    # Item assignment is required for a new column.
    df_data['orig_path'] = df_data.path
    df_data['path'] = df_data.path.apply(lambda x: os.path.basename(x))
    df_data = df_data.set_index('path', drop=False)
    return df_data
def read_configs(log_path):
    """Collect every run's config.ini (recursively under log_path) into a DataFrame.

    Only runs that already produced a 'general_pp.jsonl' results file are
    kept.  The DataFrame is indexed by the run directory path and gains a
    'results_file' column pointing at that jsonl file.
    """
    configs = {}
    for config_file in glob.glob(str(log_path / '**/config.ini'), recursive=True):
        config_file = Path(config_file)
        config_dir = config_file.parent
        results_file = config_dir/'general_pp.jsonl'
        if (results_file).is_file():
            configs[str(config_dir)] = read_config_file(config_file)
            configs[str(config_dir)]['results_file'] = results_file
    df = pd.DataFrame.from_dict(configs, orient = 'index')
    # Keep only the dataset basename (e.g. 'news_dev') rather than its full path.
    df['data'] = df['data'].apply(lambda path: os.path.basename(path))
    #df = df.drop_duplicates(keep='last')
    return df
def add_results(df_configs, clear_cache):
    """Attach the parsed result measures to each config row (aligned on run dir)."""
    parsed = {
        run_dir: read_results(Path(run_dir) / 'general_pp.jsonl', clear_cache)
        for run_dir in tqdm.tqdm(df_configs.index, desc='parsing results')
    }
    df_results = pd.DataFrame.from_dict(parsed, orient='index')
    return pd.concat([df_configs, df_results], axis=1, sort=True)
def get_loss_history_for_row(row):
    """Return the per-step loss array for one result row's jsonl file."""
    return get_loss_history(parse_results(row['results_file']))
def read_results(filename, clear_cache):
    """Parse one run's jsonl results, caching the extracted measures as JSON.

    The cache file sits next to the results file; pass clear_cache=True to
    force re-parsing.
    """
    results_cache_filename = filename.parent / 'results_cache.json'
    if not clear_cache and results_cache_filename.is_file():
        # Close handles deterministically - json.load(open(...)) leaks the
        # file object until the GC happens to collect it.
        with open(results_cache_filename) as f:
            return json.load(f)
    parsed_results = parse_results(filename)
    results = extract_measures(parsed_results)
    with open(results_cache_filename, 'w') as f:
        json.dump(results, f)
    return results
def get_loss_history(parsed_results):
    """Return the sequence of per-record losses as a numpy array."""
    return np.array([record['loss'] for record in parsed_results])
def get_switch_times(parsed_results):
    """Return the index of the first record of each distinct sequence id."""
    sequence_ids = np.array([record['sequence'] for record in parsed_results])
    # np.unique with return_index yields the first occurrence of each id.
    _, first_occurrences = np.unique(sequence_ids, return_index=True)
    return first_occurrences
def get_domain_history(parsed_results, switch_times):
    """Return the domain id active at each switch point, in switch order."""
    active_domains = [parsed_results[idx]['domain'] for idx in switch_times]
    return np.array(active_domains)
def get_domain_names(parsed_results):
    """Map each domain id to its human-readable name (last record wins)."""
    return {record['domain']: record['domain_name'] for record in parsed_results}
def parse_results(filename):
    """Load jsonl records with sequence >= 50; return [] for incomplete runs.

    A run counts as incomplete when no record passes the filter or when the
    file's last record has sequence < 99 (training stopped early).
    """
    kept = []
    last_record = None
    with open(filename) as f:
        for line in f:
            last_record = json.loads(line)
            if last_record['sequence'] >= 50:
                kept.append(last_record)
    if not kept or last_record['sequence'] < 99:
        return []
    return kept
def calc_surprisal_intensity(losses, _):
    """Mean loss over the first 10 steps after a domain switch (2nd arg unused)."""
    window = losses[:10]
    return np.mean(window)
def calc_surprisal_duration(losses, prev_loss):
    """Count leading steps whose loss has not yet dropped below prev_loss."""
    duration = 0
    for loss in losses:
        if loss < prev_loss:
            # Recovery: loss is back under the previous visit's mean loss.
            break
        duration += 1
    return duration
def get_mean_loss_by_domain(parsed_results):
    """Average loss per domain, keyed by human-readable domain name."""
    names = get_domain_names(parsed_results)
    stitched = stitch_losses_by_domain(parsed_results)
    return {names[domain]: np.mean(domain_losses)
            for domain, domain_losses in stitched.items()}
def get_losses_after_switch(parsed_results, k=10):
    """Losses for the first k steps after every domain switch.

    NOTE(review): assumes each switch is followed by at least k records;
    otherwise the windows are ragged and np.array produces an object array.
    """
    losses = get_loss_history(parsed_results)
    windows = [losses[start:start + k] for start in get_switch_times(parsed_results)]
    return np.array(windows)
def stitch_losses_by_domain(parsed_results):
    """Concatenate each domain's loss history across all of its visits.

    Returns a dict mapping domain id -> list of losses in visit order.
    """
    loss_history = get_loss_history(parsed_results)
    switch_times = get_switch_times(parsed_results)
    dom_names = get_domain_history(parsed_results, switch_times)
    loss_per_domain = {}
    for i in range(len(switch_times) - 1):
        segment = loss_history[switch_times[i]:switch_times[i + 1]]
        loss_per_domain.setdefault(dom_names[i], []).extend(segment)
    if len(switch_times):
        # Bug fix: the final domain may occur only at the last switch, in
        # which case plain indexing raised KeyError; setdefault covers it.
        # (Guarding on len also avoids IndexError for empty input.)
        tail = loss_history[switch_times[-1]:]
        loss_per_domain.setdefault(dom_names[-1], []).extend(tail)
    return loss_per_domain
def get_surprisal_by_domain(parsed_results, surprisal_measure):
    """Apply `surprisal_measure` to every re-visit of each domain.

    surprisal_measure(segment_losses, prev_mean_loss) is evaluated for every
    domain visit except the first one of that domain (there is no previous
    loss to compare against).  Returns (overall average,
    {domain_name: average}).

    NOTE(review): with fewer than two visits of any domain, all_surprisals
    is empty and np.average raises ZeroDivisionError - confirm callers only
    run this on complete runs.
    """
    loss_history = get_loss_history(parsed_results)
    switch_times = get_switch_times(parsed_results)
    dom_names = get_domain_history(parsed_results, switch_times)
    real_domain_names = get_domain_names(parsed_results)
    surprisal_per_domain = {}
    avg_surprisal_per_domain = {}
    prev_losses = {}
    for i in range(len(switch_times)-1):
        if dom_names[i] not in surprisal_per_domain:
            surprisal_per_domain[dom_names[i]] = []
        local_losses = loss_history[switch_times[i]:switch_times[i+1]]
        if dom_names[i] in prev_losses:
            # Only measure surprisal from the second visit onwards.
            surprisal_per_domain[dom_names[i]].append(surprisal_measure(local_losses, prev_losses[dom_names[i]]))
        prev_losses[dom_names[i]] = np.mean(local_losses)
    #surprisal_per_domain[dom_names[-1]].append(surprisal_measure(local_losses))
    all_surprisals = []
    for el in surprisal_per_domain:
        avg_surprisal_per_domain[real_domain_names[el]] = np.average(surprisal_per_domain[el])
        all_surprisals.extend(surprisal_per_domain[el])
    gen_avg_surprisal = np.average(all_surprisals)
    return gen_avg_surprisal, avg_surprisal_per_domain
def extract_measures(parsed_results):
    """Compute summary statistics (perplexity, variability, surprisal) for one run.

    Returns {} when the run produced no usable loss history.
    """
    loss_history = get_loss_history(parsed_results)
    if len(loss_history) == 0:
        return {}
    loss_per_domain = stitch_losses_by_domain(parsed_results)
    loss = np.mean(loss_history)

    def autocorr(x):
        # Lag-1 autocorrelation of a loss sequence.
        return np.corrcoef(x[1:], x[:-1])[0, 1]

    std_per_domain = {domain: np.std(d_losses) for domain, d_losses in loss_per_domain.items()}
    autocorr_per_domain = {domain: autocorr(d_losses) for domain, d_losses in loss_per_domain.items()}
    # Renamed from `autocorr` to avoid shadowing the helper above.
    mean_std = np.mean(list(std_per_domain.values()))
    mean_autocorr = np.mean(list(autocorr_per_domain.values()))
    # Guard against float overflow in exp() for diverged runs.
    total_pp = np.exp(loss) if loss < 20 else float('inf')
    results = {'loss': loss, 'total_pp': total_pp,
               'std': mean_std,
               'autocorr': mean_autocorr}
    loss_by_domain = get_mean_loss_by_domain(parsed_results)
    surprisal_intensity, surprisal_intensity_per_domain = get_surprisal_by_domain(
        parsed_results, calc_surprisal_intensity)
    results['surprisal_intensity'] = np.exp(surprisal_intensity)
    surprisal_duration, surprisal_duration_per_domain = get_surprisal_by_domain(
        parsed_results, calc_surprisal_duration)
    results['surprisal_duration'] = surprisal_duration
    for domain, dloss in loss_by_domain.items():
        results[f'loss_{domain}'] = dloss
        # Bug fix: the overflow guard previously tested the *global* mean
        # loss instead of the per-domain loss being exponentiated.
        results[f'total_pp_{domain}'] = np.exp(dloss) if dloss < 20 else float('inf')
    for domain, dsurprisal in surprisal_intensity_per_domain.items():
        results[f'surprisal_intensity_{domain}'] = np.exp(dsurprisal)
    for domain, dsurprisal in surprisal_duration_per_domain.items():
        results[f'surprisal_duration_{domain}'] = dsurprisal
    return results
def read_config_file(filename):
    """Parse a 3-line-stanza config file: '[key]' line, assignment line, blank line.

    Returns a flat {key: value} dict where value is the last whitespace
    token of the assignment line.
    """
    config = {}
    # Bug fix: the file handle was opened without `with` and never closed.
    with open(filename, 'r') as fin:
        for count, line in enumerate(fin):
            if count % 3 == 0:
                # Strip the surrounding '[' and ']\n' from the section line.
                key = line[1:-2]
            if count % 3 == 1:
                els = line.strip().split()
                val = els[-1]
                config[key] = val
    return config
def get_id(filename):
    """Run id = the integer name of the file's parent directory."""
    parent_dir = filename.parent
    return int(parent_dir.name)
def display_results(df_grouped, show):
    """Pretty-print the aggregated results table to stdout.

    `show` selects the columns to display; when empty, a default set of
    hyperparameter and metric columns is used.
    """
    pd.options.display.float_format = '{:,.3g}'.format
    pd.set_option('display.max_columns', 500)
    # Dead code: uniform-value folding is disabled, so `uniform_values`
    # stays empty and the final loop prints nothing.
    uniform_values = {}
    # for col in df_grouped.columns:
    #     some_col_value = df_grouped.reset_index()[col].iloc[0]
    #     if ((df_grouped[col] == some_col_value).all()):
    #         uniform_values[col] = some_col_value
    #         df_grouped = df_grouped.drop(col, axis=1)
    if not show:
        show = ['data', 'total_length', 'lang_switch', 'architecture', 'nhid', 'weights_trainer']
        show += ['total_pp', 'surprisal_intensity', 'surprisal_duration', 'path', 'moe_warmup']
    print(df_grouped[[c for c in show if c in df_grouped.columns]])
    for k, v in uniform_values.items():
        print(f"{k: <20}\t{v}")
# Columns that identify a run configuration independently of architecture.
general_parameters = ['data', 'lang_switch', 'total_length', 'architecture', 'lr',
                      'dropout', 'emsize', 'nhid', 'nlayers']
# Per-architecture hyperparameter columns; used to match a dev-selected run
# with its corresponding test-set run (see get_test_results_for_dev_hyperparams).
arch_hyperparameters = {
    'moe': ['lr', 'learn_iterations', 'weights_trainer',
            'weights_trainer_iterations', 'max_memory_size', 'moe_warmup'],
    'mos': ['lr', 'nsoftmaxes', 'learn_iterations'],
    'poe': ['lang_switch', 'total_length', 'architecture', 'lr',
            'weights_trainer', 'learn_iterations', 'weights_trainer_lr',
            'weights_trainer_annealing', 'weight_normalization'],
    'clone': ['lang_switch', 'total_length', 'architecture', 'lr',
              'weights_trainer', 'learn_iterations', 'weights_trainer_lr',
              'weights_trainer_annealing', 'consolidation_period', 'max_stm_size',
              'max_memory_size', 'ltm_deallocation', 'stm_initialization'],
    'simple' : ['lang_switch', 'total_length', 'architecture', 'nhid', 'lr',
                'learn_iterations'],
    'transformer' : ['nhead','transformer_warmup', 'transformer_after_warmup'],
    'simple_per_domain' : ['lang_switch', 'total_length', 'architecture', 'nhid', 'lr',
                           'learn_iterations']}
def get_test_results_for_dev_hyperparams(df_data, dev_best, test_data):
    """For each dev-best run, find the test-set run with identical hyperparameters.

    Returns (test_rows, missing_rows): the matched test rows, and the dev
    rows for which no corresponding test run exists yet.
    """
    test_rows = []
    missing_rows = []
    for i, row in dev_best.iterrows():
        data_row = df_data.loc[row.path]
        selected = df_data.reset_index()
        selected = selected[selected['data'] == test_data]
        # Narrow to runs matching every architecture-specific hyperparameter.
        for h in arch_hyperparameters[data_row['architecture']]:
            val = data_row[h]
            selected = selected.loc[selected[h] == val]
        if selected.empty:
            missing_rows.append(data_row)
            continue
        # Sanity check: remaining candidates should be duplicates of the same
        # config (equal total_pp), except for known benign cases.
        assert selected.ndim == 1 or (selected.iloc[0]['total_pp'] == selected['total_pp']).all() or np.isnan(selected.iloc[0]['total_pp']) or (selected.iloc[0]['architecture'] == 'simple')
        test_rows.append(selected.iloc[0])
    return pd.DataFrame(test_rows), pd.DataFrame(missing_rows)
def get_df_grouped(df_data, op='min'):
    """Aggregate runs over hyperparameter groups with the chosen reduction.

    op: 'mean' | 'count' | 'std' | 'last' | 'enum' (join all values as a
    string) | 'min' (best run per group by total_pp) | 'none' (no grouping).
    Side effect: writes a 'z_score' column into df_data when op != 'none'.
    """
    group_by = ['data', 'total_length', 'lang_switch', 'architecture', 'nhid', 'weights_trainer']
    #pp_cols = [c for c in df_data.columns if c.startswith('total_pp')]
    #loss_cols = [c for c in df_data.columns if c.startswith('loss')]
    #surp_cols = [c for c in df_data.columns if c.startswith('surp')]
    #show.extend(pp_cols)
    #show.extend(loss_cols)
    #show.extend(surp_cols)
    #df_data = df_data[abs(df_data['z_score']) < 2]
    if op != 'none':
        merit = 'total_pp'
        df_grouped = df_data.groupby(group_by)
        # Per-group z-score of perplexity; the groupby is recomputed after
        # the assignment so later aggregations see the new column.
        df_data['z_score'] = df_grouped.total_pp.apply(lambda x: (x -x.mean()) /x.std())
        df_grouped = df_data.groupby(group_by)
        if op == 'mean':
            df_grouped = df_grouped.mean()
        elif op == 'count':
            df_grouped = df_grouped.count()
        elif op == 'std':
            df_grouped = df_grouped.std()
        elif op == 'last':
            df_grouped = df_grouped.last()
        elif op == 'enum':
            # Join every value of each column into one comma-separated string.
            df_grouped = df_grouped.apply(lambda x: x.loc[~x[merit].isna()].apply(lambda v: ", ".join(v.astype(str))))
        elif op == 'min':
            # Keep the row(s) with the lowest perplexity in each group.
            df_grouped = df_grouped.apply(lambda x: x.loc[x[merit] == x[merit].min(), x.keys()])
            df_grouped = df_grouped.drop_duplicates(subset=[merit])
    else:
        df_grouped = df_data
    #df_grouped = df_grouped[[merit]+show]
    #.sort_values(merit)
    return df_grouped
def row_to_command_line(df_data, dr_run, make_test=False):
    """Reconstruct the training command line that reproduces the run `dr_run`.

    Skips bookkeeping/result columns and unset/false values; when make_test
    is True the dev dataset is swapped for its test counterpart.
    """
    args = []
    for c in dr_run.index:
        if c in ['log_dir', 'results_file', 'save']:
            continue
        if c in ['loss', 'pos_spikyness', 'neg_spikyness', 'total_pp', 'autocorr', 'std']:
            continue
        if c in ['cluster_run', 'cluster_run_name'] and not make_test:
            continue
        if c.startswith('loss') or c.startswith('total_pp'):
            continue
        val = dr_run.loc[c]
        if val == 'None':
            continue
        if val == 'nan':
            continue
        if isinstance(val, float) and math.isnan(val):
            continue
        if val == 'False':
            continue
        if val == 'NA':
            continue
        if val == 'True':
            # Boolean flags are passed without a value.
            val = ''
        try:
            # Render integral floats ('10.0') as plain ints ('10').
            if float(val) == int(float(val)):
                val = str(int(float(val)))
        except (TypeError, ValueError):
            # Not numeric; keep the string as-is.  (Was a bare `except:`,
            # which would also have swallowed KeyboardInterrupt/SystemExit.)
            pass
        if c not in df_data.columns:
            continue
        if c == 'data':
            if make_test:
                val = val[:-len("dev")] + "test"
            data_path = '/checkpoint/germank/growing-rnn/data/'
            val = os.path.join(data_path, val)
        if ((val == df_data.loc[:, c]) | df_data.loc[:, c].isna()).all():
            # Value shared by every run: not needed to disambiguate this one.
            dr_run = dr_run.drop(c, axis=0)
        else:
            c = c.replace('_', '-')
            args.append((c, val))
    command_line_args = " ".join(f"--{k} {v}" for k, v in args)
    return command_line_args
def save_test_configs(df_best_cfgs, save_dir):
for k,row in df_best_cfgs.iterrows():
cfg = cfg_ds2dict(row)
if | |
31), inst.date_de)
self.assertEqual(None, inst.date_en)
inst.date = datetime.date(1999, 1, 1)
inst.save()
self.assertEqual(datetime.date(1999, 1, 1), inst.date)
self.assertEqual(datetime.date(1999, 1, 1), inst.date_de)
self.assertEqual(None, inst.date_en)
qs = models.OtherFieldsModel.objects.filter(date='1999-1-1')
self.assertEqual(len(qs), 1)
self.assertEqual(qs[0].date, datetime.date(1999, 1, 1))
trans_real.activate('en')
inst.date = datetime.date(2012, 12, 31)
self.assertEqual(datetime.date(2012, 12, 31), inst.date)
self.assertEqual(datetime.date(1999, 1, 1), inst.date_de)
self.assertEqual(datetime.date(2012, 12, 31), inst.date_en)
def test_translated_models_datetime_instance(self):
    """DateTimeField: writes go to the active language's field; reads follow the active language."""
    inst = models.OtherFieldsModel()
    inst.datetime = datetime.datetime(2012, 12, 31, 23, 42)
    self.assertEqual('de', get_language())
    self.assertEqual(datetime.datetime(2012, 12, 31, 23, 42), inst.datetime)
    self.assertEqual(datetime.datetime(2012, 12, 31, 23, 42), inst.datetime_de)
    self.assertEqual(None, inst.datetime_en)
    inst.datetime = datetime.datetime(1999, 1, 1, 23, 42)
    inst.save()
    self.assertEqual(datetime.datetime(1999, 1, 1, 23, 42), inst.datetime)
    self.assertEqual(datetime.datetime(1999, 1, 1, 23, 42), inst.datetime_de)
    self.assertEqual(None, inst.datetime_en)
    # Filtering on the original field name uses the current language's column.
    qs = models.OtherFieldsModel.objects.filter(datetime='1999-1-1 23:42')
    self.assertEqual(len(qs), 1)
    self.assertEqual(qs[0].datetime, datetime.datetime(1999, 1, 1, 23, 42))
    trans_real.activate('en')
    inst.datetime = datetime.datetime(2012, 12, 31, 23, 42)
    self.assertEqual(datetime.datetime(2012, 12, 31, 23, 42), inst.datetime)
    self.assertEqual(datetime.datetime(1999, 1, 1, 23, 42), inst.datetime_de)
    self.assertEqual(datetime.datetime(2012, 12, 31, 23, 42), inst.datetime_en)
def test_translated_models_time_instance(self):
    """TimeField: writes go to the active language's field; reads follow the active language."""
    inst = models.OtherFieldsModel()
    inst.time = datetime.time(23, 42, 0)
    self.assertEqual('de', get_language())
    self.assertEqual(datetime.time(23, 42, 0), inst.time)
    self.assertEqual(datetime.time(23, 42, 0), inst.time_de)
    self.assertEqual(None, inst.time_en)
    inst.time = datetime.time(1, 2, 3)
    inst.save()
    self.assertEqual(datetime.time(1, 2, 3), inst.time)
    self.assertEqual(datetime.time(1, 2, 3), inst.time_de)
    self.assertEqual(None, inst.time_en)
    # Filtering on the original field name uses the current language's column.
    qs = models.OtherFieldsModel.objects.filter(time='01:02:03')
    self.assertEqual(len(qs), 1)
    self.assertEqual(qs[0].time, datetime.time(1, 2, 3))
    trans_real.activate('en')
    inst.time = datetime.time(23, 42, 0)
    self.assertEqual(datetime.time(23, 42, 0), inst.time)
    self.assertEqual(datetime.time(1, 2, 3), inst.time_de)
    self.assertEqual(datetime.time(23, 42, 0), inst.time_en)
def test_dates_queryset(self):
    """QuerySet.dates() works on a translated datetime field."""
    Model = models.OtherFieldsModel
    Model.objects.create(datetime=datetime.datetime(2015, 9, 2, 0, 0))
    Model.objects.create(datetime=datetime.datetime(2014, 8, 3, 0, 0))
    Model.objects.create(datetime=datetime.datetime(2013, 7, 4, 0, 0))
    qs = Model.objects.dates('datetime', 'year', 'DESC')
    self.assertEqual(
        list(qs),
        [datetime.date(2015, 1, 1), datetime.date(2014, 1, 1), datetime.date(2013, 1, 1)],
    )
def test_descriptors(self):
    """Custom field descriptors keep working on translated fields (per language)."""
    # Descriptor store ints in database and returns string of 'a' of that length
    inst = models.DescriptorModel()
    # Demonstrate desired behaviour
    inst.normal = 2
    self.assertEqual('aa', inst.normal)
    inst.normal = 'abc'
    self.assertEqual('aaa', inst.normal)
    # Descriptor on translated field works too
    self.assertEqual('de', get_language())
    inst.trans = 5
    self.assertEqual('aaaaa', inst.trans)
    inst.save()
    # The DB stores the raw ints, per language column.
    db_values = models.DescriptorModel.objects.raw_values('normal', 'trans_en', 'trans_de')[0]
    self.assertEqual(3, db_values['normal'])
    self.assertEqual(5, db_values['trans_de'])
    self.assertEqual(0, db_values['trans_en'])
    # Retrieval from db
    inst = models.DescriptorModel.objects.all()[0]
    self.assertEqual('aaa', inst.normal)
    self.assertEqual('aaaaa', inst.trans)
    self.assertEqual('aaaaa', inst.trans_de)
    self.assertEqual('', inst.trans_en)
    # Other language
    trans_real.activate('en')
    self.assertEqual('', inst.trans)
    inst.trans = 'q'
    self.assertEqual('a', inst.trans)
    inst.trans_de = 4
    self.assertEqual('aaaa', inst.trans_de)
    inst.save()
    db_values = models.DescriptorModel.objects.raw_values('normal', 'trans_en', 'trans_de')[0]
    self.assertEqual(3, db_values['normal'])
    self.assertEqual(4, db_values['trans_de'])
    self.assertEqual(1, db_values['trans_en'])
class ModeltranslationTestRule1(ModeltranslationTestBase):
    """
    Rule 1: Reading the value from the original field returns the value in
    translated to the current language.
    """

    def _test_field(self, field_name, value_de, value_en, deactivate=True):
        """Create an object with both translations set and verify language-dependent reads."""
        field_name_de = '%s_de' % field_name
        field_name_en = '%s_en' % field_name
        params = {field_name_de: value_de, field_name_en: value_en}
        n = models.TestModel.objects.create(**params)
        # Language is set to 'de' at this point
        self.assertEqual(get_language(), 'de')
        self.assertEqual(getattr(n, field_name), value_de)
        self.assertEqual(getattr(n, field_name_de), value_de)
        self.assertEqual(getattr(n, field_name_en), value_en)
        # Now switch to "en"
        trans_real.activate("en")
        self.assertEqual(get_language(), "en")
        # Should now return the english one (just by switching the language)
        self.assertEqual(getattr(n, field_name), value_en)
        # But explicit language fields hold their values
        self.assertEqual(getattr(n, field_name_de), value_de)
        self.assertEqual(getattr(n, field_name_en), value_en)
        n = models.TestModel.objects.create(**params)
        n.save()
        # Language is set to "en" at this point
        self.assertEqual(get_language(), "en")
        self.assertEqual(getattr(n, field_name), value_en)
        self.assertEqual(getattr(n, field_name_de), value_de)
        self.assertEqual(getattr(n, field_name_en), value_en)
        trans_real.activate('de')
        self.assertEqual(get_language(), 'de')
        self.assertEqual(getattr(n, field_name), value_de)
        if deactivate:
            trans_real.deactivate()

    def test_rule1(self):
        """
        Basic CharField/TextField test.
        """
        title1_de = "title de"
        title1_en = "title en"
        text_de = "Dies ist ein deutscher Satz"
        text_en = "This is an english sentence"
        self._test_field(field_name='title', value_de=title1_de, value_en=title1_en)
        self._test_field(field_name='text', value_de=text_de, value_en=text_en)

    def test_rule1_url_field(self):
        """Rule 1 also applies to URLField."""
        self._test_field(
            field_name='url', value_de='http://www.google.de', value_en='http://www.google.com'
        )

    def test_rule1_email_field(self):
        """Rule 1 also applies to EmailField."""
        self._test_field(
            field_name='email',
            value_de='<EMAIL>',
            value_en='<EMAIL>',
        )
class ModeltranslationTestRule2(ModeltranslationTestBase):
    """
    Rule 2: Assigning a value to the original field updates the value
    in the associated current language translation field.
    """

    def _test_field(self, field_name, value1_de, value1_en, value2, value3, deactivate=True):
        """Assign through the original field in each language and verify which translation changes."""
        field_name_de = '%s_de' % field_name
        field_name_en = '%s_en' % field_name
        params = {field_name_de: value1_de, field_name_en: value1_en}
        self.assertEqual(get_language(), 'de')
        n = models.TestModel.objects.create(**params)
        self.assertEqual(getattr(n, field_name), value1_de)
        self.assertEqual(getattr(n, field_name_de), value1_de)
        self.assertEqual(getattr(n, field_name_en), value1_en)
        # Writing the original field while 'de' is active updates *_de only.
        setattr(n, field_name, value2)
        n.save()
        self.assertEqual(getattr(n, field_name), value2)
        self.assertEqual(getattr(n, field_name_de), value2)
        self.assertEqual(getattr(n, field_name_en), value1_en)
        trans_real.activate("en")
        self.assertEqual(get_language(), "en")
        # Writing while 'en' is active updates *_en; *_de keeps its value.
        setattr(n, field_name, value3)
        setattr(n, field_name_de, value1_de)
        n.save()
        self.assertEqual(getattr(n, field_name), value3)
        self.assertEqual(getattr(n, field_name_en), value3)
        self.assertEqual(getattr(n, field_name_de), value1_de)
        if deactivate:
            trans_real.deactivate()

    def test_rule2(self):
        """
        Basic CharField/TextField test.
        """
        self._test_field(
            field_name='title',
            value1_de='title de',
            value1_en='title en',
            value2='Neuer Titel',
            value3='new title',
        )

    def test_rule2_url_field(self):
        """Rule 2 also applies to URLField."""
        self._test_field(
            field_name='url',
            value1_de='http://www.google.de',
            value1_en='http://www.google.com',
            value2='http://www.google.at',
            value3='http://www.google.co.uk',
        )

    def test_rule2_email_field(self):
        """Rule 2 also applies to EmailField."""
        self._test_field(
            field_name='email',
            value1_de='<EMAIL>',
            value1_en='<EMAIL>',
            value2='<EMAIL>',
            value3='<EMAIL>',
        )
class ModeltranslationTestRule3(ModeltranslationTestBase):
    """
    Rule 3: If both fields - the original and the current language translation
    field - are updated at the same time, the current language translation
    field wins.
    """

    def test_rule3(self):
        """Explicit *_de assignment wins over the original field in the same call."""
        self.assertEqual(get_language(), 'de')
        title = 'title de'
        # Normal behaviour
        n = models.TestModel(title='foo')
        self.assertEqual(n.title, 'foo')
        self.assertEqual(n.title_de, 'foo')
        self.assertEqual(n.title_en, None)
        # constructor
        n = models.TestModel(title_de=title, title='foo')
        self.assertEqual(n.title, title)
        self.assertEqual(n.title_de, title)
        self.assertEqual(n.title_en, None)
        # object.create
        n = models.TestModel.objects.create(title_de=title, title='foo')
        self.assertEqual(n.title, title)
        self.assertEqual(n.title_de, title)
        self.assertEqual(n.title_en, None)
        # Database save/load
        n = models.TestModel.objects.get(title_de=title)
        self.assertEqual(n.title, title)
        self.assertEqual(n.title_de, title)
        self.assertEqual(n.title_en, None)
        # This is not subject to Rule 3, because updates are not *at the same time*
        n = models.TestModel()
        n.title_de = title
        n.title = 'foo'
        self.assertEqual(n.title, 'foo')
        self.assertEqual(n.title_de, 'foo')
        self.assertEqual(n.title_en, None)

    @staticmethod
    def _index(list, element):
        """Identity-based index lookup (note: `list` shadows the builtin here)."""
        for i, el in enumerate(list):
            if el is element:
                return i
        raise ValueError

    def test_rule3_internals(self):
        """Translation fields must be registered after the original field."""
        # Rule 3 work because translation fields are added to model field list
        # later than original field.
        original = models.TestModel._meta.get_field('title')
        translated_de = models.TestModel._meta.get_field('title_de')
        translated_en = models.TestModel._meta.get_field('title_en')
        fields = models.TestModel._meta.fields
        # Here we cannot use simple list.index, because Field has overloaded __cmp__
        self.assertTrue(self._index(fields, original) < self._index(fields, translated_de))
        self.assertTrue(self._index(fields, original) < self._index(fields, translated_en))
class ModelValidationTest(ModeltranslationTestBase):
    """
    Tests if a translation model field validates correctly.
    """

    def assertRaisesValidation(self, func):
        """Run func expecting ValidationError; return its message_dict."""
        try:
            func()
        except ValidationError as e:
            return e.message_dict
        self.fail('ValidationError not raised.')

    def _test_model_validation(self, field_name, invalid_value, valid_value):
        """
        Generic model field validation test.
        """
        field_name_de = '%s_de' % field_name
        field_name_en = '%s_en' % field_name
        # Title need to be passed here - otherwise it would not validate
        params = {'title_de': 'title de', 'title_en': 'title en', field_name: invalid_value}
        n = models.TestModel.objects.create(**params)
        # First check the original field
        # Expect that the validation object contains an error
        errors = self.assertRaisesValidation(n.full_clean)
        self.assertIn(field_name, errors)
        # Set translation field to a valid value
        # Language is set to 'de' at this point
        self.assertEqual(get_language(), 'de')
        setattr(n, field_name_de, valid_value)
        n.full_clean()
        # All language fields are validated even though original field validation raise no error
        setattr(n, field_name_en, invalid_value)
        errors = self.assertRaisesValidation(n.full_clean)
        self.assertNotIn(field_name, errors)
        self.assertIn(field_name_en, errors)
        # When language is changed to en, the original field also doesn't validate
        with override('en'):
            setattr(n, field_name_en, invalid_value)
            errors = self.assertRaisesValidation(n.full_clean)
            self.assertIn(field_name, errors)
            self.assertIn(field_name_en, errors)
        # Set translation field to an invalid value
        setattr(n, field_name_en, valid_value)
        setattr(n, field_name_de, invalid_value)
        # Expect that the validation object contains an error for url_de
        errors = self.assertRaisesValidation(n.full_clean)
        self.assertIn(field_name, errors)
        self.assertIn(field_name_de, errors)

    def test_model_validation_required(self):
        """
        General test for CharField: if required/blank is handled properly.
        """
        # Create an object without title (which is required)
        n = models.TestModel.objects.create(text='Testtext')
        # First check the original field
        # Expect that the validation object contains an error for title
        errors = self.assertRaisesValidation(n.full_clean)
        self.assertIn('title', errors)
        n.save()
        # Check the translation field
        # Language is set to 'de' at this point
        self.assertEqual(get_language(), 'de')
        # Set translation field to a valid title
        n.title_de = 'Title'
        n.full_clean()
        # Change language to en
        # Now validation fails, because current language (en) title is empty
        # So requirement validation depends on current language
        with override('en'):
            errors = self.assertRaisesValidation(n.full_clean)
            self.assertIn('title', errors)
            # However, with fallback language (most cases), it validates (because empty title
            # falls back to title_de):
            with default_fallback():
                n.full_clean()
        # Set translation field to an empty title
        n.title_de = None
        # Even though the original field isn't optional, translation fields are
        # per definition always optional. So we expect that the validation
        # object contains no error for title_de.
        # However, title still raises error, since it points to empty title_de
        errors = self.assertRaisesValidation(n.full_clean)
        self.assertNotIn('title_de', errors)
        self.assertIn('title', errors)

    def test_model_validation_url_field(self):
        """URLField: invalid values are rejected per language field."""
        self._test_model_validation(
            field_name='url',
            invalid_value='foo en',
            valid_value='http://code.google.com/p/django-modeltranslation/',
        )

    def test_model_validation_email_field(self):
        """EmailField: invalid values are rejected per language field."""
        self._test_model_validation(
            field_name='email',
            invalid_value='foo en',
            valid_value='<EMAIL>',
        )
class ModelInheritanceTest(ModeltranslationTestBase):
"""Tests for inheritance support in modeltranslation."""
def test_abstract_inheritance(self):
field_names_b = get_field_names(models.AbstractModelB)
self.assertTrue('titlea' in field_names_b)
self.assertTrue('titlea_de' in field_names_b)
self.assertTrue('titlea_en' in field_names_b)
self.assertTrue('titleb' in field_names_b)
self.assertTrue('titleb_de' in field_names_b)
self.assertTrue('titleb_en' in field_names_b)
self.assertFalse('titled' in field_names_b)
self.assertFalse('titled_de' in field_names_b)
self.assertFalse('titled_en' | |
just the sequence of past states of length delay + sequence_length + 1.
action : single action dependent on action space
Action magnitudes are penalised immediately in the case of continuous spaces and, in effect, play no role for discrete spaces as the reward in that case only depends on sequences of states. We say "in effect" because it _is_ used in case of a custom R to calculate R(s, a) but that is equivalent to using the "next" state s' as the reward determining criterion in case of deterministic transitions. _Sequences_ of _actions_ are currently NOT used to calculate the reward. Since the underlying MDP dynamics are deterministic, a state and action map 1-to-1 with the next state and so, just a sequence of _states_ should be enough to calculate the reward.
Returns
-------
double
The reward at the end of the current transition
"""
# #TODO Make reward depend on the action sequence too instead of just state sequence, as it is currently?
delay = self.delay
sequence_length = self.sequence_length
reward = 0.0
# print("TEST", self.augmented_state[0 : self.augmented_state_length - delay], state, action, self.rewardable_sequences, type(state), type(self.rewardable_sequences))
state_considered = state # if imaginary_rollout else self.augmented_state # When we imagine a rollout, the user has to provide full augmented state as the argument!!
# if not isinstance(state_considered, list):
# state_considered = [state_considered] # to get around case when sequence is an int; it should always be a list except if a user passes in a state; would rather force them to pass a list: assert for it!!
# TODO These asserts are only needed if imaginary_rollout is True, as users then pass in a state sequence
# if imaginary_rollout:
# assert isinstance(state_considered, list), "state passed in should be a list of states containing at the very least the state at beginning of the transition, s, and the one after it, s'. type was: " + str(type(state_considered))
# assert len(state_considered) == self.augmented_state_length, "Length of list of states passed should be equal to self.augmented_state_length. It was: " + str(len(state_considered))
if self.use_custom_mdp:
reward = self.config["reward_function"](state_considered, action)
self.reward_buffer.append(reward) # ##TODO Modify seq_len and delay
# code for discrete and continuous case to use buffer too?
reward = self.reward_buffer[0]
# print("rewards:", self.reward_buffer, old_reward, reward)
del self.reward_buffer[0]
elif self.config["state_space_type"] == "discrete":
if np.isnan(state_considered[0]):
pass # ###IMP: This check is to get around case of
# augmented_state_length being > 2, i.e. non-vanilla seq_len or
# delay, because then rewards may be handed out for the initial
# state being part of a sequence which is not fair since it is
# handed out without having the agent take an action.
else:
self.logger.debug(
"state_considered for reward:"
+ str(state_considered)
+ " with delay "
+ str(self.delay)
)
if not self.reward_every_n_steps or (
self.reward_every_n_steps
and self.total_transitions_episode % self.sequence_length == delay
):
# ###TODO also implement this for make_denser case and continuous envs.
sub_seq = tuple(
state_considered[1: self.augmented_state_length - delay]
)
if sub_seq in self.rewardable_sequences:
# print(state_considered, "with delay", self.delay, "rewarded with:", 1)
reward += self.rewardable_sequences[sub_seq]
else:
# print(state_considered, "with delay", self.delay, "NOT rewarded.")
pass
self.logger.info("rew" + str(reward))
elif self.config["state_space_type"] == "continuous":
# ##TODO Make reward for along a line case to be length of line
# travelled - sqrt(Sum of Squared distances from the line)? This
# should help with keeping the mean reward near 0. Since the principal
# component is always taken to be the direction of travel, this would
# mean a larger distance covered in that direction and hence would
# lead to +ve reward always and would mean larger random actions give
# a larger reward! Should penalise actions in proportion that scale then?
if np.isnan(state_considered[0][0]): # Instead of below commented out
# check, this is more robust for imaginary transitions
# if self.total_transitions_episode + 1 < self.augmented_state_length:
# + 1 because augmented_state_length is always 1 greater than seq_len + del
pass # #TODO
else:
if self.config["reward_function"] == "move_along_a_line":
# print("######reward test", self.total_transitions_episode, np.array(self.augmented_state), np.array(self.augmented_state).shape)
# #test: 1. for checking 0 distance for same action being always applied; 2. similar to 1. but for different dynamics orders; 3. similar to 1 but for different action_space_dims; 4. for a known applied action case, check manually the results of the formulae and see that programmatic results match: should also have a unit version of 4. for dist_of_pt_from_line() and an integration version here for total_deviation calc.?.
data_ = np.array(state_considered, dtype=self.dtype)[
1: self.augmented_state_length - delay,
self.config["relevant_indices"],
]
data_mean = data_.mean(axis=0)
uu, dd, vv = np.linalg.svd(data_ - data_mean)
self.logger.info(
"uu.shape, dd.shape, vv.shape ="
+ str(uu.shape)
+ str(dd.shape)
+ str(vv.shape)
)
line_end_pts = (
vv[0] * np.linspace(-1, 1, 2)[:, np.newaxis]
) # vv[0] = 1st
# eigenvector, corres. to Principal Component #hardcoded -100
# to 100 to get a "long" line which should make calculations more
# robust(?: didn't seem to be the case for 1st few trials, so changed it
# to -1, 1; even tried up to 10000 - seems to get less precise for larger
# numbers) to numerical issues in dist_of_pt_from_line() below; newaxis
# added so that expected broadcasting takes place
line_end_pts += data_mean
total_deviation = 0
for (
data_pt
) in (
data_
): # find total distance of all data points from the fit line above
total_deviation += dist_of_pt_from_line(
data_pt, line_end_pts[0], line_end_pts[-1]
)
self.logger.info(
"total_deviation of pts from fit line:" + str(total_deviation)
)
reward += -total_deviation / self.sequence_length
elif self.config["reward_function"] == "move_to_a_point": # Could
# generate target points randomly but leaving it to the user to do
# that. #TODO Generate it randomly to have random Rs?
if self.make_denser:
old_relevant_state = np.array(
state_considered, dtype=self.dtype
)[-2 - delay, self.config["relevant_indices"]]
new_relevant_state = np.array(
state_considered, dtype=self.dtype
)[-1 - delay, self.config["relevant_indices"]]
reward = -np.linalg.norm(new_relevant_state - self.target_point)
# Should allow other powers of the distance from target_point,
# or more norms?
reward += np.linalg.norm(old_relevant_state - self.target_point)
# Reward is the distance moved towards the target point.
# Should rather be the change in distance to target point, so reward given is +ve if "correct" action was taken and so reward function is more natural (this _is_ the current implementation)
# It's true that giving the total -ve distance from target as the loss at every step gives a stronger signal to algorithm to make it move faster towards target but this seems more natural (as in the other case loss/reward go up quadratically with distance from target point while in this case it's linear). The value function is in both cases higher for states further from target. But isn't that okay? Since the greater the challenge (i.e. distance from target), the greater is the achieved overall reward at the end.
# #TODO To enable seq_len, we can hand out reward if distance to target point is reduced (or increased - since that also gives a better signal than giving 0 in that case!!) for seq_len consecutive steps, otherwise 0 reward - however we need to hand out fixed reward for every "sequence" achieved otherwise, if we do it by adding the distance moved towards target in the sequence, it leads to much bigger rewards for larger seq_lens because of overlapping consecutive sequences.
# TODO also make_denser, sparse rewards only at target
else: # sparse reward
new_relevant_state = np.array(
state_considered, dtype=self.dtype
)[-1 - delay, self.config["relevant_indices"]]
if (
np.linalg.norm(new_relevant_state - self.target_point)
< self.target_radius
):
reward = 1.0 # Make the episode terminate as well?
# Don't need to. If algorithm is smart enough, it will
# stay in the radius and earn more reward.
reward -= self.action_loss_weight * np.linalg.norm(
np.array(action, dtype=self.dtype)
)
elif self.config["state_space_type"] == "grid":
if self.config["reward_function"] == "move_to_a_point":
if self.make_denser:
old_relevant_state = np.array(state_considered)[-2 - delay]
new_relevant_state = np.array(state_considered)[-1 - delay]
manhat_dist_old = distance.cityblock(
old_relevant_state, np.array(self.target_point)
)
manhat_dist_new = distance.cityblock(
new_relevant_state, np.array(self.target_point)
)
reward += manhat_dist_old - manhat_dist_new
else: # sparse reward
new_relevant_state | |
* nbar + 1) * hbar / 2
res1 = symplectic.reduced_state(mu, cov, 1)
assert np.allclose(res0[0], expected0[0], atol=tol, rtol=0)
assert np.allclose(res0[1], expected0[1], atol=tol, rtol=0)
assert np.allclose(res1[0], expected1[0], atol=tol, rtol=0)
assert np.allclose(res1[1], expected1[1], atol=tol, rtol=0)
def test_loss_none(self, hbar, tol):
"""Test no loss on half a TMS leaves state unchanged"""
r = 0.543
phi = 0.432
T = 1
S = symplectic.two_mode_squeezing(r, phi)
mu = np.zeros([4])
cov = S @ S.T * (hbar / 2)
res = symplectic.loss(mu, cov, T, mode=0, hbar=hbar)
expected = mu, cov
assert np.allclose(res[0], expected[0], atol=tol, rtol=0)
assert np.allclose(res[1], expected[1], atol=tol, rtol=0)
def test_loss_thermal_state(self, hbar, tol):
"""Test loss on part of a thermal state"""
nbar = np.array([0.4532, 0.123, 0.432])
T = 0.54
mu = np.zeros([2 * len(nbar)])
cov = np.diag(2 * np.tile(nbar, 2) + 1) * (hbar / 2)
res = symplectic.loss(mu, cov, T, mode=1, hbar=hbar)
# the loss reduces the fractional mean photon number of mode 1
new_nbar = nbar * np.array([1, T, 1])
expected = mu, np.diag(2 * np.tile(new_nbar, 2) + 1) * (hbar / 2)
assert np.allclose(res[0], expected[0], atol=tol, rtol=0)
assert np.allclose(res[1], expected[1], atol=tol, rtol=0)
def test_loss_complete_random(self, hbar, tol):
"""Test loss on random state"""
T = 0
mu = np.random.random(size=[4])
cov = np.array(
[
[10.4894171, 4.44832813, 7.35223928, -14.0593551],
[4.44832813, 5.29244335, -1.48437419, -4.79381772],
[7.35223928, -1.48437419, 11.92921345, -11.47687254],
[-14.0593551, -4.79381772, -11.47687254, 19.67522694],
]
)
res = symplectic.loss(mu, cov, T, mode=0, hbar=hbar)
# the loss reduces the fractional mean photon number of mode 1
mu_exp = mu.copy()
mu_exp[0] = 0
mu_exp[2] = 0
cov_exp = np.array(
[
[hbar / 2, 0, 0, 0],
[0, 5.29244335, 0, -4.79381772],
[0, 0, hbar / 2, 0],
[0, -4.79381772, 0, 19.67522694],
]
)
assert np.allclose(res[1], cov_exp, atol=tol, rtol=0)
assert np.allclose(res[0], mu_exp, atol=tol, rtol=0)
class TestMeanPhotonNumber:
    """Tests for the mean photon number function"""
    def test_coherent(self, hbar, tol):
        """Coherent state |alpha>: E(n) = var(n) = |alpha|^2."""
        alpha = 0.23 + 0.12j
        means = np.sqrt(2 * hbar) * np.array([alpha.real, alpha.imag])
        sigma = np.identity(2) * hbar / 2
        n_mean, n_var = symplectic.mean_photon_number(means, sigma, hbar=hbar)
        expected = abs(alpha) ** 2
        assert np.allclose(n_mean, expected, atol=tol, rtol=0)
        assert np.allclose(n_var, expected, atol=tol, rtol=0)
    def test_squeezed(self, hbar, tol):
        """Squeezed vacuum: E(n) = sinh^2(r) and var(n) = 2(sinh^2(r) + sinh^4(r))."""
        r = 0.1
        phi = 0.423
        ch, sh = np.cosh(r), np.sinh(r)
        squeeze = np.array(
            [
                [ch - np.cos(phi) * sh, -np.sin(phi) * sh],
                [-np.sin(phi) * sh, ch + np.cos(phi) * sh],
            ]
        )
        means = np.zeros([2])
        sigma = squeeze @ squeeze.T * hbar / 2
        n_mean, n_var = symplectic.mean_photon_number(means, sigma, hbar=hbar)
        assert np.allclose(n_mean, sh ** 2, atol=tol, rtol=0)
        assert np.allclose(n_var, 2 * (sh ** 2 + sh ** 4), atol=tol, rtol=0)
    def test_displaced_squeezed(self, hbar, tol):
        """Displaced squeezed state: E(n) = sinh^2(r) + |alpha|^2."""
        alpha = 0.12 - 0.05j
        r = 0.1
        phi = 0.423
        ch, sh = np.cosh(r), np.sinh(r)
        squeeze = np.array(
            [
                [ch - np.cos(phi) * sh, -np.sin(phi) * sh],
                [-np.sin(phi) * sh, ch + np.cos(phi) * sh],
            ]
        )
        means = np.sqrt(2 * hbar) * np.array([alpha.real, alpha.imag])
        sigma = squeeze @ squeeze.T * hbar / 2
        n_mean, _ = symplectic.mean_photon_number(means, sigma, hbar=hbar)
        assert np.allclose(n_mean, abs(alpha) ** 2 + sh ** 2, atol=tol, rtol=0)
    def test_displaced_thermal(self, hbar, tol):
        """Displaced thermal state: E(n) = |alpha|^2 + nbar, var(n) = var_th + |alpha|^2(1 + 2*nbar)."""
        alpha = 0.12 - 0.05j
        nbar = 0.123
        means = np.sqrt(2 * hbar) * np.array([alpha.real, alpha.imag])
        sigma = np.diag(2 * np.tile(nbar, 2) + 1) * (hbar / 2)
        n_mean, n_var = symplectic.mean_photon_number(means, sigma, hbar=hbar)
        mag2 = abs(alpha) ** 2
        assert np.allclose(n_mean, mag2 + nbar, atol=tol, rtol=0)
        assert np.allclose(n_var, nbar ** 2 + nbar + mag2 * (1 + 2 * nbar), atol=tol, rtol=0)
# pylint: disable=too-few-public-methods
class TestVectorExpansion:
    """Tests for expanding a displacement operation into a phase-space displacement vector"""
    def test_expand_one(self, hbar, tol):
        """A single-mode displacement lands in the right two slots of the 2N vector."""
        alpha = 1.4 + 3.7 * 1j
        mode = 4
        num_modes = 10
        disp = symplectic.expand_vector(alpha, mode, num_modes, hbar)
        # xxpp ordering: real part at `mode`, imaginary part at `mode + N`.
        scale = np.sqrt(2 * hbar)
        target = np.zeros([2 * num_modes])
        target[mode] = scale * alpha.real
        target[mode + num_modes] = scale * alpha.imag
        assert np.allclose(disp, target, atol=tol, rtol=0)
class TestSymplecticExpansion:
    """Tests for the expanding a symplectic matrix"""
    @pytest.mark.parametrize("mode", range(3))
    def test_expand_one(self, mode, tol):
        """Embedding a one-mode symplectic into N modes only touches that mode."""
        r = 0.1
        phi = 0.423
        num_modes = 3
        ch, sh = np.cosh(r), np.sinh(r)
        single = np.array(
            [
                [ch - np.cos(phi) * sh, -np.sin(phi) * sh],
                [-np.sin(phi) * sh, ch + np.cos(phi) * sh],
            ]
        )
        embedded = symplectic.expand(single, modes=mode, N=num_modes)
        target = np.identity(2 * num_modes)
        target[mode, mode] = single[0, 0]
        target[mode, mode + num_modes] = single[0, 1]
        target[mode + num_modes, mode] = single[1, 0]
        target[mode + num_modes, mode + num_modes] = single[1, 1]
        assert np.allclose(embedded, target, atol=tol, rtol=0)
    @pytest.mark.parametrize("m1, m2", [[0, 1], [0, 2], [1, 2], [2, 1]])
    def test_expand_two(self, m1, m2, tol):
        """Embedding a two-mode symplectic into N modes places every 2x2 sub-block."""
        r = 0.1
        phi = 0.423
        num_modes = 4
        twomode = symplectic.two_mode_squeezing(r, phi)
        embedded = symplectic.expand(twomode, modes=[m1, m2], N=num_modes)
        target = np.identity(2 * num_modes)
        # Each tuple is (row, col, i, j): target[row, col] = twomode[i, j].
        # NOTE(review): the (3, 0)/(1, 2) pairs in the cross terms mirror the
        # original test exactly; for a TMS matrix those entries coincide with
        # their transposed counterparts, so the check is equivalent.
        placements = [
            # mode-1 diagonal block
            (m1, m1, 0, 0), (m1, m1 + num_modes, 0, 2),
            (m1 + num_modes, m1, 2, 0), (m1 + num_modes, m1 + num_modes, 2, 2),
            # mode-2 diagonal block
            (m2, m2, 1, 1), (m2, m2 + num_modes, 1, 3),
            (m2 + num_modes, m2, 3, 1), (m2 + num_modes, m2 + num_modes, 3, 3),
            # cross blocks
            (m1, m2, 0, 1), (m1, m2 + num_modes, 0, 3),
            (m1 + num_modes, m2, 2, 1), (m1 + num_modes, m2 + num_modes, 2, 3),
            (m2, m1, 1, 0), (m2, m1 + num_modes, 3, 0),
            (m2 + num_modes, m1, 1, 2), (m2 + num_modes, m1 + num_modes, 3, 2),
        ]
        for row, col, i, j in placements:
            target[row, col] = twomode[i, j]
        assert np.allclose(embedded, target, atol=tol, rtol=0)
class TestIntegration:
    """Integration tests"""
    def test_inverse_ops_cancel(self, hbar, tol):
        """Test that applying squeezing and interferometers to a four mode circuit,
        followed by applying the inverse operations, return the state to the vacuum"""
        # the symplectic form matrix Omega for 4 modes (xxpp ordering)
        O = np.block([[np.zeros([4, 4]), np.identity(4)], [-np.identity(4), np.zeros([4, 4])]])
        # begin in the vacuum state
        mu_init, cov_init = symplectic.vacuum_state(4, hbar=hbar)
        # add displacement
        alpha = np.random.random(size=[4]) + np.random.random(size=[4]) * 1j
        D = np.concatenate([alpha.real, alpha.imag])
        mu = mu_init + D
        cov = cov_init.copy()
        # random squeezing
        r = np.random.random()
        phi = np.random.random()
        S = symplectic.expand(symplectic.two_mode_squeezing(r, phi), modes=[0, 1], N=4)
        # check symplectic: S @ O @ S.T == O
        assert np.allclose(S @ O @ S.T, O, atol=tol, rtol=0)
        # random interferometer
        # fmt:off
        u = np.array([[-0.06658906-0.36413058j, 0.07229868+0.65935896j, 0.59094625-0.17369183j, -0.18254686-0.10140904j],
                      [0.53854866+0.36529723j, 0.61152793+0.15022026j, 0.05073631+0.32624882j, -0.17482023-0.20103772j],
                      [0.34818923+0.51864844j, -0.24334624+0.0233729j, 0.3625974 -0.4034224j, 0.10989667+0.49366039j],
                      [0.16548085+0.14792642j, -0.3012549 -0.11387682j, -0.12731847-0.44851389j, -0.55816075-0.5639976j]])
        # fmt: on
        # (the directive above was previously "# fmt on" — missing colon —
        # which Black does not recognise, leaving formatting disabled for
        # the rest of the file)
        U = symplectic.interferometer(u)
        # check unitary
        assert np.allclose(u @ u.conj().T, np.identity(4), atol=tol, rtol=0)
        # check symplectic
        assert np.allclose(U @ O @ U.T, O, atol=tol, rtol=0)
        # apply squeezing and interferometer
        cov = U @ S @ cov @ S.T @ U.T
        mu = U @ S @ mu
        # check we are no longer in the vacuum state
        assert not np.allclose(mu, mu_init, atol=tol, rtol=0)
        assert not np.allclose(cov, cov_init, atol=tol, rtol=0)
        # return the inverse operations
        Sinv = symplectic.expand(symplectic.two_mode_squeezing(-r, phi), modes=[0, 1], N=4)
        Uinv = symplectic.interferometer(u.conj().T)
        # check inverses
        assert np.allclose(Uinv, np.linalg.inv(U), atol=tol, rtol=0)
        assert np.allclose(Sinv, np.linalg.inv(S), atol=tol, rtol=0)
        # apply the inverse operations
        cov = Sinv @ Uinv @ cov @ Uinv.T @ Sinv.T
        mu = Sinv @ Uinv @ mu
        # inverse displacement
        mu -= D
        # check that we return to the vacuum state
        assert np.allclose(mu, mu_init, atol=tol, rtol=0)
        assert np.allclose(cov, cov_init, atol=tol, rtol=0)
def test_is_symplectic():
    """Matrices built by the symplectic module satisfy is_symplectic; arbitrary ones do not."""
    theta = np.pi / 6
    r = np.arcsinh(1.0)
    phi = np.pi / 8
    # Every generator in the module should produce a genuine symplectic matrix.
    generated = [
        symplectic.rotation(theta),
        symplectic.squeezing(r, theta),
        symplectic.beam_splitter(theta, phi),
        symplectic.two_mode_squeezing(r, theta),
    ]
    for candidate in generated:
        assert symplectic.is_symplectic(candidate)
    # Counter-examples: wrong entries, odd dimension, and non-square shape.
    for bad in (
        np.array([[2.0, 3.0], [4.0, 6.0]]),
        np.identity(3),
        np.array([[2.0, 3.0], [4.0, 6.0], [4.0, 6.0]]),
    ):
        assert not symplectic.is_symplectic(bad)
@pytest.mark.parametrize("n", [1, 2, 4])
def test_sympmat(n):
"""test X_n = | |
= None
try:
mgr = MockMgr()
config_template = Template('/tmp/config.$pid')
config_file = config_template.substitute(pid=os.getpid())
handler = bigipconfigdriver.ConfigHandler(config_file, mgr, 30)
obj = {"global": {"log-level": 55, "verify-interval": 100}}
with open(config_file, 'w+') as f:
def fin():
os.unlink(config_file)
request.addfinalizer(fin)
json.dump(obj, f)
r = bigipconfigdriver._parse_config(config_file)
verify_interval, level, _ = bigipconfigdriver._handle_global_config(r)
assert verify_interval == 100
assert level == bigipconfigdriver.DEFAULT_LOG_LEVEL
finally:
assert handler is not None
handler.stop()
handler._thread.join(30)
assert handler._thread.is_alive() is False
def test_handle_global_config_negative_verify_interval(request):
    """A negative verify-interval must fall back to the default interval."""
    handler = None
    try:
        manager = MockMgr()
        cfg_path = Template('/tmp/config.$pid').substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(cfg_path, manager, 30)
        payload = {"global": {"log-level": "ERROR", "verify-interval": -1}}
        with open(cfg_path, 'w+') as out:
            def cleanup():
                os.unlink(cfg_path)
            request.addfinalizer(cleanup)
            json.dump(payload, out)
        parsed = bigipconfigdriver._parse_config(cfg_path)
        interval, log_level, _ = bigipconfigdriver._handle_global_config(parsed)
        # Invalid interval is replaced by the default; the log level is kept.
        assert interval == bigipconfigdriver.DEFAULT_VERIFY_INTERVAL
        assert log_level == logging.ERROR
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_handle_global_config_string_verify_interval(request):
    """A non-numeric verify-interval must fall back to the default interval."""
    handler = None
    try:
        manager = MockMgr()
        cfg_path = Template('/tmp/config.$pid').substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(cfg_path, manager, 30)
        payload = {"global": {"log-level": "ERROR", "verify-interval": "hundred"}}
        with open(cfg_path, 'w+') as out:
            def cleanup():
                os.unlink(cfg_path)
            request.addfinalizer(cleanup)
            json.dump(payload, out)
        parsed = bigipconfigdriver._parse_config(cfg_path)
        interval, log_level, _ = bigipconfigdriver._handle_global_config(parsed)
        # "hundred" is not a number, so the default interval must be used.
        assert interval == bigipconfigdriver.DEFAULT_VERIFY_INTERVAL
        assert log_level == logging.ERROR
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_handle_bigip_config(request):
    """_handle_bigip_config extracts host and port from a valid 'bigip' section."""
    handler = None
    try:
        mgr = MockMgr()
        config_template = Template('/tmp/config.$pid')
        config_file = config_template.substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(config_file, mgr, 30)
        obj = {}
        obj['bigip'] = {'username': 'admin', 'password': '<PASSWORD>',
                        'url': 'http://10.10.10.10:443',
                        'partitions': ['common', 'velcro']}
        with open(config_file, 'w+') as f:
            def fin():
                os.unlink(config_file)
            request.addfinalizer(fin)
            json.dump(obj, f)
        r = bigipconfigdriver._parse_config(config_file)
        # Previously this call was wrapped in a bare `except: assert 0`,
        # which swallowed KeyboardInterrupt/SystemExit and replaced the real
        # traceback with an information-free failure. Letting an unexpected
        # exception propagate still fails the test and reports the cause.
        host, port = bigipconfigdriver._handle_bigip_config(r)
        assert host == '10.10.10.10'
        assert port == 443
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_handle_bigip_config_missing_bigip(request):
    """A config with no 'bigip' section must raise ConfigError."""
    handler = None
    try:
        manager = MockMgr()
        cfg_path = Template('/tmp/config.$pid').substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(cfg_path, manager, 30)
        payload = {}
        with open(cfg_path, 'w+') as out:
            def cleanup():
                os.unlink(cfg_path)
            request.addfinalizer(cleanup)
            json.dump(payload, out)
        parsed = bigipconfigdriver._parse_config(cfg_path)
        with pytest.raises(bigipconfigdriver.ConfigError):
            bigipconfigdriver._handle_bigip_config(parsed)
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_handle_bigip_config_missing_username(request):
    """A 'bigip' section without a username must raise ConfigError."""
    handler = None
    try:
        manager = MockMgr()
        cfg_path = Template('/tmp/config.$pid').substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(cfg_path, manager, 30)
        payload = {
            'bigip': {'password': '<PASSWORD>',
                      'url': 'http://10.10.10.10:443',
                      'partitions': ['common', 'velcro']}
        }
        with open(cfg_path, 'w+') as out:
            def cleanup():
                os.unlink(cfg_path)
            request.addfinalizer(cleanup)
            json.dump(payload, out)
        parsed = bigipconfigdriver._parse_config(cfg_path)
        with pytest.raises(bigipconfigdriver.ConfigError):
            bigipconfigdriver._handle_bigip_config(parsed)
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_handle_bigip_config_missing_password(request):
    """A 'bigip' section without a password must raise ConfigError."""
    handler = None
    try:
        manager = MockMgr()
        cfg_path = Template('/tmp/config.$pid').substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(cfg_path, manager, 30)
        payload = {
            'bigip': {'username': 'admin',
                      'url': 'http://10.10.10.10:443',
                      'partitions': ['common', 'velcro']}
        }
        with open(cfg_path, 'w+') as out:
            def cleanup():
                os.unlink(cfg_path)
            request.addfinalizer(cleanup)
            json.dump(payload, out)
        parsed = bigipconfigdriver._parse_config(cfg_path)
        with pytest.raises(bigipconfigdriver.ConfigError):
            bigipconfigdriver._handle_bigip_config(parsed)
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_handle_bigip_config_missing_url(request):
    """A 'bigip' section without a url must raise ConfigError."""
    handler = None
    try:
        manager = MockMgr()
        cfg_path = Template('/tmp/config.$pid').substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(cfg_path, manager, 30)
        payload = {
            'bigip': {'username': 'admin', 'password': '<PASSWORD>',
                      'partitions': ['common', 'velcro']}
        }
        with open(cfg_path, 'w+') as out:
            def cleanup():
                os.unlink(cfg_path)
            request.addfinalizer(cleanup)
            json.dump(payload, out)
        parsed = bigipconfigdriver._parse_config(cfg_path)
        with pytest.raises(bigipconfigdriver.ConfigError):
            bigipconfigdriver._handle_bigip_config(parsed)
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_handle_bigip_config_missing_partitions(request):
    """A 'bigip' section with an empty partitions list must raise ConfigError."""
    handler = None
    try:
        manager = MockMgr()
        cfg_path = Template('/tmp/config.$pid').substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(cfg_path, manager, 30)
        payload = {
            'bigip': {'username': 'admin', 'password': '<PASSWORD>',
                      'url': 'http://10.10.10.10:443',
                      'partitions': []}
        }
        with open(cfg_path, 'w+') as out:
            def cleanup():
                os.unlink(cfg_path)
            request.addfinalizer(cleanup)
            json.dump(payload, out)
        parsed = bigipconfigdriver._parse_config(cfg_path)
        with pytest.raises(bigipconfigdriver.ConfigError):
            bigipconfigdriver._handle_bigip_config(parsed)
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_handle_vxlan_config(request):
    """A well-formed 'vxlan-fdb'/'vxlan-arp' config must parse without error."""
    handler = None
    try:
        mgr = MockMgr()
        config_template = Template('/tmp/config.$pid')
        config_file = config_template.substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(config_file, mgr, 30)
        obj = {}
        obj['vxlan-fdb'] = {'name': 'vxlan0',
                            'records': [
                                {'name': '0a:0a:ac:10:1:5',
                                 'endpoint': '172.16.58.3'},
                                {'name': '0a:0a:ac:10:1:6',
                                 'endpoint': '192.168.127.12'}
                            ]}
        obj['vxlan-arp'] = {'arps': [
            {'macAddress': '0a:0a:ac:10:1:5',
             'ipAddress': '172.16.31.10',
             'name': '1.2.3.4'}
        ]
        }
        with open(config_file, 'w+') as f:
            def fin():
                os.unlink(config_file)
            request.addfinalizer(fin)
            json.dump(obj, f)
        r = bigipconfigdriver._parse_config(config_file)
        # Previously this call was wrapped in a bare `except: assert 0`,
        # which swallowed KeyboardInterrupt/SystemExit and replaced the real
        # traceback with an information-free failure. Letting an unexpected
        # exception propagate still fails the test and reports the cause.
        bigipconfigdriver._handle_vxlan_config(r)
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_handle_vxlan_config_missing_vxlan_name(request):
    """A 'vxlan-fdb' section without a tunnel name must raise ConfigError."""
    handler = None
    try:
        manager = MockMgr()
        cfg_path = Template('/tmp/config.$pid').substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(cfg_path, manager, 30)
        payload = {
            'vxlan-fdb': {'records': [
                {'name': '0a:0a:ac:10:1:5',
                 'endpoint': '172.16.58.3'},
                {'name': '0a:0a:ac:10:1:6',
                 'endpoint': '192.168.127.12'}
            ]}
        }
        with open(cfg_path, 'w+') as out:
            def cleanup():
                os.unlink(cfg_path)
            request.addfinalizer(cleanup)
            json.dump(payload, out)
        parsed = bigipconfigdriver._parse_config(cfg_path)
        with pytest.raises(bigipconfigdriver.ConfigError):
            bigipconfigdriver._handle_vxlan_config(parsed)
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_handle_vxlan_config_missing_vxlan_records(request):
    """A 'vxlan-fdb' section without records must raise ConfigError."""
    handler = None
    try:
        manager = MockMgr()
        cfg_path = Template('/tmp/config.$pid').substitute(pid=os.getpid())
        handler = bigipconfigdriver.ConfigHandler(cfg_path, manager, 30)
        payload = {'vxlan-fdb': {'name': 'vxlan0'}}
        with open(cfg_path, 'w+') as out:
            def cleanup():
                os.unlink(cfg_path)
            request.addfinalizer(cleanup)
            json.dump(payload, out)
        parsed = bigipconfigdriver._parse_config(cfg_path)
        with pytest.raises(bigipconfigdriver.ConfigError):
            bigipconfigdriver._handle_vxlan_config(parsed)
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def _raise_value_error():
raise ValueError('No JSON object could be decoded', 0)
def test_confighandler_reset_json_error(request):
    """ConfigHandler must retry with backoff when parsing raises ValueError."""
    common_confighandler_reset(request, _raise_value_error)
def _raise_cccl_error():
    # Mimics a CCCL schema-validation failure.
    error = F5CcclValidationError('Generic CCCL Error')
    raise error
def test_confighandler_reset_validation_error(request):
    """ConfigHandler must retry with backoff on a CCCL validation error."""
    common_confighandler_reset(request, _raise_cccl_error)
def _raise_unexpected_error():
raise Exception('Unexpected Failure')
def test_confighandler_reset_unexpected_error(request):
    """ConfigHandler must retry with backoff on an unanticipated exception."""
    common_confighandler_reset(request, _raise_unexpected_error)
def common_confighandler_reset(request, exception):
    # Shared driver for the confighandler reset tests.
    #
    # ``exception`` is a zero-argument callable that raises; it is invoked
    # on the 4th regenerate call to force the retry path. The test then
    # checks that the interval timer is disabled while retrying, that the
    # backoff time doubles after the failure, and that both return to
    # normal after the next successful call.
    #
    # NOTE(review): the assertions after each notify_reset() rely on the
    # 0.1s sleeps being long enough for the handler thread to run — timing
    # sensitive by design.
    handler = None
    mgr = None
    # Accumulated pass/fail of the interval-timer checks made from inside
    # the handler thread (a dict so the closure can mutate it).
    flags = {'valid_interval_state': True}
    try:
        # Force an error on the fourth invocation, verify interval timer
        # is disabled during retries
        def handle_results():
            if mgr.calls == 4:
                # turn on retries by returning an error
                exception()
            valid_interval_state = flags['valid_interval_state']
            if mgr.calls == 1 or mgr.calls == 5:
                # verify interval timer is off due to previous error
                if valid_interval_state:
                    valid_interval_state =\
                        (handler._interval.is_running() is False)
            else:
                if valid_interval_state:
                    valid_interval_state =\
                        (handler._interval.is_running() is True)
            flags['valid_interval_state'] = valid_interval_state
        event = threading.Event()
        mgr = MockMgr(notify_event=event, notify_after=5,
                      handle_results=handle_results)
        config_template = Template('/tmp/config.$pid')
        config_file = config_template.substitute(pid=os.getpid())
        # keep the interval timer from expiring during retries
        interval_time = 0.6
        handler = bigipconfigdriver.ConfigHandler(config_file, [mgr],
                                                  interval_time)
        # give the thread an opportunity to spin up
        time.sleep(0)
        assert mgr.calls == 0
        obj = deepcopy(_cloud_config)
        obj['global']['verify-interval'] = interval_time
        with open(config_file, 'w+') as f:
            def fin():
                os.unlink(config_file)
            request.addfinalizer(fin)
            json.dump(obj, f)
        assert handler._thread.is_alive() is True
        # Three successful resets: call count advances, timer checks pass.
        handler.notify_reset()
        time.sleep(0.1)
        assert mgr.calls == 1
        assert flags['valid_interval_state'] is True
        handler.notify_reset()
        time.sleep(0.1)
        assert mgr.calls == 2
        assert flags['valid_interval_state'] is True
        handler.notify_reset()
        time.sleep(0.1)
        assert mgr.calls == 3
        assert flags['valid_interval_state'] is True
        # in the failure case, the exception will be caught
        # and the backoff_timer will be set. Verify the
        # backoff time has doubled.
        handler._backoff_time = 0.6
        handler.notify_reset()
        time.sleep(0.1)
        assert mgr.calls == 4
        assert flags['valid_interval_state'] is True
        assert handler._backoff_time == 1.2
        assert handler._backoff_timer is not None
        assert handler._interval.is_running() is False
        # Fifth call succeeds; MockMgr signals `event` (notify_after=5).
        handler.notify_reset()
        time.sleep(0.1)
        event.wait(30)
        assert event.is_set() is True
        assert flags['valid_interval_state'] is True
        # After a successful call, we should be back to using the
        # interval timer
        assert handler._backoff_time == 1
        assert handler._backoff_timer is None
    finally:
        # Always tear down the handler thread, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_confighandler_execution(request):
    """Verify the interval timer compensates for regeneration runtime.

    Each regeneration is made to take as long as the interval itself; if
    the handler did not subtract execution time from the next wait, only
    about half the expected number of calls would occur in `total_time`.
    """
    handler = None
    try:
        # Each execution of the regenerate_config_f5() should take as
        # long as the interval timer to verify we adjust for this.
        interval_time = 0.20
        def handle_results():
            time.sleep(interval_time)
        mgr = MockMgr(handle_results=handle_results)
        config_template = Template('/tmp/config.$pid')
        config_file = config_template.substitute(pid=os.getpid())
        # make the interval timer the same as the execution time
        handler = bigipconfigdriver.ConfigHandler(config_file, [mgr],
                                                  interval_time)
        # give the thread an opportunity to spin up
        time.sleep(0)
        assert mgr.calls == 0
        obj = deepcopy(_cloud_config)
        obj['global']['verify-interval'] = interval_time
        with open(config_file, 'w+') as f:
            def fin():
                os.unlink(config_file)
            request.addfinalizer(fin)
            json.dump(obj, f)
        assert handler._thread.is_alive() is True
        # The time spent in the execution of the regenerate_config_f5() should
        # not delay the next interval. So we expect to have at least
        # 'total_time / interval' number of calls.
        total_time = 1.00
        # If we didn't account for execution time, we'd get about 50% of
        # the expected, so we'll use 75% to account for clock slop.
        min_expected_calls = int(0.75 * total_time / interval_time)
        handler.notify_reset()
        time.sleep(total_time)
        assert mgr.calls >= min_expected_calls
    finally:
        # Always tear the handler thread down, even on assertion failure.
        assert handler is not None
        handler.stop()
        handler._thread.join(30)
        assert handler._thread.is_alive() is False
def test_confighandler_checkpoint(request):
handler = None
try:
event = threading.Event()
mgr = MockMgr(notify_event=event, notify_after=5)
config_template = Template('/tmp/config.$pid')
config_file = config_template.substitute(pid=os.getpid())
handler = bigipconfigdriver.ConfigHandler(config_file, [mgr],
0.25)
# give the thread an opportunity to spin up
time.sleep(0)
assert | |
<gh_stars>1-10
"""
Copyright (C) 2016-2019 <NAME>
Licensed under Illinois Open Source License (see the file LICENSE). For more information
about the license, see http://otm.illinois.edu/disclose-protect/illinois-open-source-license.
Contain optimization benchmark problems.
"""
import numpy as np
from .problem import Problem
def benchmark(func_name, domain=None, sd=None):
    """Generate an optimization benchmark problem.

    Args:
        func_name (str): Benchmark function name.
        domain (list of tuples or None, optional): Optimization domain.
            For example, `domain` = [(0, 1), (0, 2)] means optimizing a
            2D function of argument ``x`` on the domain
            ``0 <= x[0] <= 1`` and ``0 <= x[1] <= 2``. If None, the default
            domain associated with `func_name` is used.
        sd (float, optional): Standard deviation of the Gaussian noise added
            to the benchmark function. If None, the default value associated
            with `func_name` is used.

    Returns:
        prob (`Problem` type): Optimization problem instance for `func_name`.

    Raises:
        ValueError: If `func_name` is not implemented.
    """
    # Registry of (lazy benchmark factory, default noise sd). The lambdas keep
    # construction deferred so only the requested benchmark is instantiated.
    registry = {
        'Schaffer': (lambda: Schaffer(domain), 0.02),
        'Goldstein': (lambda: Goldstein(domain), 2.),
        'DropWave': (lambda: DropWave(domain), 0.02),
        'SixHumpCamel': (lambda: SixHumpCamel(domain), 0.1),
        'Ackley10': (lambda: Ackley(10, domain), 1.),
        'Alpine10': (lambda: Alpine(10, domain), 1.),
        'Hartmann6': (lambda: Hartmann6(domain), 0.05),
        'Rastrigin2': (lambda: Rastrigin(2, domain), 0.5),
        'Griewank10': (lambda: Griewank(10, domain), 2.),
        'Levy10': (lambda: Levy(10, domain), 1.),
        'SumPower10': (lambda: SumPower(10, domain), 0.05),
        'PowerSum': (lambda: PowerSum(domain), 1.),
    }
    if func_name not in registry:
        raise ValueError('Unknown func_name.')
    factory, default_sd = registry[func_name]
    return benchmark_to_problem(factory(), default_sd if sd is None else sd)
def benchmark_to_problem(benchmark, sd):
    """Convert a benchmark instance to an optimization problem instance.

    Args:
        benchmark (benchmark type, e.g. `Schaffer`): Benchmark instance.
        sd (float): Standard deviation of Gaussian noise added to the
            benchmark function.

    Returns:
        prob (`Problem` type): Optimization problem corresponding to
            `benchmark`.

    Raises:
        ValueError: If `sd` < 0.
    """
    if sd < 0:
        raise ValueError('sd must be non-negative.')

    def noisy(x):
        # Noise-free value plus zero-mean Gaussian observation noise.
        return benchmark.f(x) + np.random.normal(0, sd)

    return Problem(benchmark.domain, func=noisy, name=benchmark.name,
                   true_func=benchmark.f, min_loc=benchmark.min,
                   min_val=benchmark.fmin, noise_type='Gaussian', sd=sd)
class Schaffer:
    """`Schaffer N.2 function <https://www.sfu.ca/~ssurjano/schaffer2.html>`."""

    def __init__(self, domain=None):
        """Constructor.

        Args:
            domain (list of tuples or None, optional): Optimization domain.
                If None, the default value is used.
        """
        if domain is None:
            domain = [(-100., 100.), (-100., 100.)]
        self.domain = domain
        self.dim = len(self.domain)
        self.min = np.array([(0., 0.)])  # global minimum locations
        self.fmin = 0.                   # global minimum value
        self.name = 'Schaffer'

    def f(self, x):
        """Evaluate the function.

        Args:
            x (1d array): Function input.

        Returns:
            y (float): Function evaluation at `x`.

        Raises:
            ValueError: If dimension of `x` is wrong.
        """
        if x.shape != (self.dim, ):
            raise ValueError('Wrong dimension for x.')
        u, v = x[0], x[1]
        numerator = np.sin(u**2 - v**2)**2 - 0.5
        denominator = (1 + (1e-3)*(u**2 + v**2))**2
        return 0.5 + numerator/denominator
class Goldstein:
    """`Goldstein-Price function <https://www.sfu.ca/~ssurjano/goldpr.html>`."""

    def __init__(self, domain=None):
        """Constructor.

        Args:
            domain (list of tuples or None, optional): Optimization domain.
                If None, the default value is used.
        """
        if domain is None:
            domain = [(-2., 2.), (-2., 2.)]
        self.domain = domain
        self.dim = len(self.domain)
        self.min = np.array([(0., -1.)])  # global minimum locations
        self.fmin = 3.                    # global minimum value
        self.name = 'Goldstein'

    def f(self, x):
        """Evaluate the function.

        Args:
            x (1d array): Function input.

        Returns:
            y (float): Function evaluation at `x`.

        Raises:
            ValueError: If dimension of `x` is wrong.
        """
        if x.shape != (self.dim, ):
            raise ValueError('Wrong dimension for x.')
        u, v = x[0], x[1]
        poly_a = 19 - 14*u + 3*u**2 - 14*v + 6*u*v + 3*v**2
        factor_a = 1 + ((u + v + 1)**2)*poly_a
        poly_b = 18 - 32*u + 12*u**2 + 48*v - 36*u*v + 27*v**2
        factor_b = 30 + ((2*u - 3*v)**2)*poly_b
        return factor_a*factor_b
class SixHumpCamel:
    """`Six-hump camel function <https://www.sfu.ca/~ssurjano/camel6.html>`."""

    def __init__(self, domain=None):
        """Constructor.

        Args:
            domain (list of tuples or None, optional): Optimization domain.
                If None, the default value is used.
        """
        if domain is None:
            domain = [(-3., 3.), (-2., 2.)]
        self.domain = domain
        self.dim = len(self.domain)
        # Two symmetric global minimum locations.
        self.min = np.array([(0.0898, -0.7126), (-0.0898, 0.7126)])
        self.fmin = -1.0316  # global minimum value
        self.name = 'SixHumpCamel'

    def f(self, x):
        """Evaluate the function.

        Args:
            x (1d array): Function input.

        Returns:
            y (float): Function evaluation at `x`.

        Raises:
            ValueError: If dimension of `x` is wrong.
        """
        if x.shape != (self.dim, ):
            raise ValueError('Wrong dimension for x.')
        u, v = x[0], x[1]
        hump = (4 - 2.1*u**2 + (u**4)/3)*u**2
        cross = u*v
        valley = (-4 + 4*v**2)*v**2
        return hump + cross + valley
class DropWave:
    """`Drop-wave function <https://www.sfu.ca/~ssurjano/drop.html>`."""

    def __init__(self, domain=None):
        """Constructor.

        Args:
            domain (list of tuples or None, optional): Optimization domain.
                If None, the default value is used.
        """
        if domain is None:
            domain = [(-5.12, 5.12), (-5.12, 5.12)]
        self.domain = domain
        self.dim = len(self.domain)
        self.min = np.array([(0., 0.)])  # global minimum locations
        self.fmin = -1.                  # global minimum value
        self.name = 'DropWave'

    def f(self, x):
        """Evaluate the function.

        Args:
            x (1d array): Function input.

        Returns:
            y (float): Function evaluation at `x`.

        Raises:
            ValueError: If dimension of `x` is wrong.
        """
        if x.shape != (self.dim, ):
            raise ValueError('Wrong dimension for x.')
        r2 = x[0]**2 + x[1]**2
        return -(1 + np.cos(12*np.sqrt(r2))) / (0.5*r2 + 2)
class Alpine:
    """`Alpine function <http://infinity77.net/global_optimization/test_functions_nd_A.html#go_benchmark.Alpine01>`."""

    def __init__(self, dim, domain=None):
        """Constructor.

        Args:
            dim (int): Dimension of function.
            domain (list of tuples or None, optional): Optimization domain.
                If None, the default value is used.
        """
        self.dim = dim
        self.domain = domain if domain is not None else [(-10., 10.)]*dim
        self.min = np.array([tuple([0.]*dim)])  # global minimum locations
        self.fmin = 0.                          # global minimum value
        self.name = 'Alpine%d' % dim

    def f(self, x):
        """Evaluate the function.

        Args:
            x (1d array): Function input.

        Returns:
            y (float): Function evaluation at `x`.

        Raises:
            ValueError: If dimension of `x` is wrong.
        """
        if x.shape != (self.dim, ):
            raise ValueError('Wrong dimension for x.')
        # sum_i |x_i * sin(x_i) + 0.1 * x_i|
        return np.abs(x*np.sin(x) + 0.1*x).sum()
class Ackley:
"""
`Ackley function <https://www.sfu.ca/~ssurjano/ackley.html>`.
"""
def __init__(self, dim, domain=None):
"""
Constructor.
Args:
dim (int): Dimension of function.
domain (list of tuples or None, optional): Optimization domain.
If None, we use the default value.
"""
self.dim = dim
self.domain =[(-32.768, 32.768)]*dim if domain is None else domain
self.min = np.array([tuple([0.]*dim)]) # global minimum locations
self.fmin = 0. # global minimum
self.name = 'Ackley%d' % dim
def f(self, x):
"""
Expression of function.
Args:
x (1d array): Function input.
Returns:
y (float): Function evaluation at `x`.
| |
"""
batch_propagation_results.py
"""
import enum
import io
import json
import time
import urllib
# `import urllib` alone does not load these submodules; the code below calls
# urllib.parse.urljoin/urlencode and urllib.request.urlopen directly.
import urllib.parse
import urllib.request
from typing import List, Optional, Dict

import numpy as np
import pandas as pd
import requests
from dateutil import parser as dateparser

from adam import stk, ApsRestServiceResultsProcessor, AuthenticatingRestProxy, RestRequests
class OrbitEventType(enum.Enum):
    """Events of interest, from an orbit propagation.

    This is the same as PositionOrbitType, but with updated naming to be
    consistent with the server-side enum. Values are the raw strings
    exchanged with the server (they also appear in result file paths).
    """
    MISS = 'MISS'
    CLOSE_APPROACH = 'CLOSE_APPROACH'
    IMPACT = 'IMPACT'
class ResultsClient(object):
    """Module for managing results interactions."""

    def __init__(self, rest=None):
        """Initialize the Results Service API client.

        Args:
            rest (RestProxy): a RestProxy that makes calls to the ADAM API.
                If None, a default ``AuthenticatingRestProxy(RestRequests())``
                is created.
        """
        # BUG FIX: the old default argument `rest=AuthenticatingRestProxy(
        # RestRequests())` was evaluated once at import time and shared one
        # proxy instance across every client. Build the default lazily,
        # per-instance, instead.
        if rest is None:
            rest = AuthenticatingRestProxy(RestRequests())
        self._rest = rest

    def get_monte_carlo_results(self, job):
        """Get the job results for a specific job for a specific project.

        Args:
            job (Job): The job id or Job object that has the Job ID in it

        Returns:
            result (MonteCarloResults): a result object that can be used to
                query for data about the submitted job
        """
        results_processor = ApsRestServiceResultsProcessor(self._rest,
                                                           job.get_project_id())
        return MonteCarloResults(results_processor, job.get_uuid())
class ApsResults:
    """API for retrieving job details."""

    @classmethod
    def _from_rest_with_raw_ids(cls, rest, project_uuid, job_uuid):
        """Build an ApsResults from a rest proxy and raw project/job ids."""
        results_processor = ApsRestServiceResultsProcessor(rest, project_uuid)
        return ApsResults(results_processor, job_uuid)

    def __init__(self, client, job_uuid):
        self._rp = client          # results processor used for server calls
        self._job_uuid = job_uuid  # id of the job whose results we expose
        self._results_uuid = None
        self._results = None       # cached results payload (lazy)

    def __str__(self):
        return f'{self.json()}'

    def job_id(self):
        """Return the job UUID."""
        return self._job_uuid

    def json(self):
        """Return a dict representation of this result object."""
        return {'job_uuid': self._job_uuid,
                'results': self._results}

    def check_status(self):
        """Query the server for the job's current status string."""
        return self._rp.check_status(self._job_uuid)['status']

    def wait_for_complete(self, max_wait_sec=60, print_waiting=False):
        """Polls the job until the job completes.

        Args:
            max_wait_sec (int): the maximum time in seconds to run the wait.
                Defaults to 60.
            print_waiting (boolean): Whether to print the waiting status
                messages. Defaults to False.

        Returns:
            str: the job status.

        Raises:
            RuntimeError: if the job has not completed within `max_wait_sec`.
        """
        sleep_time_sec = 10.0
        t0 = time.perf_counter()
        status = self.check_status()
        last_status = ''
        count = 0
        while status != 'COMPLETED':
            if print_waiting:
                if last_status != status:
                    print(status)
                    count = 0
                if count == 40:
                    count = 0
                    print()
                print('.', end='')
                # BUG FIX: `count` was never incremented, so the
                # newline-every-40-dots logic above could never trigger.
                count += 1
            elapsed = time.perf_counter() - t0
            if elapsed > max_wait_sec:
                raise RuntimeError(
                    f'Computation has exceeded desired wait period of {max_wait_sec} sec.')
            last_status = status
            time.sleep(sleep_time_sec)
            status = self.check_status()
        # BUG FIX: the docstring promised the status, but nothing was
        # returned; return the terminal status ('COMPLETED').
        return status

    def get_results(self, force_update=True):
        """Fetch the job results, caching them on this instance.

        Args:
            force_update (boolean): when True (default), always re-fetch;
                otherwise reuse the cached payload if present.
        """
        if force_update or self._results is None:
            results = self._rp.get_results(self._job_uuid)
            self._results = results
        return self._results
class MonteCarloResults(ApsResults):
"""API for retrieving propagation results and summaries"""
    class PositionOrbitType(enum.Enum):
        """The type of orbit position in relation to a target body.

        NOTE(review): carries the same values as the module-level
        OrbitEventType enum (which documents itself as the renamed,
        server-consistent version of this one).
        """
        MISS = 'MISS'
        CLOSE_APPROACH = 'CLOSE_APPROACH'
        IMPACT = 'IMPACT'
@classmethod
def _from_rest_with_raw_ids(cls, rest, project_uuid, job_uuid):
results_processor = ApsRestServiceResultsProcessor(rest, project_uuid)
return MonteCarloResults(results_processor, job_uuid)
def __init__(self, results_processor, job_uuid):
ApsResults.__init__(self, results_processor, job_uuid)
self._detailedOutputs = None
self._summary = None
def get_summary(self, force_update=False):
"""Get the propagation results summary.
Args:
force_update(boolean): True if calling this method should be re-executed,
otherwise False (default).
Returns:
summary (MonteCarloSummary)
"""
self._update_results(force_update)
# {"totalMisses": 6, "totalImpacts": 0, "totalCloseApproaches": 12}
misses = self._summary.get('totalMisses')
if misses is None:
misses = 0
close_approaches = self._summary.get('totalCloseApproaches')
if close_approaches is None:
close_approaches = 0
impacts = self._summary.get('totalImpacts')
if impacts is None:
impacts = 0
denominator = misses + impacts
probability = 0
if denominator > 0:
probability = impacts / (misses + impacts)
return MonteCarloSummary(
misses=misses,
close_approach=close_approaches,
impacts=impacts,
pc=probability
)
def get_final_positions(self, position_orbit_type: PositionOrbitType, force_update=False):
"""Get the final positions of all propagated objects in the job.
Args:
position_orbit_type (PositionOrbitType): the type of orbit position to filter.
force_update (boolean): whether the request should be re-executed.
Returns:
list: A list of the final orbit positions, filtered by the position_orbit_type.
"""
self._update_results(force_update)
position_type_string = position_orbit_type.value
final_positions = self._detailedOutputs['finalPositionsByType'].get(position_type_string)
if final_positions is None:
return []
positions = final_positions['finalPosition']
return_data = list(
map(lambda p: [np.datetime64(dateparser.parse(p['epoch'])), p['x'], p['y'], p['z']],
positions))
return return_data
def get_result_ephemeris_count(self, force_update=False):
"""Get the number of ephemerides.
Args:
force_update (boolean): whether the request should be re-executed.
Returns:
int: the number of ephemerides generated from the propagation.
"""
self._update_results(force_update)
ephemeris = self._detailedOutputs.get('ephemeris')
if ephemeris is None:
return 0
ephemeris_objects = ephemeris.get('ephemerisResourcePath')
if ephemeris_objects is None:
return 0
return len(ephemeris_objects)
def get_result_raw_ephemeris(self, run_number, force_update=False):
"""Get an ephemeris for a particular run in the batch.
Args:
force_update (boolean): whether the request should be re-executed.
Returns:
str: the ephemeris file as a string.
"""
self._update_results(force_update)
ephemeris = self._detailedOutputs['ephemeris']
ephemeris_resource_name = ephemeris['ephemerisResourcePath'][run_number]
base_url = ephemeris['resourceBasePath']
if not base_url.endswith('/'):
base_url = base_url + '/'
url = urllib.parse.urljoin(base_url, ephemeris_resource_name)
with urllib.request.urlopen(url) as response:
return response.read().decode('utf-8')
def get_result_ephemeris(self, run_number, force_update=False) -> pd.DataFrame:
"""Get an ephemeris for a particular run in the batch as a Pandas DataFrame
Args:
force_update (boolean): whether the request should be re-executed.
Returns:
ephemeris: Ephemeris from file as a Pandas DataFrame
"""
ephemeris_text = self.get_result_raw_ephemeris(run_number, force_update)
ephemeris = stk.io.ephemeris_file_data_to_dataframe(ephemeris_text.splitlines())
return ephemeris
def list_result_ephemerides_files(
self, page_size: int = 100, page_token: str = None) -> Dict:
"""List one page of ephemerides files from the job results.
Args:
page_size (int): The size of the results to retrieve
page_token (str): Which page to retrieve
Returns:
Dict containing the ephemerides paths
"""
params = {}
if page_size < 0 or page_size > 100:
page_size = 100
params['pageSize'] = page_size
if page_token:
params['pageToken'] = page_token
ephs = self._rp._rest.get(
f'/projects/{self._rp._project}/jobs/{self._job_uuid}'
f'/ephemerides?{urllib.parse.urlencode(params)}')
return ephs
def list_all_ephemerides_files(self) -> Dict:
"""Lists all ephemerides from the job results.
Performs all the paging without user intervention.
Returns:
Dict containing the ephemerides paths
"""
ephs = self.list_result_ephemerides_files()
while 'nextPageToken' in ephs:
next_page_token = ephs['nextPageToken']
_, e = self.list_result_ephemerides_files(page_token=next_page_token)
ephs['ephemerisResourcePath'].extend(e['ephemerisResourcePath'])
return ephs
def get_ephemeris_content(self, run_index: int,
orbit_event_type: Optional[OrbitEventType] = None,
force_update: bool = False) -> str:
"""Retrieves an ephemeris file and returns the text content.
This doesn't use the ADAM REST wrapper, since that class assumes the response will be in
json, and this is an ephemeris. For now, it's fine to use `requests` directly.
Args:
run_index (int): The run number of the ephemeris
orbit_event_type (Optional[OrbitEventType]): The OrbitEventType of the ephemeris
force_update (bool): Whether the results should be reloaded from the server
Returns:
str: The ephemeris content, as a string.
"""
self._update_results(force_update)
file_prefix = (f"{self._detailedOutputs['jobOutputPath']}"
f"/{self._detailedOutputs['ephemeridesDirectoryPrefix']}")
eph_name = f'run-{run_index}-00000-of-00001.e'
# Retrieve all the file paths, ignoring 404s. Ephems are supposed to only map to either
# MISS or IMPACT. If the orbit_event_type isn't provided, then brute-force try to get the
# the file path for both MISS and IMPACT. We wouldn't know which one exists without
# listing the bucket, and sometimes that might just be too much to wade through.
file_paths = []
if orbit_event_type is None:
file_paths.append(f"{file_prefix}/{OrbitEventType.MISS.value}/{eph_name}")
file_paths.append(f"{file_prefix}/{OrbitEventType.IMPACT.value}/{eph_name}")
else:
file_paths.append(f"{file_prefix}/{orbit_event_type.value}/{eph_name}")
responses = [r for r in [requests.get(f) for f in file_paths] if r.status_code != 404]
# There should just be 1 successful response (assuming the orbit_event_type and run_index
# are correct)
if responses and responses[0].status_code < 300:
return responses[0].text
resp_tuples = [(r.status_code, r.text) for r in responses]
raise RuntimeError(f'There was a problem getting the ephemeris.\n{resp_tuples}')
def get_ephemeris_as_dataframe(self, run_index: int,
orbit_event_type: Optional[
OrbitEventType] = None) -> pd.DataFrame:
"""Get ephemeris content and convert it to a pandas DataFrame.
Args:
run_index (int): The run number of the ephemeris
orbit_event_type (Optional[OrbitEventType]): The OrbitEventType of the ephemeris
Returns:
pandas.DataFrame: The STK ephemeris in a pandas DataFrame.
"""
ephem_text = self.get_ephemeris_content(run_index=run_index,
orbit_event_type=orbit_event_type)
return stk.io.ephemeris_file_data_to_dataframe(ephem_text.splitlines())
def list_state_files(self, force_update: bool = False) -> List[str]:
"""List the state files generated during the propagation.
Args:
force_update (bool): Whether the results should be reloaded from the server
Returns:
list (str): a list of URL strings for the state files.
"""
self._update_results(force_update)
state_files = self._detailedOutputs['states']
file_prefix = self._detailedOutputs['jobOutputPath']
return [f"{file_prefix}/{s}" for s in state_files]
def get_states_content(self, orbit_event_type: OrbitEventType,
force_update: bool = False) -> str:
"""Retrieves a states file and returns the content as a string.
This doesn't use the ADAM REST wrapper, since that class assumes the response will be in
json, and this is an ephemeris. For now, it's fine to use `requests` directly.
Args:
orbit_event_type (OrbitEventType): The type of OrbitEvent for which to retrieve the
states output.
force_update (bool): Whether the results should be reloaded from the server
Returns:
str: The content of the state file.
"""
self._update_results(force_update)
state_files = [f"{self._detailedOutputs['jobOutputPath']}/{f}" for f in
self._detailedOutputs['states'] if
f'states/{orbit_event_type.value}' in f]
if not state_files:
return ''
response = requests.get(state_files[0])
if response.status_code >= 300:
raise RuntimeError(
f"Unable to retrieve state file: HTTP status | |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for Residual Networks.
Residual networks ('v1' ResNets) were originally proposed in:
[1] <NAME>, <NAME>, <NAME>, <NAME>
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant was introduced by:
[2] <NAME>, <NAME>, <NAME>, <NAME>
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer
rather than after.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-5
DEFAULT_VERSION = 2
DEFAULT_DTYPE = tf.float32
CASTABLE_TYPES = (tf.float16, )
ALLOWED_TYPES = (DEFAULT_DTYPE, ) + CASTABLE_TYPES
################################################################################
# Convenience functions for building the ResNet model.
################################################################################
def batch_norm(inputs, training, data_format):
    """Performs a batch normalization using a standard set of parameters.

    Args:
        inputs: Input tensor.
        training: Boolean for training vs. inference mode.
        data_format: 'channels_first' or 'channels_last'.

    Returns:
        The batch-normalized tensor.
    """
    channel_axis = 1 if data_format == 'channels_first' else 3
    # fused=True gives a significant performance boost. See
    # https://www.tensorflow.org/performance/performance_guide#common_fused_ops
    return tf.layers.batch_normalization(
        inputs=inputs,
        axis=channel_axis,
        momentum=_BATCH_NORM_DECAY,
        epsilon=_BATCH_NORM_EPSILON,
        center=True,
        scale=True,
        training=training,
        fused=True)
def fixed_padding(inputs, kernel_size, data_format):
    """Pads the input along the spatial dimensions independently of input size.

    Args:
        inputs: A tensor of size [batch, channels, height_in, width_in] or
            [batch, height_in, width_in, channels] depending on data_format.
        kernel_size: The kernel to be used in the conv2d or max_pool2d
            operation. Should be a positive integer.
        data_format: The input format ('channels_last' or 'channels_first').

    Returns:
        A tensor with the same format as the input with the data either
        intact (if kernel_size == 1) or padded (if kernel_size > 1).
    """
    total = kernel_size - 1
    before = total // 2
    after = total - before
    spatial = [[before, after], [before, after]]
    if data_format == 'channels_first':
        paddings = [[0, 0], [0, 0]] + spatial
    else:
        paddings = [[0, 0]] + spatial + [[0, 0]]
    return tf.pad(inputs, paddings)
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
    """Strided 2-D convolution with explicit padding.

    The padding is consistent and is based only on `kernel_size`, not on the
    dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
    """
    padded = fixed_padding(inputs, kernel_size, data_format) if strides > 1 else inputs
    return tf.layers.conv2d(
        inputs=padded,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding='SAME' if strides == 1 else 'VALID',
        use_bias=False,
        kernel_initializer=tf.variance_scaling_initializer(),
        data_format=data_format)
################################################################################
# ResNet block definitions.
################################################################################
def _building_block_v1(inputs, filters, training, projection_shortcut, strides,
                       data_format):
    """A single block for ResNet v1, without a bottleneck.

    Conv -> BN -> ReLU ordering as described in "Deep Residual Learning for
    Image Recognition" (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        inputs: A tensor of size [batch, channels, height_in, width_in] or
            [batch, height_in, width_in, channels] depending on data_format.
        filters: The number of filters for the convolutions.
        training: A Boolean for whether the model is in training or inference
            mode. Needed for batch normalization.
        projection_shortcut: The function to use for projection shortcuts
            (typically a 1x1 convolution when downsampling the input).
        strides: The block's stride. If greater than 1, this block will
            ultimately downsample the input.
        data_format: The input format ('channels_last' or 'channels_first').

    Returns:
        The output tensor of the block; shape should match inputs.
    """
    if projection_shortcut is None:
        shortcut = inputs
    else:
        # Project (and downsample) the residual path, then normalize it.
        shortcut = batch_norm(inputs=projection_shortcut(inputs),
                              training=training, data_format=data_format)

    x = conv2d_fixed_padding(inputs=inputs, filters=filters, kernel_size=3,
                             strides=strides, data_format=data_format)
    x = tf.nn.relu(batch_norm(x, training, data_format))
    x = conv2d_fixed_padding(inputs=x, filters=filters, kernel_size=3,
                             strides=1, data_format=data_format)
    x = batch_norm(x, training, data_format)
    return tf.nn.relu(x + shortcut)
def _building_block_v2(inputs, filters, training, projection_shortcut, strides,
                       data_format):
    """A single block for ResNet v2, without a bottleneck.

    BN -> ReLU -> Conv (full pre-activation) ordering as described in
    "Identity Mappings in Deep Residual Networks"
    (https://arxiv.org/pdf/1603.05027.pdf).

    Args:
        inputs: A tensor of size [batch, channels, height_in, width_in] or
            [batch, height_in, width_in, channels] depending on data_format.
        filters: The number of filters for the convolutions.
        training: A Boolean for whether the model is in training or inference
            mode. Needed for batch normalization.
        projection_shortcut: The function to use for projection shortcuts
            (typically a 1x1 convolution when downsampling the input).
        strides: The block's stride. If greater than 1, this block will
            ultimately downsample the input.
        data_format: The input format ('channels_last' or 'channels_first').

    Returns:
        The output tensor of the block; shape should match inputs.
    """
    preact = tf.nn.relu(batch_norm(inputs, training, data_format))
    # The projection shortcut comes after the first batch norm and ReLU
    # since it performs a 1x1 convolution; the identity shortcut bypasses
    # the pre-activation entirely.
    shortcut = inputs if projection_shortcut is None else projection_shortcut(preact)

    x = conv2d_fixed_padding(inputs=preact, filters=filters, kernel_size=3,
                             strides=strides, data_format=data_format)
    x = tf.nn.relu(batch_norm(x, training, data_format))
    x = conv2d_fixed_padding(inputs=x, filters=filters, kernel_size=3,
                             strides=1, data_format=data_format)
    return x + shortcut
def _bottleneck_block_v1(inputs, filters, training, projection_shortcut,
                         strides, data_format):
    """A single block for ResNet v1, with a bottleneck.

    Similar to _building_block_v1(), except using the 1x1 -> 3x3 -> 1x1
    "bottleneck" structure of "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf), with Conv -> BN -> ReLU ordering.

    Args:
        inputs: A tensor of size [batch, channels, height_in, width_in] or
            [batch, height_in, width_in, channels] depending on data_format.
        filters: The number of filters for the convolutions; the final 1x1
            conv expands to 4 * filters.
        training: A Boolean for whether the model is in training or inference
            mode. Needed for batch normalization.
        projection_shortcut: The function to use for projection shortcuts
            (typically a 1x1 convolution when downsampling the input).
        strides: The block's stride. If greater than 1, this block will
            ultimately downsample the input.
        data_format: The input format ('channels_last' or 'channels_first').

    Returns:
        The output tensor of the block; shape should match inputs.
    """
    if projection_shortcut is None:
        shortcut = inputs
    else:
        # Project (and downsample) the residual path, then normalize it.
        shortcut = batch_norm(inputs=projection_shortcut(inputs),
                              training=training, data_format=data_format)

    # 1x1 reduce.
    x = conv2d_fixed_padding(inputs=inputs, filters=filters, kernel_size=1,
                             strides=1, data_format=data_format)
    x = tf.nn.relu(batch_norm(x, training, data_format))
    # 3x3 (possibly strided) spatial convolution.
    x = conv2d_fixed_padding(inputs=x, filters=filters, kernel_size=3,
                             strides=strides, data_format=data_format)
    x = tf.nn.relu(batch_norm(x, training, data_format))
    # 1x1 expand to 4 * filters.
    x = conv2d_fixed_padding(inputs=x, filters=4 * filters, kernel_size=1,
                             strides=1, data_format=data_format)
    x = batch_norm(x, training, data_format)
    return tf.nn.relu(x + shortcut)
def _bottleneck_block_v2(inputs, filters, training, projection_shortcut,
                         strides, data_format):
    """A single block for ResNet v2, with a bottleneck.

    Similar to _building_block_v2(), except using the "bottleneck" blocks
    described in:
      Deep Residual Learning for Image Recognition
      https://arxiv.org/pdf/1512.03385.pdf
    Adapted to the "pre-activation" ordering (batch normalization, then
    ReLU, then convolution) of:
      Identity Mappings in Deep Residual Networks
      https://arxiv.org/pdf/1603.05027.pdf

    Args:
      inputs: A tensor of size [batch, channels, height_in, width_in] or
        [batch, height_in, width_in, channels] depending on data_format.
      filters: The number of filters for the convolutions.
      training: A Boolean for whether the model is in training or inference
        mode. Needed for batch normalization.
      projection_shortcut: The function to use for projection shortcuts
        (typically a 1x1 convolution when downsampling the input).
      strides: The block's stride. If greater than 1, this block will
        ultimately downsample the input.
      data_format: The input format ('channels_last' or 'channels_first').

    Returns:
      The output tensor of the block; shape should match inputs.
    """
    # Pre-activation shared by the residual branch and, when projecting,
    # the shortcut branch.
    preact = tf.nn.relu(batch_norm(inputs, training, data_format))

    # The projection shortcut comes after the first batch norm and ReLU
    # since it performs a 1x1 convolution; otherwise use the raw inputs.
    if projection_shortcut is None:
        residual = inputs
    else:
        residual = projection_shortcut(preact)

    # 1x1 conv to reduce channels.
    out = conv2d_fixed_padding(
        inputs=preact,
        filters=filters,
        kernel_size=1,
        strides=1,
        data_format=data_format)
    out = tf.nn.relu(batch_norm(out, training, data_format))

    # 3x3 conv; this is where any downsampling (strides > 1) happens.
    out = conv2d_fixed_padding(
        inputs=out,
        filters=filters,
        kernel_size=3,
        strides=strides,
        data_format=data_format)
    out = tf.nn.relu(batch_norm(out, training, data_format))

    # 1x1 conv to expand channels back to 4x filters; v2 blocks end with
    # the plain residual addition (no trailing batch norm or ReLU).
    out = conv2d_fixed_padding(
        inputs=out,
        filters=4 * filters,
        kernel_size=1,
        strides=1,
        data_format=data_format)
    return out + residual
def block_layer(inputs, | |
scene list.
merge_lists(source.chapters[chId].srtScenes, self.chapters[chId].srtScenes)
#--- Merge project attributes.
if source.title:
# avoids deleting the title, if it is empty by accident
self.title = source.title
if source.desc is not None:
self.desc = source.desc
if source.authorName is not None:
self.authorName = source.authorName
if source.authorBio is not None:
self.authorBio = source.authorBio
if source.fieldTitle1 is not None:
self.fieldTitle1 = source.fieldTitle1
if source.fieldTitle2 is not None:
self.fieldTitle2 = source.fieldTitle2
if source.fieldTitle3 is not None:
self.fieldTitle3 = source.fieldTitle3
if source.fieldTitle4 is not None:
self.fieldTitle4 = source.fieldTitle4
for fieldName in self._PRJ_KWVAR:
try:
self.kwVar[fieldName] = source.kwVar[fieldName]
except:
pass
# Add new chapters to the chapter list.
# Deletion of chapters is not considered.
# The sort order of chapters may not change.
merge_lists(source.srtChapters, self.srtChapters)
# Split scenes by inserted part/chapter/scene dividers.
# This must be done after regular merging
# in order to avoid creating duplicate IDs.
if sourceHasSceneContent:
sceneSplitter = Splitter()
sceneSplitter.split_scenes(self)
return 'yWriter project data updated or created.'
def write(self):
    """Write instance variables to the yWriter xml file.

    Open the yWriter xml file located at filePath and replace the
    instance variables not being None. Create new XML elements if
    necessary.
    Return a message beginning with the ERROR constant in case of error.
    Overrides the superclass method.
    """
    # Refuse to write while yWriter has the project open.
    if self.is_locked():
        return f'{ERROR}yWriter seems to be open. Please close first.'

    self._build_element_tree()
    status = self._write_element_tree(self)
    if not status.startswith(ERROR):
        # Writing succeeded; the postprocessing step produces the
        # final status message.
        status = self._postprocess_xml_file(self.filePath)
    return status
def is_locked(self):
    """Check whether the yw7 file is locked by yWriter.

    yWriter places a '<project path>.lock' file next to the project
    file while the project is open.

    Return True if such a .lock file exists.
    Otherwise, return False.
    """
    lockfile_path = f'{self.filePath}.lock'
    return os.path.isfile(lockfile_path)
def _build_element_tree(self):
"""Modify the yWriter project attributes of an existing xml element tree."""
def build_scene_subtree(xmlScn, prjScn):
    """Update the XML scene element xmlScn from the Scene instance prjScn.

    Existing child elements are updated in place; missing ones are created
    via ET.SubElement. Elements representing boolean flags that are off are
    removed. The recurring try/except(AttributeError) pattern relies on
    find() returning None for a missing element, which makes the
    '.text = ...' assignment raise AttributeError.
    NOTE(review): besides its parameters, this closure reads scId (and
    self) from the enclosing _build_element_tree scope - it only works
    while the enclosing loop variable scId refers to the scene that
    prjScn represents; confirm against the caller.
    """
    if prjScn.title is not None:
        try:
            xmlScn.find('Title').text = prjScn.title
        except(AttributeError):
            ET.SubElement(xmlScn, 'Title').text = prjScn.title
    if xmlScn.find('BelongsToChID') is None:
        # Link the scene back to the (first) chapter whose scene list
        # contains it.
        for chId in self.chapters:
            if scId in self.chapters[chId].srtScenes:
                ET.SubElement(xmlScn, 'BelongsToChID').text = chId
                break
    if prjScn.desc is not None:
        try:
            xmlScn.find('Desc').text = prjScn.desc
        except(AttributeError):
            ET.SubElement(xmlScn, 'Desc').text = prjScn.desc
    # Content and counts are only created if absent, never overwritten.
    if xmlScn.find('SceneContent') is None:
        ET.SubElement(xmlScn, 'SceneContent').text = prjScn.sceneContent
    if xmlScn.find('WordCount') is None:
        ET.SubElement(xmlScn, 'WordCount').text = str(prjScn.wordCount)
    if xmlScn.find('LetterCount') is None:
        ET.SubElement(xmlScn, 'LetterCount').text = str(prjScn.letterCount)
    # 'Unused' is a flag element: present with '-1' when set, absent when not.
    if prjScn.isUnused:
        if xmlScn.find('Unused') is None:
            ET.SubElement(xmlScn, 'Unused').text = '-1'
    elif xmlScn.find('Unused') is not None:
        xmlScn.remove(xmlScn.find('Unused'))
    #--- Write scene fields.
    # Field_SceneType: '1' marks a Notes scene, '2' a Todo scene; the
    # field is only removed if it holds the value this branch manages.
    scFields = xmlScn.find('Fields')
    if prjScn.isNotesScene:
        if scFields is None:
            scFields = ET.SubElement(xmlScn, 'Fields')
        try:
            scFields.find('Field_SceneType').text = '1'
        except(AttributeError):
            ET.SubElement(scFields, 'Field_SceneType').text = '1'
    elif scFields is not None:
        if scFields.find('Field_SceneType') is not None:
            if scFields.find('Field_SceneType').text == '1':
                scFields.remove(scFields.find('Field_SceneType'))
    if prjScn.isTodoScene:
        if scFields is None:
            scFields = ET.SubElement(xmlScn, 'Fields')
        try:
            scFields.find('Field_SceneType').text = '2'
        except(AttributeError):
            ET.SubElement(scFields, 'Field_SceneType').text = '2'
    elif scFields is not None:
        if scFields.find('Field_SceneType') is not None:
            if scFields.find('Field_SceneType').text == '2':
                scFields.remove(scFields.find('Field_SceneType'))
    #--- Write scene custom fields.
    # Set each known custom field when it has a truthy value; otherwise
    # drop it (the bare except covers find() returning None for remove()).
    for field in self._SCN_KWVAR:
        if field in self.scenes[scId].kwVar and self.scenes[scId].kwVar[field]:
            if scFields is None:
                scFields = ET.SubElement(xmlScn, 'Fields')
            try:
                scFields.find(field).text = self.scenes[scId].kwVar[field]
            except(AttributeError):
                ET.SubElement(scFields, field).text = self.scenes[scId].kwVar[field]
        elif scFields is not None:
            try:
                scFields.remove(scFields.find(field))
            except:
                pass
    if prjScn.status is not None:
        try:
            xmlScn.find('Status').text = str(prjScn.status)
        except:
            ET.SubElement(xmlScn, 'Status').text = str(prjScn.status)
    if prjScn.sceneNotes is not None:
        try:
            xmlScn.find('Notes').text = prjScn.sceneNotes
        except(AttributeError):
            ET.SubElement(xmlScn, 'Notes').text = prjScn.sceneNotes
    if prjScn.tags is not None:
        # Tags are stored as a single semicolon-separated string.
        try:
            xmlScn.find('Tags').text = ';'.join(prjScn.tags)
        except(AttributeError):
            ET.SubElement(xmlScn, 'Tags').text = ';'.join(prjScn.tags)
    if prjScn.field1 is not None:
        try:
            xmlScn.find('Field1').text = prjScn.field1
        except(AttributeError):
            ET.SubElement(xmlScn, 'Field1').text = prjScn.field1
    if prjScn.field2 is not None:
        try:
            xmlScn.find('Field2').text = prjScn.field2
        except(AttributeError):
            ET.SubElement(xmlScn, 'Field2').text = prjScn.field2
    if prjScn.field3 is not None:
        try:
            xmlScn.find('Field3').text = prjScn.field3
        except(AttributeError):
            ET.SubElement(xmlScn, 'Field3').text = prjScn.field3
    if prjScn.field4 is not None:
        try:
            xmlScn.find('Field4').text = prjScn.field4
        except(AttributeError):
            ET.SubElement(xmlScn, 'Field4').text = prjScn.field4
    if prjScn.appendToPrev:
        if xmlScn.find('AppendToPrev') is None:
            ET.SubElement(xmlScn, 'AppendToPrev').text = '-1'
    elif xmlScn.find('AppendToPrev') is not None:
        xmlScn.remove(xmlScn.find('AppendToPrev'))
    # Date/time information
    # A scene has either a specific date/time or an unspecified
    # day/hour/minute; the two representations are mutually exclusive.
    if (prjScn.date is not None) and (prjScn.time is not None):
        dateTime = f'{prjScn.date} {prjScn.time}'
        if xmlScn.find('SpecificDateTime') is not None:
            xmlScn.find('SpecificDateTime').text = dateTime
        else:
            ET.SubElement(xmlScn, 'SpecificDateTime').text = dateTime
            ET.SubElement(xmlScn, 'SpecificDateMode').text = '-1'
            # Switch away from the unspecific representation.
            if xmlScn.find('Day') is not None:
                xmlScn.remove(xmlScn.find('Day'))
            if xmlScn.find('Hour') is not None:
                xmlScn.remove(xmlScn.find('Hour'))
            if xmlScn.find('Minute') is not None:
                xmlScn.remove(xmlScn.find('Minute'))
    elif (prjScn.day is not None) or (prjScn.hour is not None) or (prjScn.minute is not None):
        # Switch away from the specific representation.
        if xmlScn.find('SpecificDateTime') is not None:
            xmlScn.remove(xmlScn.find('SpecificDateTime'))
        if xmlScn.find('SpecificDateMode') is not None:
            xmlScn.remove(xmlScn.find('SpecificDateMode'))
        if prjScn.day is not None:
            try:
                xmlScn.find('Day').text = prjScn.day
            except(AttributeError):
                ET.SubElement(xmlScn, 'Day').text = prjScn.day
        if prjScn.hour is not None:
            try:
                xmlScn.find('Hour').text = prjScn.hour
            except(AttributeError):
                ET.SubElement(xmlScn, 'Hour').text = prjScn.hour
        if prjScn.minute is not None:
            try:
                xmlScn.find('Minute').text = prjScn.minute
            except(AttributeError):
                ET.SubElement(xmlScn, 'Minute').text = prjScn.minute
    if prjScn.lastsDays is not None:
        try:
            xmlScn.find('LastsDays').text = prjScn.lastsDays
        except(AttributeError):
            ET.SubElement(xmlScn, 'LastsDays').text = prjScn.lastsDays
    if prjScn.lastsHours is not None:
        try:
            xmlScn.find('LastsHours').text = prjScn.lastsHours
        except(AttributeError):
            ET.SubElement(xmlScn, 'LastsHours').text = prjScn.lastsHours
    if prjScn.lastsMinutes is not None:
        try:
            xmlScn.find('LastsMinutes').text = prjScn.lastsMinutes
        except(AttributeError):
            ET.SubElement(xmlScn, 'LastsMinutes').text = prjScn.lastsMinutes
    # Plot related information
    if prjScn.isReactionScene:
        if xmlScn.find('ReactionScene') is None:
            ET.SubElement(xmlScn, 'ReactionScene').text = '-1'
    elif xmlScn.find('ReactionScene') is not None:
        xmlScn.remove(xmlScn.find('ReactionScene'))
    if prjScn.isSubPlot:
        if xmlScn.find('SubPlot') is None:
            ET.SubElement(xmlScn, 'SubPlot').text = '-1'
    elif xmlScn.find('SubPlot') is not None:
        xmlScn.remove(xmlScn.find('SubPlot'))
    if prjScn.goal is not None:
        try:
            xmlScn.find('Goal').text = prjScn.goal
        except(AttributeError):
            ET.SubElement(xmlScn, 'Goal').text = prjScn.goal
    if prjScn.conflict is not None:
        try:
            xmlScn.find('Conflict').text = prjScn.conflict
        except(AttributeError):
            ET.SubElement(xmlScn, 'Conflict').text = prjScn.conflict
    if prjScn.outcome is not None:
        try:
            xmlScn.find('Outcome').text = prjScn.outcome
        except(AttributeError):
            ET.SubElement(xmlScn, 'Outcome').text = prjScn.outcome
    if prjScn.image is not None:
        try:
            xmlScn.find('ImageFile').text = prjScn.image
        except(AttributeError):
            ET.SubElement(xmlScn, 'ImageFile').text = prjScn.image
    # Characters/locations/items
    # Each reference list is rebuilt from scratch: existing ID children
    # are removed, then the current list is appended in order.
    if prjScn.characters is not None:
        characters = xmlScn.find('Characters')
        try:
            for oldCrId in characters.findall('CharID'):
                characters.remove(oldCrId)
        except(AttributeError):
            characters = ET.SubElement(xmlScn, 'Characters')
        for crId in prjScn.characters:
            ET.SubElement(characters, 'CharID').text = crId
    if prjScn.locations is not None:
        locations = xmlScn.find('Locations')
        try:
            for oldLcId in locations.findall('LocID'):
                locations.remove(oldLcId)
        except(AttributeError):
            locations = ET.SubElement(xmlScn, 'Locations')
        for lcId in prjScn.locations:
            ET.SubElement(locations, 'LocID').text = lcId
    if prjScn.items is not None:
        items = xmlScn.find('Items')
        try:
            for oldItId in items.findall('ItemID'):
                items.remove(oldItId)
        except(AttributeError):
            items = ET.SubElement(xmlScn, 'Items')
        for itId in prjScn.items:
            ET.SubElement(items, 'ItemID').text = itId
def build_chapter_subtree(xmlChp, prjChp, sortOrder):
    """Update the XML chapter element xmlChp from the Chapter instance prjChp.

    sortOrder is the chapter's position in the sorted chapter list.
    Existing child elements are updated in place; missing ones are created
    via ET.SubElement (find() returns None for a missing element, so the
    '.text = ...' assignment raises AttributeError).
    NOTE(review): the custom-field loop reads chId (and self) from the
    enclosing _build_element_tree scope - it only works while the
    enclosing loop variable chId refers to the chapter that prjChp
    represents; confirm against the caller.
    """
    try:
        xmlChp.find('SortOrder').text = str(sortOrder)
    except(AttributeError):
        ET.SubElement(xmlChp, 'SortOrder').text = str(sortOrder)
    try:
        xmlChp.find('Title').text = prjChp.title
    except(AttributeError):
        ET.SubElement(xmlChp, 'Title').text = prjChp.title
    if prjChp.desc is not None:
        try:
            xmlChp.find('Desc').text = prjChp.desc
        except(AttributeError):
            ET.SubElement(xmlChp, 'Desc').text = prjChp.desc
    # 'SectionStart' marks a part heading: removed for a normal chapter
    # (chLevel 0), added with '-1' for a part (chLevel 1).
    if xmlChp.find('SectionStart') is not None:
        if prjChp.chLevel == 0:
            xmlChp.remove(xmlChp.find('SectionStart'))
    elif prjChp.chLevel == 1:
        ET.SubElement(xmlChp, 'SectionStart').text = '-1'
    if prjChp.oldType is not None:
        try:
            xmlChp.find('Type').text = str(prjChp.oldType)
        except(AttributeError):
            ET.SubElement(xmlChp, 'Type').text = str(prjChp.oldType)
    if prjChp.chType is not None:
        try:
            xmlChp.find('ChapterType').text = str(prjChp.chType)
        except(AttributeError):
            ET.SubElement(xmlChp, 'ChapterType').text = str(prjChp.chType)
    # 'Unused' is a flag element: present with '-1' when set, absent when not.
    if prjChp.isUnused:
        if xmlChp.find('Unused') is None:
            ET.SubElement(xmlChp, 'Unused').text = '-1'
    elif xmlChp.find('Unused') is not None:
        xmlChp.remove(xmlChp.find('Unused'))
    #--- Write chapter fields.
    # Note the asymmetry: the two 'Suppress...' fields are reset to '0'
    # when off, while 'Field_IsTrash' is removed entirely.
    chFields = xmlChp.find('Fields')
    if prjChp.suppressChapterTitle:
        if chFields is None:
            chFields = ET.SubElement(xmlChp, 'Fields')
        try:
            chFields.find('Field_SuppressChapterTitle').text = '1'
        except(AttributeError):
            ET.SubElement(chFields, 'Field_SuppressChapterTitle').text = '1'
    elif chFields is not None:
        if chFields.find('Field_SuppressChapterTitle') is not None:
            chFields.find('Field_SuppressChapterTitle').text = '0'
    if prjChp.suppressChapterBreak:
        if chFields is None:
            chFields = ET.SubElement(xmlChp, 'Fields')
        try:
            chFields.find('Field_SuppressChapterBreak').text = '1'
        except(AttributeError):
            ET.SubElement(chFields, 'Field_SuppressChapterBreak').text = '1'
    elif chFields is not None:
        if chFields.find('Field_SuppressChapterBreak') is not None:
            chFields.find('Field_SuppressChapterBreak').text = '0'
    if prjChp.isTrash:
        if chFields is None:
            chFields = ET.SubElement(xmlChp, 'Fields')
        try:
            chFields.find('Field_IsTrash').text = '1'
        except(AttributeError):
            ET.SubElement(chFields, 'Field_IsTrash').text = '1'
    elif chFields is not None:
        if chFields.find('Field_IsTrash') is not None:
            chFields.remove(chFields.find('Field_IsTrash'))
    #--- Write chapter custom fields.
    # Set each known custom field when it has a truthy value; otherwise
    # drop it (the bare except covers find() returning None for remove()).
    for field in self._CHP_KWVAR:
        if field in self.chapters[chId].kwVar and self.chapters[chId].kwVar[field]:
            if chFields is None:
                chFields = ET.SubElement(xmlChp, 'Fields')
            try:
                chFields.find(field).text = self.chapters[chId].kwVar[field]
            except(AttributeError):
                ET.SubElement(chFields, field).text = self.chapters[chId].kwVar[field]
        elif chFields is not None:
            try:
                chFields.remove(chFields.find(field))
            except:
                pass
    #--- Rebuild the chapter's scene list.
    # Drop any existing 'Scenes' element, then recreate it from
    # prjChp.srtScenes so the stored order matches the project.
    try:
        xScnList = xmlChp.find('Scenes')
        xmlChp.remove(xScnList)
    except:
        pass
    if prjChp.srtScenes:
        sortSc = ET.SubElement(xmlChp, 'Scenes')
        for scId in prjChp.srtScenes:
            ET.SubElement(sortSc, 'ScID').text = scId
def build_location_subtree(xmlLoc, prjLoc, sortOrder):
ET.SubElement(xmlLoc, 'ID').text = lcId
if prjLoc.title is not None:
ET.SubElement(xmlLoc, 'Title').text = prjLoc.title
if prjLoc.image is not None:
ET.SubElement(xmlLoc, 'ImageFile').text = prjLoc.image
if prjLoc.desc is not None:
ET.SubElement(xmlLoc, 'Desc').text = prjLoc.desc
if prjLoc.aka is not None:
ET.SubElement(xmlLoc, 'AKA').text = prjLoc.aka
if prjLoc.tags is not None:
ET.SubElement(xmlLoc, 'Tags').text = ';'.join(prjLoc.tags)
ET.SubElement(xmlLoc, 'SortOrder').text = str(sortOrder)
#--- Write location custom fields.
lcFields = xmlLoc.find('Fields')
for field in self._LOC_KWVAR:
if field in self.locations[lcId].kwVar and self.locations[lcId].kwVar[field]:
if lcFields is None:
lcFields = ET.SubElement(xmlLoc, 'Fields')
try:
lcFields.find(field).text = self.locations[lcId].kwVar[field]
except(AttributeError):
ET.SubElement(lcFields, field).text = | |
if(stride_size.is_integer() and stride_size > 0):
padding_size = p
padding_depth = padding_size
stride_depth = stride_size
find = True
break
else:
kernel_size = self.prob_random([1,2,3,4,5,6,7],[1/7 for i in range(7)])
kernel_size_depth = kernel_size
# print(in_channels,'in_channels')
return [int(i) for i in input_size+output_size+[in_channels,out_channels,kernel_size_depth,kernel_size_height,kernel_size_width,stride_depth,stride_height,stride_width,padding_depth,padding_height,padding_width,dilation_depth,dilation_width,dilation_height,groups]]
elif(layer_id == 3):
# print(output_size,'output_size')
assert output_size==None,"反卷积层只支持不指定输出大小"
input_length = input_size[2]
in_channels,out_channels,kernel_size,stride,padding,output_padding,dilation,groups = [0 for index in range(8)]
in_channels = input_size[1]
if(output_size==None):
if(in_channels <= 3):
out_channels = random.randint(16,64)
else:
out_channels = random.randint(int(in_channels*2),in_channels*3)
output_size = [0,0,0,0]
output_size = [input_size[0],out_channels,input_length]
#生成宽高一样的kernel
kernel_size = self.prob_random([1,2,3,4,5,6,7],[0.05,0.05,0.4,0.05,0.3,0.05,0.1])
#生成宽高一样的dilation
dilation_size = self.prob_random([1,2],[0.95,0.05])
common_divisor = self.get_common_divisor(in_channels,out_channels)
output_padding_size = 0
if(len(common_divisor) == 1):
#只有公约数1
groups = 1
else:
groups = self.prob_random(common_divisor,[0.95]+[(1-0.95)/(len(common_divisor)-1) for i in range(len(common_divisor)-1)])
#生成stride,padding
if(True):
stride_size = 1
padding_size = random.randint(0,kernel_size)
ouput_length = output_padding_size + stride_size*(input_length - 1) - 2*padding_size + dilation_size*(kernel_size - 1) + 1
#计算output_size
output_size = [input_size[0],out_channels,ouput_length]
# print([int(i) for i in input_size+output_size+[in_channels,out_channels,kernel_size,stride,padding,output_padding,dilation,groups]])
return [int(i) for i in input_size+output_size+[in_channels,out_channels,kernel_size,stride_size,padding,output_padding,dilation_size,groups]]
elif(layer_id == 4):
#由于size无法成倍缩小,暂时只支持output_size = None
# print(output_size,'output_size')
assert output_size==None,"反卷积层只支持不指定输出大小"
input_height = input_size[2]
input_width = input_size[3]
in_channels,out_channels,kernel_size_height,kernel_size_width,stride_height,\
stride_width,padding_height,padding_width,dilation,groups,output_padding_height,output_padding_width = [0 for index in range(12)]
in_channels = input_size[1]
if(output_size==None):
if(in_channels <= 3):
out_channels = random.randint(16,64)
else:
out_channels = random.randint(int(in_channels*2),in_channels*3)
output_size = [0,0,0,0]
output_size = [input_size[0],out_channels,input_height,input_width]
#生成宽高一样的kernel
kernel_size = self.prob_random([1,2,3,4,5,6,7],[0.05,0.05,0.4,0.05,0.3,0.05,0.1])
kernel_size_height,kernel_size_width = kernel_size,kernel_size
#生成宽高一样的dilation
dilation_size = self.prob_random([1,2],[0.95,0.05])
common_divisor = self.get_common_divisor(in_channels,out_channels)
output_padding_height = 0
output_padding_width = 0
if(len(common_divisor) == 1):
#只有公约数1
groups = 1
else:
groups = self.prob_random(common_divisor,[0.95]+[(1-0.95)/(len(common_divisor)-1) for i in range(len(common_divisor)-1)])
#生成stride,padding
if(True):
stride_size = 1
stride_height,stride_width = stride_size,stride_size
padding_size = random.randint(0,kernel_size)
padding_height,padding_width = padding_size,padding_size
ouput_height = output_padding_height + stride_height*(input_height - 1) - 2*padding_height + dilation_size*(kernel_size_height - 1) + 1
ouput_width = output_padding_width + stride_width*(input_width - 1) - 2*padding_width + dilation_size*(kernel_size_width - 1) + 1
# output_padding_height = ouput_height - stride_height*(input_height - 1) + 2*padding_height - dilation_size*(kernel_size_height - 1) - 1
# output_padding_width = ouput_width - stride_width*(input_width - 1) + 2*padding_width - dilation_size*(kernel_size_width - 1) - 1
#计算output_size
output_size = [input_size[0],out_channels,ouput_height,ouput_width]
# print([int(i) for i in input_size+output_size+[in_channels,out_channels,kernel_size_height,kernel_size_width,stride_height,stride_width,padding_height,padding_width,output_padding_height,output_padding_width,dilation_size,groups]])
return [int(i) for i in input_size+output_size+[in_channels,out_channels,kernel_size_height,kernel_size_width,stride_height,stride_width,padding_height,padding_width,output_padding_height,output_padding_width,dilation_size,groups]]
# return nn.ConvTranspose2d(in_channels,out_channels,(kernel_size_height, kernel_size_width), stride=(stride_height, stride_width),\
# padding=(padding_height, padding_width),output_padding=(output_padding_height,output_padding_width),dilation=dilation,groups=groups)
elif(layer_id == 5):
# print(output_size,'output_size')
assert output_size==None,"反卷积层只支持不指定输出大小"
input_depth = input_size[2]
input_height = input_size[3]
input_width = input_size[4]
in_channels,out_channels,kernel_size_depth,kernel_size_height,kernel_size_width,stride_depth,\
stride_height,stride_width,padding_depth,padding_height,padding_width,output_padding_depth,output_padding_height,output_padding_width,\
dilation,groups = [0 for index in range(16)]
in_channels = input_size[1]
if(output_size==None):
if(in_channels <= 3):
out_channels = random.randint(16,64)
else:
out_channels = random.randint(int(in_channels*2),in_channels*3)
output_size = [0,0,0,0]
output_size = [input_size[0],out_channels,input_depth,input_height,input_width]
#生成宽高一样的kernel
kernel_size = self.prob_random([1,2,3,4,5,6,7],[0.05,0.05,0.4,0.05,0.3,0.05,0.1])
kernel_size_depth,kernel_size_height,kernel_size_width = kernel_size,kernel_size,kernel_size
#生成宽高一样的dilation
dilation_size = self.prob_random([1,2],[0.95,0.05])
common_divisor = self.get_common_divisor(in_channels,out_channels)
output_padding_height = 0
output_padding_width = 0
output_padding_depth = 0
if(len(common_divisor) == 1):
#只有公约数1
groups = 1
else:
groups = self.prob_random(common_divisor,[0.95]+[(1-0.95)/(len(common_divisor)-1) for i in range(len(common_divisor)-1)])
#生成stride,padding
if(True):
stride_size = 1
stride_height,stride_width,stride_depth = stride_size,stride_size,stride_size
padding_size = random.randint(0,kernel_size)
padding_depth,padding_height,padding_width = padding_size,padding_size,padding_size
ouput_depth = output_padding_depth + stride_depth*(input_depth - 1) - 2*padding_depth + dilation_size*(kernel_size_depth - 1) + 1
ouput_height = output_padding_height + stride_height*(input_height - 1) - 2*padding_height + dilation_size*(kernel_size_height - 1) + 1
ouput_width = output_padding_width + stride_width*(input_width - 1) - 2*padding_width + dilation_size*(kernel_size_width - 1) + 1
# output_padding_height = ouput_height - stride_height*(input_height - 1) + 2*padding_height - dilation_size*(kernel_size_height - 1) - 1
# output_padding_width = ouput_width - stride_width*(input_width - 1) + 2*padding_width - dilation_size*(kernel_size_width - 1) - 1
#计算output_size
output_size = [input_size[0],out_channels,ouput_depth,ouput_height,ouput_width]
# print([int(i) for i in input_size+output_size+[in_channels,out_channels,kernel_size_depth,kernel_size_height,kernel_size_width,stride_depth,\
# stride_height,stride_width,padding_depth,padding_height,padding_width,output_padding_depth,output_padding_height,output_padding_width,\
# dilation_size,groups]])
return [int(i) for i in input_size+output_size+[in_channels,out_channels,kernel_size_depth,kernel_size_height,kernel_size_width,stride_depth,\
stride_height,stride_width,padding_depth,padding_height,padding_width,output_padding_depth,output_padding_height,output_padding_width,\
dilation_size,groups]]
# return nn.ConvTranspose3d(in_channels,out_channels,(kernel_size_depth, kernel_size_height, kernel_size_width), stride=(stride_depth, stride_height, stride_width),\
# padding=(padding_depth, padding_height, padding_width),output_padding=(output_padding_depth, output_padding_height, output_padding_width),dilation=dilation,groups=groups)
elif(layer_id == 6):
#如果是max pooling则需要返回indices
input_length = input_size[2]
input_channels = input_size[1]
kernel_size,stride_size,padding_size,dilation_size,pool_type = [0 for index in range(5)]
pool_type = random.randint(0,1)
kernel_size = self.prob_random([1,2,3,4,5,6,7],[0.05,0.05,0.4,0.05,0.3,0.05,0.1])
#生成宽高一样的dilation
dilation_size = self.prob_random([1,2],[0.95,0.05])
if(pool_type == 1):
dilation_size = 1
if(output_size==None):
# stride_size = self.prob_random([1,2,3],[0.6,0.3,0.1])
stride_size = 1
padding_size = random.randint(0,kernel_size)
#计算output_size
out_length = (input_length + 2*padding_size - dilation_size*(kernel_size-1) - 1)/(stride_size) + 1
output_size = [input_size[0],input_channels,out_length]
else:
#通过已知的kernel_size,dilation_size计算stride_size,padding_size的整数解
out_length = output_size[2]
input_length = input_size[2]
find = False
find_count = 0
while not find:
find_count += 1
# print(kernel_size)
assert find_count < 30, "疑似找不到符合要求的神经网络层"
for p in range(0,int(kernel_size/2)+1):
stride_size = (input_length + 2*p - dilation_size*(kernel_size-1) - 1)/(out_length - 1)
if(stride_size.is_integer() and stride_size > 0):
padding_size = p
find = True
break
else:
kernel_size = self.prob_random([1,2,3,4,5,6,7],[1/7 for i in range(7)])
return [int(i) for i in input_size+output_size+[kernel_size,stride_size,padding_size,dilation_size,pool_type]]
elif(layer_id == 7):
#如果是max pooling则需要返回indices
input_height = input_size[2]
input_width = input_size[3]
input_channels = input_size[1]
kernel_size_height,kernel_size_width,stride_height,stride_width,padding_height,padding_width,dilation_height,dilation_width,pool_type = [0 for index in range(9)]
pool_type = random.randint(0,1)
kernel_size = self.prob_random([1,2,3,4,5,6,7],[0.05,0.05,0.4,0.05,0.3,0.05,0.1])
kernel_size_height,kernel_size_width = kernel_size,kernel_size
#生成宽高一样的dilation
dilation_size = self.prob_random([1,2],[0.95,0.05])
dilation_height,dilation_width = dilation_size,dilation_size
if(pool_type == 1):
dilation_height,dilation_width = 1,1
if(output_size==None):
# stride_size = self.prob_random([1,2,3],[0.6,0.3,0.1])
stride_size = 1
stride_height,stride_width = stride_size,stride_size
padding_size = 0
padding_height,padding_width = padding_size,padding_size
#计算output_size
out_height = math.floor((input_height + 2*padding_height - dilation_height*(kernel_size_height-1) - 1)/(stride_height) + 1)
out_width = math.floor((input_width + 2*padding_width - dilation_width*(kernel_size_width-1) - 1)/(stride_width) + 1)
output_size = [input_size[0],input_channels,out_height,out_width]
else:
#通过已知的kernel_size,dilation_size计算stride_size,padding_size的整数解
out_height = output_size[2]
in_height = input_size[2]
find = False
find_count = 0
while not find:
find_count += 1
# print(kernel_size)
assert find_count < 30, "疑似找不到符合要求的神经网络层"
for p in range(0,int(kernel_size/2)+1):
stride_size = (in_height + 2*p - dilation_size*(kernel_size-1) - 1)/(out_height - 1)
if(stride_size.is_integer() and stride_size > 0):
padding_size = p
padding_height,padding_width = padding_size,padding_size
stride_height,stride_width = stride_size,stride_size
find = True
break
else:
kernel_size = self.prob_random([1,2,3,4,5,6,7],[1/7 for i in range(7)])
kernel_size_height,kernel_size_width = kernel_size,kernel_size
return [int(i) for i in input_size+output_size+[kernel_size_height,kernel_size_width,stride_height,stride_width,padding_height,padding_width,dilation_height,dilation_width,pool_type]]
elif(layer_id == 8):
#如果是max pooling则需要返回indices
input_depth = input_size[2]
input_height = input_size[3]
input_width = input_size[4]
input_channels = input_size[1]
kernel_size_depth,kernel_size_height,kernel_size_width,stride_depth,stride_height,\
stride_width,padding_depth,padding_height,padding_width,dilation_depth,dilation_height,dilation_width,pool_type = [0 for index in range(13)]
pool_type = random.randint(0,1)
kernel_size = self.prob_random([1,2,3,4,5,6,7],[0.05,0.05,0.4,0.05,0.3,0.05,0.1])
kernel_size_depth,kernel_size_height,kernel_size_width = kernel_size,kernel_size,kernel_size
#生成宽高一样的dilation
dilation_size = self.prob_random([1,2],[0.95,0.05])
dilation_depth,dilation_height,dilation_width = dilation_size,dilation_size,dilation_size
if(pool_type == 1):
dilation_depth,dilation_height,dilation_width = 1,1,1
if(output_size==None):
# stride_size = self.prob_random([1,2,3],[0.6,0.3,0.1])
stride_size = 1
stride_depth,stride_height,stride_width = stride_size,stride_size,stride_size
padding_size = random.randint(0,kernel_size)
padding_depth,padding_height,padding_width = padding_size,padding_size,padding_size
#计算output_size
out_depth = (input_depth + 2*padding_depth - dilation_depth*(kernel_size_depth-1) - 1)/(stride_depth) + 1
out_height = (input_height + 2*padding_height - dilation_height*(kernel_size_height-1) - 1)/(stride_height) + 1
out_width = (input_width + 2*padding_width - dilation_width*(kernel_size_width-1) - 1)/(stride_width) + 1
output_size = [input_size[0],input_channels,out_depth,out_height,out_width]
else:
#通过已知的kernel_size,dilation_size计算stride_size,padding_size的整数解
out_height = output_size[3]
in_height = input_size[3]
find = False
find_count = 0
while not find:
find_count += 1
# print(kernel_size)
assert find_count < 30, "疑似找不到符合要求的神经网络层"
for p in range(0,int(kernel_size/2)+1):
stride_size = (in_height + 2*p - dilation_size*(kernel_size-1) - 1)/(out_height - 1)
if(stride_size.is_integer() and stride_size > 0):
padding_size = p
padding_height,padding_width = padding_size,padding_size
stride_height,stride_width = stride_size,stride_size
find = True
break
else:
kernel_size = self.prob_random([1,2,3,4,5,6,7],[1/7 for i in range(7)])
kernel_size_height,kernel_size_width = kernel_size,kernel_size
out_depth = output_size[2]
in_depth = input_size[2]
find = False
find_count = 0
while not find:
find_count += 1
# print(kernel_size)
assert find_count < 30, "疑似找不到符合要求的神经网络层"
for p in range(0,int(kernel_size/2)+1):
stride_size = (in_depth + 2*p - dilation_size*(kernel_size-1) - 1)/(out_depth - 1)
if(stride_size.is_integer() and stride_size > 0):
padding_size = p
padding_depth = padding_size
stride_depth = stride_size
find = True
break
else:
kernel_size = self.prob_random([1,2,3,4,5,6,7],[1/7 for i in range(7)])
kernel_size_depth = kernel_size
return [int(i) for i in input_size+output_size+[kernel_size_depth,kernel_size_height,kernel_size_width,stride_depth,stride_height,\
stride_width,padding_depth,padding_height,padding_width,dilation_depth,dilation_height,dilation_width,pool_type]]
elif(layer_id == 9):
input_channels = input_size[1]
input_length = input_size[2]
kernel_size,stride_size,padding_size = [0 for index in range(3)]
kernel_size = self.prob_random([1,2,3,4,5,6,7],[0.05,0.05,0.4,0.05,0.3,0.05,0.1])
if(output_size==None):
# stride_size = self.prob_random([1,2,3],[0.6,0.3,0.1])
stride_size = 1
padding_size = random.randint(0,kernel_size)
#计算output_size
out_length = (input_length-1)*stride_size - 2*padding_size + kernel_size
output_size = [input_size[0],input_channels,out_length]
else:
#通过已知的kernel_size,dilation_size计算stride_size,padding_size的整数解
out_length = output_size[2]
input_length = input_size[2]
find = False
find_count = 0
while not find:
find_count += 1
# print(kernel_size)
assert find_count < 30, "疑似找不到符合要求的神经网络层"
for padding in range(0,int(kernel_size/2)+1):
stride_size = (2*padding - kernel_size + out_length) / (input_length-1)
if(stride_size.is_integer() and stride_size > 0):
padding_size = padding
find = True
break
else:
kernel_size = self.prob_random([1,2,3,4,5,6,7],[1/7 for i in range(7)])
return [int(i) for i in input_size+output_size+[kernel_size,stride_size,padding_size]]
# return nn.MaxUnpool2d(kernel_size = (kernel_size_height, kernel_size_width), stride=(stride_height, stride_width),padding=(padding_height, padding_width))
# return nn.MaxUnpool1d(kernel_size = kernel_size, stride=stride, padding=padding)
elif(layer_id == 10):
input_channels = input_size[1]
input_height = input_size[2]
input_width = input_size[3]
kernel_size_height,kernel_size_width,stride_height,stride_width,padding_height,padding_width = [0 for index in range(6)]
kernel_size = self.prob_random([1,2,3,4,5,6,7],[0.05,0.05,0.4,0.05,0.3,0.05,0.1])
kernel_size_height,kernel_size_width = kernel_size,kernel_size
if(output_size==None):
# stride_size = self.prob_random([1,2,3],[0.6,0.3,0.1])
stride_size = 1
stride_height,stride_width = stride_size,stride_size
padding_size = random.randint(0,kernel_size)
padding_height,padding_width = padding_size,padding_size
#计算output_size
out_height = (input_height-1)*stride_height - 2*padding_height + kernel_size_height
out_width = (input_width-1)*stride_width - 2*padding_width + kernel_size_width
output_size = [input_size[0],input_channels,out_height,out_width]
else:
| |
@type {number} */
var fixedDim = (horz ? height : width) / valueLength;
if (horz) {
if (value.x < x || (value.x > x + width || (value.y > y + height || value.y < y))) {
return false;
}
} else {
if (value.x < x || (value.x > x + width || (value.y > y || value.y < y - height))) {
return false;
}
}
/** @type {number} */
var i = 0;
var valuesLen = codeSegments.length;
for (;i < valuesLen;i++) {
var delta = codeSegments[i];
if (horz) {
var limit = y + fixedDim * i;
if (value.x <= x + delta && (value.y >= limit && value.y <= limit + fixedDim)) {
return{
name : opt_attributes.getData("stringArray")[i],
color : opt_attributes.getData("colorArray")[i],
value : opt_attributes.getData("valueArray")[i],
label : opt_attributes.name
};
}
} else {
limit = x + fixedDim * i;
if (value.x >= limit && (value.x <= limit + fixedDim && value.y >= y - delta)) {
return{
name : opt_attributes.getData("stringArray")[i],
color : opt_attributes.getData("colorArray")[i],
value : opt_attributes.getData("valueArray")[i],
label : opt_attributes.name
};
}
}
}
return false;
}
}
});
$jit.BarChart = new Class({
st : null,
colors : ["#416D9C", "#70A35E", "#EBB056", "#C74243", "#83548B", "#909291", "#557EAA"],
selected : {},
busy : false,
/**
* @param {?} controller
* @return {undefined}
*/
initialize : function(controller) {
this.controller = this.config = $.merge(Options("Canvas", "Margin", "Label", "BarChart"), {
Label : {
type : "Native"
}
}, controller);
var showLabels = this.config.showLabels;
var typeLabels = $.type(showLabels);
var showAggregates = this.config.showAggregates;
var typeAggregates = $.type(showAggregates);
this.config.showLabels = typeLabels == "function" ? showLabels : $.lambda(showLabels);
this.config.showAggregates = typeAggregates == "function" ? showAggregates : $.lambda(showAggregates);
this.initializeViz();
},
/**
 * Build the delegate $jit.ST (SpaceTree) instance that actually draws the
 * bars, forward Tips/Events callbacks to the user config, and install the
 * HTML label lifecycle callbacks.
 * @return {undefined}
 */
initializeViz : function() {
  var config = this.config;
  var that = this;
  // "stacked" or "grouped" -- the part before an optional ":gradient" suffix.
  var nodeType = config.type.split(":")[0];
  /** @type {boolean} */
  var horz = config.orientation == "horizontal";
  // Per-node HTML label elements, keyed by node id; filled in onCreateLabel.
  var nodeLabels = {};
  var delegate = new $jit.ST({
    injectInto : config.injectInto,
    width : config.width,
    height : config.height,
    orientation : horz ? "left" : "bottom",
    levelDistance : 0,
    siblingOffset : config.barsOffset,
    subtreeOffset : 0,
    // Native labels are drawn on the canvas, so HTML labels are off then.
    withLabels : config.Label.type != "Native",
    useCanvas : config.useCanvas,
    Label : {
      type : config.Label.type
    },
    Node : {
      overridable : true,
      type : "barchart-" + nodeType,
      align : "left",
      width : 1,
      height : 1
    },
    Edge : {
      type : "none"
    },
    Tips : {
      enable : config.Tips.enable,
      type : "Native",
      force : true,
      /**
       * Forward tooltip display to the user handler.
       * NOTE(review): the parameter names look shuffled here -- `event` is
       * forwarded as the label and `type` as the event; confirm against the
       * signature expected by config.Tips.onShow.
       * @param {?} from
       * @param {?} type
       * @param {?} event
       * @return {undefined}
       */
      onShow : function(from, type, event) {
        var lab = event;
        config.Tips.onShow(from, lab, type);
      }
    },
    Events : {
      enable : true,
      type : "Native",
      /**
       * Forward clicks on a bar to the user handler with the hit-test result.
       * @param {?} adj
       * @param {?} lab
       * @param {?} selector
       * @return {undefined}
       */
      onClick : function(adj, lab, selector) {
        if (!config.Events.enable) {
          return;
        }
        var from = lab.getContains();
        config.Events.onClick(from, lab, selector);
      },
      /**
       * Highlight the hovered bar segment (only when hoveredColor is set).
       * @param {?} adj
       * @param {?} lab
       * @param {?} event
       * @return {undefined}
       */
      onMouseMove : function(adj, lab, event) {
        if (!config.hoveredColor) {
          return;
        }
        if (adj) {
          var elem = lab.getContains();
          that.select(adj.id, elem.name, elem.index);
        } else {
          // Pointer left all bars: clear any selection.
          that.select(false, false, false);
        }
      }
    },
    /**
     * Create the wrapper/label/aggregate HTML elements for one bar stack.
     * @param {?} adj DOM container created by the ST for this node's label.
     * @param {?} lab graph node for this bar stack.
     * @return {undefined}
     */
    onCreateLabel : function(adj, lab) {
      var labelConf = config.Label;
      var reversed = lab.getData("valueArray");
      // Sum of all stacked values; used to decide label/aggregate visibility.
      var acumLeft = $.reduce(reversed, function(far, near) {
        return far + near;
      }, 0);
      var nlbs = {
        wrapper : document.createElement("div"),
        aggregate : document.createElement("div"),
        label : document.createElement("div")
      };
      /** @type {Element} */
      var wrapper = nlbs.wrapper;
      /** @type {Element} */
      var label = nlbs.label;
      /** @type {Element} */
      var aggregate = nlbs.aggregate;
      /** @type {(CSSStyleDeclaration|null)} */
      var wrapperStyle = wrapper.style;
      /** @type {(CSSStyleDeclaration|null)} */
      var labelStyle = label.style;
      /** @type {(CSSStyleDeclaration|null)} */
      var aggregateStyle = aggregate.style;
      nodeLabels[lab.id] = nlbs;
      wrapper.appendChild(label);
      wrapper.appendChild(aggregate);
      if (!config.showLabels(lab.name, acumLeft, lab)) {
        /** @type {string} */
        labelStyle.display = "none";
      }
      if (!config.showAggregates(lab.name, acumLeft, lab)) {
        /** @type {string} */
        aggregateStyle.display = "none";
      }
      /** @type {string} */
      wrapperStyle.position = "relative";
      /** @type {string} */
      wrapperStyle.overflow = "visible";
      /** @type {string} */
      wrapperStyle.fontSize = labelConf.size + "px";
      wrapperStyle.fontFamily = labelConf.family;
      wrapperStyle.color = labelConf.color;
      /** @type {string} */
      wrapperStyle.textAlign = "center";
      /** @type {string} */
      aggregateStyle.position = labelStyle.position = "absolute";
      adj.style.width = lab.getData("width") + "px";
      adj.style.height = lab.getData("height") + "px";
      /** @type {string} */
      aggregateStyle.left = labelStyle.left = "0px";
      label.innerHTML = lab.name;
      adj.appendChild(wrapper);
    },
    /**
     * Position/resize the HTML labels after layout; toggles visibility via
     * the showLabels/showAggregates callbacks.
     * @param {?} adj
     * @param {?} lab
     * @return {undefined}
     */
    onPlaceLabel : function(adj, lab) {
      if (!nodeLabels[lab.id]) {
        return;
      }
      var labels = nodeLabels[lab.id];
      var style = labels.wrapper.style;
      var labelStyle = labels.label.style;
      var aggregateStyle = labels.aggregate.style;
      /** @type {boolean} */
      var grouped = config.type.split(":")[0] == "grouped";
      /** @type {boolean} */
      var horz = config.orientation == "horizontal";
      var dimArray = lab.getData("dimArray");
      var valArray = lab.getData("valueArray");
      // Grouped charts size the label to the largest bar in the group.
      var w = grouped && horz ? Math.max.apply(null, dimArray) : lab.getData("width");
      var height = grouped && !horz ? Math.max.apply(null, dimArray) : lab.getData("height");
      /** @type {number} */
      var font = parseInt(style.fontSize, 10);
      var styleDeclaration = adj.style;
      if (dimArray && valArray) {
        /** @type {string} */
        style.width = aggregateStyle.width = labelStyle.width = adj.style.width = w + "px";
        /** @type {number} */
        var i = 0;
        var l = valArray.length;
        /** @type {number} */
        var acum = 0;
        // Only values whose bar segment is actually visible count towards
        // the aggregate.
        for (;i < l;i++) {
          if (dimArray[i] > 0) {
            acum += valArray[i];
          }
        }
        if (config.showLabels(lab.name, acum, lab)) {
          /** @type {string} */
          labelStyle.display = "";
        } else {
          /** @type {string} */
          labelStyle.display = "none";
        }
        var aggValue = config.showAggregates(lab.name, acum, lab);
        if (aggValue !== false) {
          /** @type {string} */
          aggregateStyle.display = "";
        } else {
          /** @type {string} */
          aggregateStyle.display = "none";
        }
        if (config.orientation == "horizontal") {
          /** @type {string} */
          aggregateStyle.textAlign = "right";
          /** @type {string} */
          labelStyle.textAlign = "left";
          // NOTE(review): "textIndex" looks like a typo for "textIndent" --
          // the label indent is never actually applied; confirm before fixing.
          /** @type {string} */
          labelStyle.textIndex = aggregateStyle.textIndent = config.labelOffset + "px";
          /** @type {string} */
          aggregateStyle.top = labelStyle.top = (height - font) / 2 + "px";
          /** @type {string} */
          adj.style.height = style.height = height + "px";
        } else {
          /** @type {string} */
          aggregateStyle.top = -font - config.labelOffset + "px";
          /** @type {string} */
          labelStyle.top = config.labelOffset + height + "px";
          /** @type {string} */
          adj.style.top = parseInt(adj.style.top, 10) - height + "px";
          /** @type {string} */
          adj.style.height = style.height = height + "px";
        }
        // showAggregates may return a custom string to display, or `true`
        // to show the computed sum.
        labels.aggregate.innerHTML = aggValue !== true ? aggValue : acum;
      }
    }
  });
  var $cont = delegate.canvas.getSize();
  var margin = config.Margin;
  // Offset the tree so bars hug the chart edge, leaving room for labels.
  if (horz) {
    /** @type {number} */
    delegate.config.offsetX = $cont.width / 2 - margin.left - (config.showLabels && config.labelOffset + config.Label.size);
    /** @type {number} */
    delegate.config.offsetY = (margin.bottom - margin.top) / 2;
  } else {
    delegate.config.offsetY = -$cont.height / 2 + margin.bottom + (config.showLabels && config.labelOffset + config.Label.size);
    /** @type {number} */
    delegate.config.offsetX = (margin.right - margin.left) / 2;
  }
  this.delegate = delegate;
  this.canvas = this.delegate.canvas;
},
/**
* @param {Object} json
* @return {undefined}
*/
loadJSON : function(json) {
if (this.busy) {
return;
}
/** @type {boolean} */
this.busy = true;
/** @type {number} */
var prefix = $.time();
/** @type {Array} */
var ch = [];
var delegate = this.delegate;
var name = $.splat(json.label);
var color = $.splat(json.color || this.colors);
var config = this.config;
/** @type {boolean} */
var gradient = !!config.type.split(":")[1];
var animate = config.animate;
/** @type {boolean} */
var isH = config.orientation == "horizontal";
var that = this;
/** @type {number} */
var i = 0;
var values = json.values;
var valuesLen = values.length;
for (;i < valuesLen;i++) {
var value = values[i];
var valArray = $.splat(values[i].values);
/** @type {number} */
var F = 0;
ch.push({
id : prefix + value.label,
name : value.label,
data : {
value : valArray,
"$valueArray" : valArray,
"$colorArray" : color,
"$stringArray" : name,
"$gradient" : gradient,
"$config" : config
},
children : []
});
}
var root = {
id : prefix | |
<filename>xbout/tests/test_load.py
from pathlib import Path
import re
import pytest
import numpy as np
from xarray import DataArray, Dataset, concat
from xarray.tests.test_dataset import create_test_data
import xarray.testing as xrt
from natsort import natsorted
from xbout.load import (_check_filetype, _expand_wildcards, _expand_filepaths,
_arrange_for_concatenation, _trim, _infer_contains_boundaries,
open_boutdataset, _BOUT_PER_PROC_VARIABLES)
from xbout.utils import _separate_metadata
def test_check_extensions(tmpdir):
    """_check_filetype maps .nc -> 'netcdf4', .h5netcdf -> 'h5netcdf', and
    raises IOError for any other extension."""
    files_dir = tmpdir.mkdir("data")

    example_nc_file = files_dir.join('example.nc')
    example_nc_file.write("content_nc")
    filetype = _check_filetype(Path(str(example_nc_file)))
    assert filetype == 'netcdf4'

    example_hdf5_file = files_dir.join('example.h5netcdf')
    example_hdf5_file.write("content_hdf5")
    filetype = _check_filetype(Path(str(example_hdf5_file)))
    assert filetype == 'h5netcdf'

    example_invalid_file = files_dir.join('example.txt')
    # Bug fix: previously wrote to example_hdf5_file here, so the .txt file
    # was never actually created on disk.
    example_invalid_file.write("content_txt")
    with pytest.raises(IOError):
        filetype = _check_filetype(Path(str(example_invalid_file)))
class TestPathHandling:
    """Tests for wildcard expansion of BOUT dump-file paths."""

    def test_glob_expansion_single(self, tmpdir):
        data_dir = tmpdir.mkdir("data")
        single_file = data_dir.join('example.0.nc')
        single_file.write("content")

        # An exact (non-glob) path expands to just itself.
        exact = Path(str(single_file))
        assert _expand_wildcards(exact)[0] == Path(str(single_file))

        # A glob matching exactly one file expands to that file.
        globbed = Path(str(data_dir.join('example.*.nc')))
        assert _expand_wildcards(globbed)[0] == Path(str(single_file))

    @pytest.mark.parametrize("ii, jj", [(1, 1), (1, 4), (3, 1), (5, 3), (12, 1),
                                        (1, 12), (121, 2), (3, 111)])
    def test_glob_expansion_both(self, tmpdir, ii, jj):
        data_dir = tmpdir.mkdir("data")
        created = []
        for run in range(ii):
            run_dir = data_dir.mkdir('run' + str(run))
            for num in range(jj):
                dump = run_dir.join('example.' + str(num) + '.nc')
                dump.write("content")
                created.append(Path(str(dump)))

        # Expansion must return the files in natural-sort order, so that
        # e.g. example.10.nc sorts after example.2.nc.
        expected = natsorted(created, key=lambda filepath: str(filepath))
        pattern = Path(str(data_dir.join('run*/example.*.nc')))
        assert _expand_wildcards(pattern) == expected

    def test_no_files(self, tmpdir):
        data_dir = tmpdir.mkdir("data")
        # Expanding a pattern matching no files is an error.
        with pytest.raises(IOError):
            pattern = Path(str(data_dir.join('run*/example.*.nc')))
            _expand_filepaths(pattern)
@pytest.fixture()
def create_filepaths():
    # Expose the module-level path-building helper as a fixture so tests can
    # request it by name.
    return _create_filepaths
def _create_filepaths(nxpe=1, nype=1, nt=1):
filepaths = []
for t in range(nt):
for i in range(nype):
for j in range(nxpe):
file_num = (j + nxpe * i)
path = './run{}'.format(str(t)) \
+ '/BOUT.dmp.{}.nc'.format(str(file_num))
filepaths.append(path)
return filepaths
class TestArrange:
    """Tests that _arrange_for_concatenation builds the correct 3-level nested
    grid of file paths (time chunks, then y rows, then x columns) and reports
    which of the (t, y, x) dimensions need concatenating."""

    def test_arrange_single(self, create_filepaths):
        paths = create_filepaths(nxpe=1, nype=1, nt=1)
        expected_path_grid = [[['./run0/BOUT.dmp.0.nc']]]
        actual_path_grid, actual_concat_dims = _arrange_for_concatenation(paths, nxpe=1, nype=1)
        assert expected_path_grid == actual_path_grid
        # A single file needs no concatenation along any dimension.
        assert actual_concat_dims == [None, None, None]

    def test_arrange_along_x(self, create_filepaths):
        paths = create_filepaths(nxpe=3, nype=1, nt=1)
        expected_path_grid = [[['./run0/BOUT.dmp.0.nc',
                                './run0/BOUT.dmp.1.nc',
                                './run0/BOUT.dmp.2.nc']]]
        actual_path_grid, actual_concat_dims = _arrange_for_concatenation(paths, nxpe=3, nype=1)
        assert expected_path_grid == actual_path_grid
        assert actual_concat_dims == [None, None, 'x']

    def test_arrange_along_y(self, create_filepaths):
        paths = create_filepaths(nxpe=1, nype=3, nt=1)
        expected_path_grid = [[['./run0/BOUT.dmp.0.nc'],
                               ['./run0/BOUT.dmp.1.nc'],
                               ['./run0/BOUT.dmp.2.nc']]]
        actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
            paths, nxpe=1, nype=3)
        assert expected_path_grid == actual_path_grid
        assert actual_concat_dims == [None, 'y', None]

    def test_arrange_along_t(self, create_filepaths):
        paths = create_filepaths(nxpe=1, nype=1, nt=3)
        expected_path_grid = [[['./run0/BOUT.dmp.0.nc']],
                              [['./run1/BOUT.dmp.0.nc']],
                              [['./run2/BOUT.dmp.0.nc']]]
        actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
            paths, nxpe=1, nype=1)
        assert expected_path_grid == actual_path_grid
        assert actual_concat_dims == ['t', None, None]

    def test_arrange_along_xy(self, create_filepaths):
        paths = create_filepaths(nxpe=3, nype=2, nt=1)
        # Row-major within each time chunk: file_num = xproc + nxpe * yproc.
        expected_path_grid = [[['./run0/BOUT.dmp.0.nc', './run0/BOUT.dmp.1.nc', './run0/BOUT.dmp.2.nc'],
                               ['./run0/BOUT.dmp.3.nc', './run0/BOUT.dmp.4.nc', './run0/BOUT.dmp.5.nc']]]
        actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
            paths, nxpe=3, nype=2)
        assert expected_path_grid == actual_path_grid
        assert actual_concat_dims == [None, 'y', 'x']

    def test_arrange_along_xt(self, create_filepaths):
        paths = create_filepaths(nxpe=3, nype=1, nt=2)
        expected_path_grid = [[['./run0/BOUT.dmp.0.nc', './run0/BOUT.dmp.1.nc', './run0/BOUT.dmp.2.nc']],
                              [['./run1/BOUT.dmp.0.nc', './run1/BOUT.dmp.1.nc', './run1/BOUT.dmp.2.nc']]]
        actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
            paths, nxpe=3, nype=1)
        assert expected_path_grid == actual_path_grid
        assert actual_concat_dims == ['t', None, 'x']

    def test_arrange_along_xyt(self, create_filepaths):
        paths = create_filepaths(nxpe=3, nype=2, nt=2)
        expected_path_grid = [[['./run0/BOUT.dmp.0.nc', './run0/BOUT.dmp.1.nc', './run0/BOUT.dmp.2.nc'],
                               ['./run0/BOUT.dmp.3.nc', './run0/BOUT.dmp.4.nc', './run0/BOUT.dmp.5.nc']],
                              [['./run1/BOUT.dmp.0.nc', './run1/BOUT.dmp.1.nc', './run1/BOUT.dmp.2.nc'],
                               ['./run1/BOUT.dmp.3.nc', './run1/BOUT.dmp.4.nc', './run1/BOUT.dmp.5.nc']]]
        actual_path_grid, actual_concat_dims = _arrange_for_concatenation(paths, nxpe=3, nype=2)
        assert expected_path_grid == actual_path_grid
        assert actual_concat_dims == ['t', 'y', 'x']
@pytest.fixture()
def bout_xyt_example_files(tmpdir_factory):
    # Fixture wrapper exposing the file-mocking helper; note the helper takes
    # its own tmpdir_factory argument when called by the test.
    return _bout_xyt_example_files
def _bout_xyt_example_files(tmpdir_factory, prefix='BOUT.dmp', lengths=(6, 2, 4, 7),
                            nxpe=4, nype=2, nt=1, guards={}, syn_data_type='random',
                            grid=None, squashed=False):
    """
    Mocks up a set of BOUT-like netCDF files, and return the temporary test
    directory containing them.

    Deletes the temporary directory once that test is done.

    Returns a glob pattern (str) matching all dump files created, suitable for
    passing to open_boutdataset.
    """
    save_dir = tmpdir_factory.mktemp("data")

    if squashed:
        # create a single data-file, but alter the 'nxpe' and 'nype' variables,
        # as if the file had been created by combining a set of BOUT.dmp.*.nc
        # files
        ds_list, file_list = create_bout_ds_list(prefix=prefix, lengths=lengths, nxpe=1,
                                                 nype=1, nt=nt, guards=guards,
                                                 syn_data_type=syn_data_type)
        ds_list[0]['nxpe'] = nxpe
        ds_list[0]['nype'] = nype
    else:
        ds_list, file_list = create_bout_ds_list(prefix=prefix, lengths=lengths,
                                                 nxpe=nxpe, nype=nype, nt=nt,
                                                 guards=guards,
                                                 syn_data_type=syn_data_type)

    for ds, file_name in zip(ds_list, file_list):
        ds.to_netcdf(str(save_dir.join(str(file_name))))

    if grid is not None:
        xsize = lengths[1]*nxpe
        ysize = lengths[2]*nype
        grid_ds = create_bout_grid_ds(xsize=xsize, ysize=ysize, guards=guards)
        grid_ds.to_netcdf(str(save_dir.join(grid + ".nc")))

    # Return a glob-like path to all files created, which has all file numbers
    # replaced with a single asterisk
    path = str(save_dir.join(str(file_list[-1])))

    # Number of trailing numeric fields to wildcard: the processor number,
    # plus the run-directory number when there are multiple time chunks.
    count = 1
    if nt > 1:
        count += 1
    # We have to reverse the path before limiting the number of numbers
    # replaced so that the tests don't get confused by pytest's persistent
    # temporary directories (which are also designated by different numbers)
    glob_pattern = (re.sub(r'\d+', '*', path[::-1], count=count))[::-1]
    return glob_pattern
def create_bout_ds_list(prefix, lengths=(6, 2, 4, 7), nxpe=4, nype=2, nt=1, guards={},
                        syn_data_type='random'):
    """
    Mocks up a set of BOUT-like datasets.

    Structured as though they were produced by a x-y parallelised run with
    multiple restarts.

    Returns
    -------
    (ds_list, file_list)
        Both sorted lexicographically by filename.  NOTE(review): for 10 or
        more files lexicographic order differs from numeric order
        ("...10.nc" sorts before "...2.nc") -- confirm whether the "sort in
        order of num" intent or the actual lexicographic behaviour is wanted.
    """
    # Removed dead code: upper_bndry_cells / lower_bndry_cells were computed
    # per file but never used.
    file_list = []
    ds_list = []
    for xproc in range(nxpe):
        for yproc in range(nype):
            # BOUT++ numbering: processor number counts along x fastest.
            num = xproc + nxpe * yproc
            file_list.append(prefix + "." + str(num) + ".nc")
            ds = create_bout_ds(syn_data_type=syn_data_type, num=num, lengths=lengths,
                                nxpe=nxpe, nype=nype, xproc=xproc, yproc=yproc,
                                guards=guards)
            ds_list.append(ds)

    # Sort by filename to remove any BOUT-specific creation-order structure.
    pairs = sorted(zip(file_list, ds_list), key=lambda pair: pair[0])
    ds_list_sorted = [ds for _, ds in pairs]
    file_list_sorted = [filename for filename, _ in pairs]
    return ds_list_sorted, file_list_sorted
def create_bout_ds(syn_data_type='random', lengths=(6, 2, 4, 7), num=0, nxpe=1, nype=1,
                   xproc=0, yproc=0, guards={}):
    """Mock up a single BOUT++ dump-file Dataset for processor (xproc, yproc).

    Parameters
    ----------
    syn_data_type : str or int
        'random' (per-file seeded noise), 'linear' (values increase linearly
        across the global domain), 'stepped' (constant equal to `num`), or an
        int (constant fill value).
    lengths : tuple of int
        Local (t, x, y, z) lengths, excluding guard cells.
    num : int
        Processor number; also used as the random seed so each file differs.
    guards : dict
        Guard-cell counts keyed by dimension name ('x', 'y').

    Raises
    ------
    ValueError
        For an unrecognised syn_data_type.
    """
    # Set the shape of the data in this dataset (local sizes include guards)
    t_length, x_length, y_length, z_length = lengths
    mxg = guards.get('x', 0)
    myg = guards.get('y', 0)
    x_length += 2 * mxg
    y_length += 2 * myg
    shape = (t_length, x_length, y_length, z_length)

    # calculate global nx, ny and nz
    nx = nxpe * lengths[1] + 2 * mxg
    ny = nype * lengths[2]
    nz = 1 * lengths[3]

    # Fill with some kind of synthetic data.
    # Bug fix: compare strings with '==' rather than 'is' -- identity
    # comparison of string literals only works via CPython interning and is a
    # SyntaxWarning on Python >= 3.8.
    if syn_data_type == 'random':
        # Each dataset contains unique random noise
        np.random.seed(seed=num)
        data = np.random.randn(*shape)
    elif syn_data_type == 'linear':
        # Variables increase linearly across entire domain
        data = DataArray(-np.ones(shape), dims=('t', 'x', 'y', 'z'))

        t_array = DataArray((nx - 2 * mxg) * ny * nz * np.arange(t_length, dtype=float),
                            dims='t')
        x_array = DataArray(ny * nz * (xproc * lengths[1] + mxg
                                       + np.arange(lengths[1], dtype=float)),
                            dims='x')
        y_array = DataArray(nz * (yproc * lengths[2] + myg
                                  + np.arange(lengths[2], dtype=float)),
                            dims='y')
        z_array = DataArray(np.arange(z_length, dtype=float), dims='z')

        # Only interior (non-guard) cells get the linear ramp; guards stay -1.
        data[:, mxg:x_length - mxg, myg:y_length - myg, :] = (
            t_array + x_array + y_array + z_array
        )
    elif syn_data_type == 'stepped':
        # Each dataset contains a different number depending on the filename
        data = np.ones(shape) * num
    elif isinstance(syn_data_type, int):
        data = np.ones(shape) * syn_data_type
    else:
        raise ValueError('Not a recognised choice of type of synthetic bout data.')

    T = DataArray(data, dims=['t', 'x', 'y', 'z'])
    n = DataArray(data, dims=['t', 'x', 'y', 'z'])
    ds = Dataset({'n': n, 'T': T})

    # BOUT_VERSION needed so that we know that number of points in z is MZ, not
    # MZ-1 (as it was in BOUT++ before v4.0)
    ds['BOUT_VERSION'] = 4.3

    # Include grid data
    ds['NXPE'] = nxpe
    ds['NYPE'] = nype
    ds['NZPE'] = 1
    ds['PE_XIND'] = xproc
    ds['PE_YIND'] = yproc
    ds['MYPE'] = num

    ds['MXG'] = mxg
    ds['MYG'] = myg
    ds['nx'] = nx
    ds['ny'] = ny
    ds['nz'] = nz
    ds['MZ'] = 1 * lengths[3]
    ds['MXSUB'] = lengths[1]
    ds['MYSUB'] = lengths[2]
    ds['MZSUB'] = lengths[3]
    # Separatrix / branch-cut indices for the mocked topology
    ds['ixseps1'] = nx
    ds['ixseps2'] = nx
    ds['jyseps1_1'] = 0
    ds['jyseps1_2'] = ny
    ds['jyseps2_1'] = ny // 2 - 1
    ds['jyseps2_2'] = ny // 2 - 1
    ds['ny_inner'] = ny // 2

    one = DataArray(np.ones((x_length, y_length)), dims=['x', 'y'])
    zero = DataArray(np.zeros((x_length, y_length)), dims=['x', 'y'])

    ds['zperiod'] = 1
    ds['ZMIN'] = 0.
    ds['ZMAX'] = 2. * np.pi
    # Trivial (identity-like) metric tensor components
    ds['g11'] = one
    ds['g22'] = one
    ds['g33'] = one
    ds['g12'] = zero
    ds['g13'] = zero
    ds['g23'] = zero
    ds['g_11'] = one
    ds['g_22'] = one
    ds['g_33'] = one
    ds['g_12'] = zero
    ds['g_13'] = zero
    ds['g_23'] = zero
    ds['G1'] = zero
    ds['G2'] = zero
    ds['G3'] = zero
    ds['J'] = one
    ds['Bxy'] = one
    ds['zShift'] = zero

    ds['dx'] = 0.5 * one
    ds['dy'] = 2. * one
    ds['dz'] = 0.7

    ds['iteration'] = t_length
    ds['t_array'] = DataArray(np.arange(t_length, dtype=float) * 10., dims='t')

    return ds
def create_bout_grid_ds(xsize=2, ysize=4, guards={}):
    """Mock up a minimal BOUT++ grid-file Dataset, padding the requested size
    with guard cells from `guards` ('x'/'y' keys)."""
    nx = xsize + 2 * guards.get('x', 0)
    ny = ysize + 2 * guards.get('y', 0)
    ones = DataArray(np.ones((nx, ny)), dims=['x', 'y'])
    return Dataset({'psixy': ones, 'Rxy': ones, 'Zxy': ones, 'hthe': ones})
# Note, MYPE, PE_XIND and PE_YIND not included, since they are different for each
# processor and so are dropped when loading datasets.
# Scalar variables expected to be treated as run metadata (presumably checked
# by the metadata-stripping tests below -- confirm against _separate_metadata).
METADATA_VARS = ['BOUT_VERSION', 'NXPE', 'NYPE', 'NZPE', 'MXG', 'MYG', 'nx', 'ny', 'nz',
                 'MZ', 'MXSUB', 'MYSUB', 'MZSUB', 'ixseps1', 'ixseps2', 'jyseps1_1',
                 'jyseps1_2', 'jyseps2_1', 'jyseps2_2', 'ny_inner', 'zperiod', 'ZMIN',
                 'ZMAX', 'dz', 'iteration']
class TestStripMetadata():
def test_strip_metadata(self):
original = create_bout_ds()
assert | |
<filename>trainer.py
from __future__ import print_function
from six.moves import range
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torchvision.utils as vutils
import numpy as np
import os
import time
from PIL import Image, ImageFont, ImageDraw
from copy import deepcopy
from miscc.config import cfg
from miscc.utils import mkdir_p
from tensorboard import summary
from tensorboard import FileWriter
import tensorflow as tf
import model
from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3
# ################## Shared functions ###################
def compute_mean_covariance(img):
    """Per-image channel mean and channel covariance.

    img : (batch, channels, height, width) tensor.
    Returns (mu, covariance) of shapes (batch, channels, 1, 1) and
    (batch, channels, channels); the covariance is averaged over pixels.
    """
    batch, channels = img.size(0), img.size(1)
    pixels = img.size(2) * img.size(3)

    # Spatial mean per channel, kept broadcastable against img.
    mu = img.mean(2, keepdim=True).mean(3, keepdim=True)

    # Center and flatten the spatial dimensions: (B, C, H*W).
    centered = (img - mu.expand_as(img)).view(batch, channels, pixels)

    # (B, C, P) @ (B, P, C) -> (B, C, C), normalised by pixel count.
    covariance = torch.bmm(centered, centered.transpose(1, 2)) / pixels
    return mu, covariance
def KL_loss(mu, logvar):
    """Mean KL divergence of N(mu, sigma^2) from N(0, 1), averaged over all
    elements: -0.5 * mean(1 + log(sigma^2) - mu^2 - sigma^2)."""
    return -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
def ce_loss(logq, p, average=True):
    """Cross entropy -sum(p * logq); when `average`, divided by the batch
    size p.shape[0] (not by the total element count)."""
    total = -torch.sum(p * logq)
    if average:
        return total / p.shape[0]
    return total
def weights_init(m):
    """Layer-type-dependent initialisation, applied via model.apply().

    Conv*: orthogonal weights (gain 1). BatchNorm*: weights ~ N(1, 0.02),
    zero bias. Linear: orthogonal weights, zero bias (if present).
    Note Conv biases are deliberately left untouched, matching the original.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # orthogonal_ is the non-deprecated spelling of nn.init.orthogonal,
        # which warns (and may be removed) in modern PyTorch.
        nn.init.orthogonal_(m.weight.data, 1.0)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        nn.init.orthogonal_(m.weight.data, 1.0)
        if m.bias is not None:
            m.bias.data.fill_(0.0)
def load_params(model, new_param):
    """Copy each tensor in `new_param` into the matching model parameter,
    in place and in iteration order."""
    for param, source in zip(model.parameters(), new_param):
        param.data.copy_(source)
def copy_G_params(model):
    """Return an independent deep copy of the model's parameter tensors
    (e.g. for keeping an exponential-moving-average snapshot of G)."""
    return deepcopy([p.data for p in model.parameters()])
def compute_inception_score(predictions, num_splits=1):
    """Inception score of a (samples, classes) probability array.

    For each split, exponentiates the mean KL divergence between each row
    p(y|x) and the split's marginal p(y). Returns (mean, std) over splits.
    """
    total = predictions.shape[0]
    scores = []
    for split in range(num_splits):
        lo = split * total // num_splits
        hi = (split + 1) * total // num_splits
        part = predictions[lo:hi, :]
        marginal = np.expand_dims(np.mean(part, 0), 0)
        kl = np.mean(np.sum(part * (np.log(part) - np.log(marginal)), 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)
def negative_log_posterior_probability(predictions, num_splits=1):
    """Mean and std, across splits, of -log(max class probability) per row of
    a (samples, classes) probability array."""
    total = predictions.shape[0]
    scores = []
    for split in range(num_splits):
        lo = split * total // num_splits
        hi = (split + 1) * total // num_splits
        part = predictions[lo:hi, :]
        scores.append(np.mean(-1. * np.log(np.max(part, 1))))
    return np.mean(scores), np.std(scores)
def load_network(gpus):  ## build, parallel, load networks
    """Construct the generator/discriminator networks, wrap them in
    DataParallel over `gpus`, optionally resume them from checkpoints, and
    move them onto the GPU when cfg.CUDA is set.

    Returns (entity_netG, shareGs, netsD, num_Ds, count), where `count` is
    the iteration number parsed from the resumed generator checkpoint
    filename (0 when training from scratch).
    """
    print("+++Load/create network")
    # Shared generator stages, reused by both the plain and conditional G.
    shareGs = model.get_shareGs(cfg.GAN.GF_DIM)
    netG = G_NET(shareGs)
    netG.apply(weights_init)
    netG = torch.nn.DataParallel(netG, device_ids=gpus)
    print(netG)

    # Conditional G: input is entity one-hot (+1 fake class) + text embedding.
    entity_netG = model.COND_G_NET(cfg.GAN.ENTITY_DIM + 1 + cfg.TEXT.DIMENSION, shareGs)
    entity_netG.apply(weights_init)
    entity_netG = torch.nn.DataParallel(entity_netG, device_ids=gpus)
    print(entity_netG)

    # One joint discriminator per tree branch (image resolution).
    netsD = []
    # entity_netsD= []
    if cfg.TREE.BRANCH_NUM > 0:
        # netsD.append(D_NET64())
        # entity_netsD.append(model.ENTITY_D_NET64())
        ## use joint D --- 2.27.2019
        netsD.append(model.JOINT_D_NET64())
    if cfg.TREE.BRANCH_NUM > 1:
        if cfg.TREE.SCALE == 2:
            netsD.append(model.JOINT_D_NET128())
        else:
            netsD.append(model.JOINT_D_NET256())  ## x4 scale
    if cfg.TREE.BRANCH_NUM > 2:
        assert False, 'br3 todo'
        # netsD.append(D_NET256())
        # entity_netsD.append(model.ENTITY_D_NET256())
    for i in range(len(netsD)):
        netsD[i].apply(weights_init)
        netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
        # print(netsD[i])
        # entity_netsD[i].apply(weights_init)
        # entity_netsD[i] = torch.nn.DataParallel(entity_netsD[i], device_ids=gpus)
    print('Num of netsD', len(netsD))
    # print('Num of entity_netsD', len(entity_netsD))

    count = 0
    if cfg.TRAIN.NET_G != '':
        state_dict = torch.load(cfg.TRAIN.NET_G)
        netG.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_G)
        # Resume the iteration count from the checkpoint name: ..._<count>.pth
        istart = cfg.TRAIN.NET_G.rfind('_') + 1
        iend = cfg.TRAIN.NET_G.rfind('.')
        count = cfg.TRAIN.NET_G[istart:iend]
        count = int(count) + 1
    if cfg.TRAIN.NET_D != '':
        for i in range(len(netsD)):
            # NOTE(review): the printed name has an extra '_' compared with the
            # path actually loaded ('%s%d.pth') -- confirm which matches disk.
            print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i))
            state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i))
            netsD[i].load_state_dict(state_dict)

    if cfg.CUDA:
        # netG.cuda()
        entity_netG.cuda()
        for i in range(len(netsD)):
            netsD[i].cuda()
            # entity_netsD[i].cuda()
        # inception_model = inception_model.cuda()
    # inception_model.eval() ## set eval mode
    # return netG, entity_netG, shareGs, netsD, len(netsD), count
    return entity_netG, shareGs, netsD, len(netsD), count
def define_optimizers(netG, netsD=[]):
    """Create Adam optimizers (beta1=0.5, beta2=0.999) for the generator and
    for each discriminator, using the learning rates from cfg.TRAIN."""
    optimizersD = [optim.Adam(netD.parameters(),
                              lr=cfg.TRAIN.DISCRIMINATOR_LR,
                              betas=(0.5, 0.999))
                   for netD in netsD]
    optimizerG = optim.Adam(netG.parameters(),
                            lr=cfg.TRAIN.GENERATOR_LR,
                            betas=(0.5, 0.999))
    return optimizerG, optimizersD
def save_model(netG, avg_param_G, netsD, epoch, model_dir):
    """Checkpoint the generator (with its averaged params loaded in) and
    every discriminator to model_dir.

    NOTE(review): this leaves the averaged (EMA) weights loaded into netG
    after saving -- callers appear to restore the live params themselves;
    confirm.
    """
    load_params(netG, avg_param_G)
    torch.save(netG.state_dict(), '%s/netG_%d.pth' % (model_dir, epoch))
    for idx, netD in enumerate(netsD):
        torch.save(netD.state_dict(), '%s/netD%d.pth' % (model_dir, idx))
    print('Save G/Ds models.')
def save_img_results(imgs_tcpu, fake_imgs, num_imgs,
                     count, image_dir, summary_writer):
    """Save a grid of real images plus one grid of fake samples per generator
    stage into image_dir (tensorboard image summaries are disabled)."""
    num = cfg.TRAIN.VIS_COUNT

    # Real images (highest-resolution stage). vutils.save_image with
    # normalize=True rescales the data range to [0, 1].
    real_img = imgs_tcpu[-1][0:num]
    vutils.save_image(
        real_img, '%s/real_samples.png' % (image_dir),
        normalize=True)
    real_img_set = vutils.make_grid(real_img).numpy()
    real_img_set = np.transpose(real_img_set, (1, 2, 0))
    real_img_set = real_img_set * 255
    real_img_set = real_img_set.astype(np.uint8)

    # Fake samples for every generator stage; their data range is [-1, 1].
    for stage in range(num_imgs):
        fake_img = fake_imgs[stage][0:num]
        vutils.save_image(
            fake_img.data, '%s/epoch_%03d_fake_samples%d.png' %
            (image_dir, count, stage), normalize=True)
        fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy()
        fake_img_set = np.transpose(fake_img_set, (1, 2, 0))
        fake_img_set = (fake_img_set + 1) * 255 / 2
        fake_img_set = fake_img_set.astype(np.uint8)
# ################# Text to image task############################ #
class condGANTrainer(object):
    def __init__(self, output_dir, data_loader, imsize):
        """Set up output directories, the tensorboard writer, GPU selection
        and batching parameters for training.

        `imsize` is accepted but unused here -- presumably kept for API
        symmetry with other trainers; confirm.
        """
        if cfg.TRAIN.FLAG:
            # Only create output dirs / summary writer when actually training.
            self.model_dir = os.path.join(output_dir, 'Model')
            self.image_dir = os.path.join(output_dir, 'Image')
            self.log_dir = os.path.join(output_dir, 'Log')
            mkdir_p(self.model_dir)
            mkdir_p(self.image_dir)
            mkdir_p(self.log_dir)
            self.summary_writer = FileWriter(self.log_dir)

        s_gpus = cfg.GPU_ID.split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)
        torch.cuda.set_device(self.gpus[0])  ## first listed GPU is the default
        # torch.cuda.device(self.gpus[0]) ## as default
        cudnn.benchmark = True

        self.batch_size = cfg.TRAIN.BATCH_SIZE
        self.max_epoch = cfg.TRAIN.MAX_EPOCH
        self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL

        self.data_loader = data_loader
        self.num_batches = len(self.data_loader)  ## batches number, drop last
    def prepare_data(self, data):
        """Unpack one loader batch and wrap it in Variables (on GPU if
        cfg.CUDA).

        `data` is (imgs, wrong_imgs, text_embedding, class_vector, _), where
        imgs and wrong_imgs are per-stage lists of image batches ("wrong"
        images are mismatched pairs for the matching-aware discriminator).

        Returns (imgs, real_vimgs, wrong_vimgs, vembedding, vcls); the raw
        `imgs` stay on the CPU for later saving of real samples.
        """
        imgs, w_imgs, t_embedding, cls, _ = data
        # print(cls, cls.shape)
        # assert torch.max(cls) < 200
        # cls = cls.long(); cls -= 1  ## from 0 to 199
        real_vimgs, wrong_vimgs = [], []
        if cfg.CUDA:
            vembedding = Variable(t_embedding).cuda()
            vcls = Variable(cls).cuda()
        else:
            vembedding = Variable(t_embedding)
            vcls = Variable(cls)
        for i in range(self.num_Ds):
            # One image resolution per discriminator stage.
            if cfg.CUDA:
                real_vimgs.append(Variable(imgs[i]).cuda())  ## stages
                wrong_vimgs.append(Variable(w_imgs[i]).cuda())
            else:
                real_vimgs.append(Variable(imgs[i]))
                wrong_vimgs.append(Variable(w_imgs[i]))
        return imgs, real_vimgs, wrong_vimgs, vembedding, vcls  ## imgs for save real images
def onehot(self, cls_vec, n):
bs= cls_vec.shape[0]
one_hot= torch.zeros(bs, n)
for i in range(bs):
one_hot[i, cls_vec[i]]= 1
if cfg.CUDA:
one_hot= Variable(one_hot).cuda()
else:
one_hot= Variable(one_hot)
return one_hot
def multihot(self, classes_vec, n):
## class num from 1; classes_vec is 2d list, cols are different
# bs= classes_vec.shape[0]
bs= self.batch_size
print(bs, classes_vec)
multi_hot= torch.zeros(bs, n)
for i in range(bs):
print(i)
if not classes_vec[i]:
multi_hot[i, n-1]= 1
continue
for j in classes_vec[i]:
multi_hot[i, j-1]= 1
if cfg.CUDA:
multi_hot= Variable(multi_hot).cuda()
else:
multi_hot= Variable(multi_hot)
return multi_hot
    def train_entity_Dnet(self, idx, count):
        """Run one optimizer step for entity discriminator `idx`.

        Real images are trained with label 1 and their true entity class;
        detached fakes with label 0 and the reserved fake-class index
        cfg.GAN.ENTITY_DIM. Logs the loss every 100 iterations.
        Returns the total discriminator loss (adversarial + class).
        """
        flag = count % 100  # log only when the counter wraps
        batch_size = self.real_imgs[0].size(0)
        entity_netD, optD = self.entity_netsD[idx], self.entity_optsD[idx]
        real_imgs = self.real_imgs[idx]
        fake_imgs = self.fake_imgs[idx]

        entity_netD.zero_grad()
        # Forward: D returns (real/fake logit, class log-probs) per image.
        real_labels = self.real_labels[:batch_size]
        fake_labels = self.fake_labels[:batch_size]
        real_tp, real_cp = entity_netD(real_imgs)
        fake_tp, fake_cp = entity_netD(fake_imgs.detach())
        # for reality
        errD_real = self.bce_logit(real_tp, real_labels) + self.bce_logit(fake_tp, fake_labels)
        # for entity class: fakes are assigned the extra "fake" class index
        errD_class = self.nll(real_cp, self.cls) + self.nll(fake_cp, torch.ones(batch_size).long().cuda() * cfg.GAN.ENTITY_DIM)
        errD = errD_real + errD_class
        # backward
        errD.backward()
        # update parameters
        optD.step()
        # log
        if flag == 0:
            # NOTE(review): errD.data[0] is the pre-0.4 PyTorch scalar idiom;
            # newer versions require errD.item().
            summary_D = summary.scalar('entity_D_loss%d' % idx, errD.data[0])
            self.summary_writer.add_summary(summary_D, count)
        return errD
    def loss_entity_Gnet(self, count):
        """Compute the generator-side loss from the entity discriminators.

        Sums, over all stages, the adversarial loss (fakes scored with real
        labels) plus the entity-classification loss on the fakes, then adds
        the KL term from the entity conditioning-augmentation distribution.
        Returns (kl_loss, errG_total); the backward/step is done by the
        caller (see the commented-out lines).
        """
        # self.entity_optG.zero_grad()
        flag = count % 100  ## log each iter 100
        batch_size = self.real_imgs[0].size(0)
        mu, logvar = self.entity_mu, self.entity_logvar  ## entity ca
        real_labels = self.real_labels[:batch_size]
        errG_total = 0
        for i in range(self.num_Ds):
            tp, cp = self.entity_netsD[i](self.fake_imgs[i])
            # Fool D (real labels) and make fakes classify as the true class.
            errG = self.bce_logit(tp, real_labels) + self.nll(cp, self.cls)
            errG_total = errG_total + errG  # add all stage generators losses
            if flag == 0:
                summary_D = summary.scalar('G_loss%d' % i, errG.data[0])
                self.summary_writer.add_summary(summary_D, count)
        kl_loss = KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL
        errG_total = errG_total + kl_loss  ## add kl
        # errG_total.backward(retain_graph=True)  ## *update together
        # errG_total.backward()
        # self.entity_optG.step()
        return kl_loss, errG_total
def train_joint_Dnet(self, idx, count):
flag = count % 100
batch_size = self.real_imgs[0].size(0)
criterion, mu = self.criterion, self.mu # mean text embedding
netD, optD = self.netsD[idx], self.optimizersD[idx]
real_imgs = self.real_imgs[idx]
wrong_imgs = self.wrong_imgs[idx] # mismatch imgs
fake_imgs = self.fake_imgs[idx]
#
netD.zero_grad()
# Forward
real_labels = self.real_labels[:batch_size]
fake_labels = self.fake_labels[:batch_size]
real_logits = netD(real_imgs, mu.detach())
wrong_logits = netD(wrong_imgs, mu.detach()) ## mismatch
fake_logits = netD(fake_imgs.detach(), mu.detach())
# for matching, real or not for pair data
errD_real = criterion(real_logits[0], real_labels)
errD_wrong = criterion(wrong_logits[0], fake_labels)
errD_fake = criterion(fake_logits[0], fake_labels)
# for reality
if len(real_logits) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0: ## uncond coeff 1.0
errD_real_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * criterion(real_logits[1], real_labels)
errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * criterion(wrong_logits[1], real_labels)
errD_fake_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * criterion(fake_logits[1], fake_labels)
#
# errD_real = errD_real + errD_real_uncond
# errD_wrong = errD_wrong + errD_wrong_uncond ##? double real input for uncond
# errD_fake = errD_fake + errD_fake_uncond
errD_match = errD_real+ | |
row_start
if isinstance(key[1], int):
st1 = col_inds[key[1]] - col_start
sp1 = col_inds[key[1] + 1] - col_start
elif isinstance(key[1], slice):
start = col_inds[key[1].start] if key[1].start is not None else 0
stop = col_inds[key[1].stop] if key[1].stop is not None else col_inds[-1]
st1, sp1 = start - col_start, stop - col_start
return st0, sp0, st1, sp1
    def __getitem__(self, key: Union[int, slice, Tuple[int, slice, ...]]) -> torch.Tensor:
        """
        Returns a local selection of the DNDarray corresponding to the tile/s desired
        Standard getitem function for the tiles. The returned item is a view of the original
        DNDarray, operations which are done to this view will change the original array.
        **STRIDES ARE NOT AVAILABLE, NOR ARE CROSS-SPLIT SLICES**
        Parameters
        ----------
        key : int, slice, tuple
            indices of the tile/s desired
        Examples
        --------
        >>> a = ht.zeros((12, 10), split=0)
        >>> a_tiles = tiling.SquareDiagTiles(a, tiles_per_proc=2)  # type: tiling.SquareDiagTiles
        >>> print(a_tiles[2, 3])
        [0/1] None
        [1/1] tensor([[0., 0.],
        [1/1]         [0., 0.]])
        >>> print(a_tiles[2])
        [0/1] None
        [1/1] tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
        [1/1]         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
        >>> print(a_tiles[0:2, 1])
        [0/1] tensor([[0., 0., 0.],
        [0/1]         [0., 0., 0.],
        [0/1]         [0., 0., 0.],
        [0/1]         [0., 0., 0.],
        [0/1]         [0., 0., 0.],
        [0/1]         [0., 0., 0.]])
        [1/1] None
        """
        arr = self.__DNDarray
        tile_map = self.__tile_map
        local_arr = arr.larray
        if not isinstance(key, (int, tuple, slice)):
            raise TypeError(
                "key must be an int, tuple, or slice, is currently {}".format(type(key))
            )
        # index 2 of the last tile-map axis holds the owning process of each tile
        involved_procs = tile_map[key][..., 2].unique()
        if involved_procs.nelement() == 1 and involved_procs == arr.comm.rank:
            # every requested tile lives on this process: return a view of the local tensor
            st0, sp0, st1, sp1 = self.get_start_stop(key=key)
            return local_arr[st0:sp0, st1:sp1]
        elif involved_procs.nelement() > 1:
            raise ValueError("Slicing across splits is not allowed")
        else:
            # the tile/s belong to another process: nothing to return locally
            return None
def local_get(self, key: Union[int, slice, Tuple[int, slice, ...]]) -> torch.Tensor:
"""
Returns the local tile/s corresponding to the key given
Getitem routing using local indices, converts to global indices then uses getitem
Parameters
----------
key : int, slice, tuple, list
Indices of the tile/s desired.
If the stop index of a slice is larger than the end will be adjusted to the maximum
allowed
Examples
--------
See local_set function.
"""
rank = self.__DNDarray.comm.rank
key = self.local_to_global(key=key, rank=rank)
return self.__getitem__(key)
def local_set(
self, key: Union[int, slice, Tuple[int, slice, ...]], value: Union[int, float, torch.Tensor]
):
"""
Setitem routing to set data to a local tile (using local indices)
Parameters
----------
key : int or slice or Tuple[int,...]
Indices of the tile/s desired
If the stop index of a slice is larger than the end will be adjusted to the maximum
allowed
value : torch.Tensor or int or float
Data to be written to the tile
Examples
--------
>>> a = ht.zeros((11, 10), split=0)
>>> a_tiles = tiling.SquareDiagTiles(a, tiles_per_proc=2) # type: tiling.SquareDiagTiles
>>> local = a_tiles.local_get(key=slice(None))
>>> a_tiles.local_set(key=slice(None), value=torch.arange(local.numel()).reshape(local.shape))
>>> print(a.larray)
[0/1] tensor([[ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
[0/1] [10., 11., 12., 13., 14., 15., 16., 17., 18., 19.],
[0/1] [20., 21., 22., 23., 24., 25., 26., 27., 28., 29.],
[0/1] [30., 31., 32., 33., 34., 35., 36., 37., 38., 39.],
[0/1] [40., 41., 42., 43., 44., 45., 46., 47., 48., 49.],
[0/1] [50., 51., 52., 53., 54., 55., 56., 57., 58., 59.]])
[1/1] tensor([[ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
[1/1] [10., 11., 12., 13., 14., 15., 16., 17., 18., 19.],
[1/1] [20., 21., 22., 23., 24., 25., 26., 27., 28., 29.],
[1/1] [30., 31., 32., 33., 34., 35., 36., 37., 38., 39.],
[1/1] [40., 41., 42., 43., 44., 45., 46., 47., 48., 49.]])
>>> a.lloc[:] = 0
>>> a_tiles.local_set(key=(0, 2), value=10)
[0/1] tensor([[ 0., 0., 0., 0., 0., 0., 10., 10., 0., 0.],
[0/1] [ 0., 0., 0., 0., 0., 0., 10., 10., 0., 0.],
[0/1] [ 0., 0., 0., 0., 0., 0., 10., 10., 0., 0.],
[0/1] [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0/1] [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0/1] [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
[1/1] tensor([[ 0., 0., 0., 0., 0., 0., 10., 10., 0., 0.],
[1/1] [ 0., 0., 0., 0., 0., 0., 10., 10., 0., 0.],
[1/1] [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1/1] [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1/1] [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> a_tiles.local_set(key=(slice(None), 1), value=10)
[0/1] tensor([[ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
[0/1] [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
[0/1] [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
[0/1] [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
[0/1] [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
[0/1] [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.]])
[1/1] tensor([[ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
[1/1] [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
[1/1] [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
[1/1] [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
[1/1] [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.]])
"""
rank = self.__DNDarray.comm.rank
key = self.local_to_global(key=key, rank=rank)
self.__getitem__(tuple(key)).__setitem__(slice(0, None), value)
def local_to_global(
self, key: Union[int, slice, Tuple[int, slice, ...]], rank: int
) -> Tuple[int, slice, ...]:
"""
Convert local indices to global indices
Parameters
----------
key : int or slice or Tuple or List
Indices of the tile/s desired.
If the stop index of a slice is larger than the end will be adjusted to the maximum
allowed
rank : int
Process rank
Examples
--------
>>> a = ht.zeros((11, 10), split=0)
>>> a_tiles = tiling.SquareDiagTiles(a, tiles_per_proc=2) # type: tiling.SquareDiagTiles
>>> rank = a.comm.rank
>>> print(a_tiles.local_to_global(key=(slice(None), 1), rank=rank))
[0/1] (slice(0, 2, None), 1)
[1/1] (slice(2, 4, None), 1)
>>> print(a_tiles.local_to_global(key=(0, 2), rank=0))
[0/1] (0, 2)
[1/1] (0, 2)
>>> print(a_tiles.local_to_global(key=(0, 2), rank=1))
[0/1] (2, 2)
[1/1] (2, 2)
"""
arr = self.__DNDarray
if isinstance(key, (int, slice)):
key = [key, slice(0, None)]
else:
key = list(key)
if arr.split == 0:
# need to adjust key[0] to be only on the local tensor
prev_rows = sum(self.__row_per_proc_list[:rank])
loc_rows = self.__row_per_proc_list[rank]
if isinstance(key[0], int):
key[0] += prev_rows
elif isinstance(key[0], slice):
start = key[0].start + prev_rows if key[0].start is not None else prev_rows
stop = key[0].stop + prev_rows if key[0].stop is not None else prev_rows + loc_rows
stop = stop if stop - start < loc_rows else start + loc_rows
key[0] = slice(start, stop)
if arr.split == 1:
loc_cols = self.__col_per_proc_list[rank]
prev_cols = sum(self.__col_per_proc_list[:rank])
# need to adjust key[0] to be only on the local tensor
# need the number of columns *before* the process
if isinstance(key[1], int):
key[1] += prev_cols
elif isinstance(key[1], slice):
start = key[1].start + prev_cols if key[1].start is not None else prev_cols
stop = key[1].stop + prev_cols if key[1].stop is not None else prev_cols + loc_cols
stop = stop if stop - start < loc_cols else start + loc_cols
key[1] = slice(start, stop)
return tuple(key)
def match_tiles(self, tiles_to_match: SquareDiagTiles) -> None:
"""
Function to match the tile sizes of another tile map
Parameters
----------
tiles_to_match : SquareDiagTiles
The tiles which should be matched by the current tiling scheme
Notes
-----
This function overwrites most, if not all, of the elements of this class. Intended for use with the Q matrix,
to match the tiling of a/R. For this to work properly it is required that the 0th dim of both matrices is equal
"""
if not isinstance(tiles_to_match, SquareDiagTiles):
raise TypeError(
"tiles_to_match must be a SquareDiagTiles object, currently: {}".format(
type(tiles_to_match)
)
)
base_dnd = self.__DNDarray
match_dnd = tiles_to_match.__DNDarray
# this map will take the same tile row and column sizes up to the last diagonal row/column
# the last row/column is determined by the | |
<gh_stars>1-10
from __future__ import print_function
__author__ = 'jeremy'
import sys
import os
import cv2
import logging
import time
logging.basicConfig(level=logging.INFO) #debug is actually lower than info: critical/error/warning/info/debug
import shutil
# So this file can be imported on servers where joblib is not installed
try:
from joblib import Parallel,delayed
except:
pass
import numpy as np
import multiprocessing
import copy
#from trendi import constants
#import matplotlib.pyplot as plt
#import matplotlib.patches as mpatches
import subprocess
import inspect
import string
import random
import constants
#import background_removal
#from trendi.paperdoll import neurodoll_falcon_client
#from trendi import Utils
######################
#bounding box specific
######################
def intersectionOverUnion(r1, r2):
    '''
    Intersection-over-union of two rectangles.
    r1,r2 in form xywh
    :param r1: first rectangle (x, y, w, h)
    :param r2: second rectangle (x, y, w, h)
    :return: IoU = intersection area / union area, as a float
    '''
    left = int(max(r1[0], r2[0]))
    top = int(max(r1[1], r2[1]))
    overlap_w = max(int(min(r1[0] + r1[2], r2[0] + r2[2])) - left, 0)
    overlap_h = max(int(min(r1[1] + r1[3], r2[1] + r2[3])) - top, 0)
    intersectionarea = overlap_w * overlap_h
    # union = sum of both areas minus the doubly-counted overlap
    totarea = r1[2] * r1[3] + r2[2] * r2[3] - intersectionarea
    iou = float(intersectionarea) / float(totarea)
    print('totarea,intarea,iou:' + str(totarea) + ',' + str(intersectionarea) + ',' + str(iou))
    return (iou)
def intersectionOverMinArea(r1,r2):
    '''
    Overlap of two rectangles as a fraction of the smaller rectangle's area.
    r1,r2 in form xywh
    :param r1: first rectangle (x, y, w, h)
    :param r2: second rectangle (x, y, w, h)
    :return: intersection area / min(area(r1), area(r2)), as a float
    '''
    left = int(max(r1[0], r2[0]))
    top = int(max(r1[1], r2[1]))
    overlap_w = max(int(min(r1[0] + r1[2], r2[0] + r2[2])) - left, 0)
    overlap_h = max(int(min(r1[1] + r1[3], r2[1] + r2[3])) - top, 0)
    min_area = min(r1[2] * r1[3], r2[2] * r2[3])
    intersectionarea = overlap_w * overlap_h
    frac = float(intersectionarea) / float(min_area)
    print('min_area,intarea,frac:' + str(min_area) + ',' + str(intersectionarea) + ',' + str(frac))
    return (frac)
def combine_bbs(bb1_xywh, bb2_xywh):
    '''
    Return the smallest x,y,w,h box enclosing both input boxes.
    :param bb1_xywh: first box in (x, y, w, h) form
    :param bb2_xywh: second box in (x, y, w, h) form
    :return: union bounding box as a (minx, miny, w, h) tuple
    '''
    minx = min(bb1_xywh[0], bb2_xywh[0])
    maxx = max(bb1_xywh[0] + bb1_xywh[2], bb2_xywh[0] + bb2_xywh[2])
    miny = min(bb1_xywh[1], bb2_xywh[1])
    # BUG FIX: this used min(), which truncated the combined box at the
    # shorter bottom edge instead of extending it to the lower one.
    maxy = max(bb1_xywh[1] + bb1_xywh[3], bb2_xywh[1] + bb2_xywh[3])
    w = maxx - minx
    h = maxy - miny
    return (minx, miny, w, h)
def get_person_bb_from_face(face, image_shape):
x, y, w, h, = face
mid_face_x = x + w/2
p_width = 3.5 * w
p_height = 8 * h
# person bb x1,x2,y1,y2
p_x1 = int(round(max(0, mid_face_x - p_width/2)))
p_x2 = int(round(min(image_shape[1], mid_face_x + p_width/2)))
p_y1 = y
p_y2 = int(round(min(image_shape[0], y + p_height)))
return [p_x1, p_y1, p_x2, p_y2]
def fix_bb_x1y1x2y2(bb_x1y1x2y2):
    '''fix out-of-order bbs (x2y2x1y1) or right top ,left bottom'''
    if bb_x1y1x2y2[0] > bb_x1y1x2y2[2]:
        # box given as (x2, y2, x1, y1): swap both coordinate pairs
        bb_x1y1x2y2[0], bb_x1y1x2y2[2] = bb_x1y1x2y2[2], bb_x1y1x2y2[0]
        bb_x1y1x2y2[1], bb_x1y1x2y2[3] = bb_x1y1x2y2[3], bb_x1y1x2y2[1]
    if bb_x1y1x2y2[1] > bb_x1y1x2y2[3]:
        # top-right / bottom-left ordering: only the y pair is reversed
        logging.warning('malformed x1y1x2y2 bb {}, swapping y1 and y2'.format(bb_x1y1x2y2))
        # raw_input('ret to cont')
        bb_x1y1x2y2[1], bb_x1y1x2y2[3] = bb_x1y1x2y2[3], bb_x1y1x2y2[1]
    return bb_x1y1x2y2
def bb_to_mask(bb, img_array):
    '''
    bb in form of x,y,w,h converted to np array the same size as img_array
    Returns a uint8 mask with 1s inside the box; if the box cannot be
    clipped sensibly (bad left/top edge) an all-ones mask is returned.
    :param bb: bounding box (x, y, w, h)
    :param img_array: image whose first two dims define the mask size
    :return: np.uint8 mask of shape img_array.shape[:2]
    '''
    h, w = img_array.shape[0:2]
    mask = np.zeros((img_array.shape[0], img_array.shape[1]), dtype=np.uint8)
    # NOTE(review): below, bb[0] (x) indexes rows and bb[1] (y) indexes columns,
    # which looks transposed for numpy's (row=y, col=x) layout - confirm the
    # convention expected by bounding_box_inside_image and the callers
    if bounding_box_inside_image(img_array, bb):
        mask[bb[0]:(bb[0] + bb[2]), bb[1]:(bb[1] + bb[3])] = 1
    elif bb[0] + bb[2] <= w and bb[1] + bb[3] <= h:  # left and top edges are ok
        mask[bb[0]:min(bb[0] + bb[2], w), bb[1]:min(bb[1] + bb[3], h)] = 1
    else:  # left or top edge not ok so use entire box
        mask = np.ones((h, w), dtype=np.uint8)
    if mask.shape[0] != img_array.shape[0] or mask.shape[1] != img_array.shape[1]:
        print('trouble with mask size in bb_to_mask, resetting to image size')
        mask = np.ones((h, w), dtype=np.uint8)
    return mask
def is_valid_image(img_array):
    '''
    True when img_array is a non-None numpy array whose pixel count meets
    constants.min_image_area, False otherwise.
    '''
    if img_array is None:
        return False
    if type(img_array) != np.ndarray:
        return False
    return img_array.shape[0] * img_array.shape[1] >= constants.min_image_area
def is_valid_local_image_file(img_filename):
    '''Read img_filename from disk with cv2 and report whether it is a usable image.'''
    return is_valid_image(cv2.imread(img_filename))
def is_valid_local_or_remote_image_file(img_filename):
    '''Fetch img_filename (local path or URL) and report whether it is a usable image.'''
    return is_valid_image(get_cv2_img_array(img_filename))
def get_cv2_img_array(url_or_path_to_image_file_or_cv2_image_array, convert_url_to_local_filename=False, download=False,
                      download_directory='images', filename=False, replace_https_with_http=True):
    """
    Get a cv2 img array from a number of different possible inputs.

    Accepts an ndarray (returned as-is), a local file path, or a URL.  URLs
    can optionally be converted to a local cache filename first, and a
    successfully fetched image can be written to ``download_directory``.
    Returns ``None`` on any failure.

    :param url_or_path_to_image_file_or_cv2_image_array: ndarray, path or URL
    :param convert_url_to_local_filename: try a filename derived from the URL first
    :param download: save the image into download_directory after fetching
    :param download_directory: directory used for cached images
    :return: img_array
    """
    # print('get:' + str(url_or_path_to_image_file_or_cv2_image_array) + ' try local' + str(
    #     convert_url_to_local_filename) + ' download:' + str(download))
    got_locally = False
    img_array = None  # attempt to deal with non-responding url
    # first check if we already have a numpy array
    if isinstance(url_or_path_to_image_file_or_cv2_image_array, np.ndarray):
        img_array = url_or_path_to_image_file_or_cv2_image_array
    # otherwise it's probably a string, check what kind
    # NOTE(review): ``basestring`` (and ``xrange`` further down) exist only on
    # Python 2 - this function will NameError on Python 3; confirm target version
    elif isinstance(url_or_path_to_image_file_or_cv2_image_array, basestring):
        # try getting url locally by changing url to standard name
        if convert_url_to_local_filename:  # turn url into local filename and try getting it again
            # filename = url_or_path_to_image_file_or_cv2_image_array.split('/')[-1].split('#')[0].split('?')[0]
            # jeremy changed this since it didn't work with url -
            # https://encrypted-tbn1.gstatic.com/images?q=tbn:ANd9GcR2oSMcnwErH1eqf4k8fvn2bAxvSdDSbp6voC7ijYJStL2NfX6v
            # TODO: find a better way to create legal filename from url
            filename = \
                url_or_path_to_image_file_or_cv2_image_array.split('/')[-1].split('#')[0].split('?')[-1].split(':')[
                    -1]
            filename = os.path.join(download_directory, filename)
            if filename.endswith('jpg') or filename.endswith('jpeg') or filename.endswith('.bmp') or \
                    filename.endswith('tiff'):
                pass
            else:  # there's no 'normal' filename ending so add .jpg
                filename = filename + '.jpg'
            # print('trying again locally using filename:' + str(filename))
            img_array = get_cv2_img_array(filename, convert_url_to_local_filename=False, download=download,
                                          download_directory=download_directory)
            # maybe return(get_cv2 etc) instead of img_array =
            if img_array is not None:
                # print('got ok array calling self locally')
                return img_array
            else:  # couldnt get locally so try remotely
                # print('trying again remotely since using local filename didnt work, download=' + str( download) + ' fname:' + str(filename))
                return (
                    get_cv2_img_array(url_or_path_to_image_file_or_cv2_image_array, convert_url_to_local_filename=False,
                                      download=download,
                                      download_directory=download_directory))  # this used to be 'return'
        # put images in local directory
        else:
            # get remotely if its a url, get locally if not
            if "://" in url_or_path_to_image_file_or_cv2_image_array:
                if replace_https_with_http:
                    url_or_path_to_image_file_or_cv2_image_array = url_or_path_to_image_file_or_cv2_image_array.replace(
                        "https", "http")
                img_url = url_or_path_to_image_file_or_cv2_image_array
                try:
                    # print("trying remotely (url) ")
                    # NOTE(review): ``USER_AGENT``, ``requests``, ``imdecode`` and
                    # ``imwrite`` are not defined by this file's visible imports;
                    # imdecode/imwrite look like cv2.imdecode/cv2.imwrite - confirm
                    headers = {'User-Agent': USER_AGENT}
                    response = requests.get(img_url, headers=headers)  # download
                    img_array = imdecode(np.asarray(bytearray(response.content)), 1)
                except ConnectionError:
                    logging.warning("connection error - check url or connection")
                    return None
                except:
                    logging.warning(" error other than connection error - check something other than connection")
                    return None
            else:  # get locally, since its not a url
                # print("trying locally (not url)")
                img_path = url_or_path_to_image_file_or_cv2_image_array
                try:
                    img_array = cv2.imread(img_path)
                    if img_array is not None:
                        # print("success trying locally (not url)")
                        got_locally = True
                    else:
                        # print('couldnt get locally (in not url branch)')
                        return None
                except:
                    # print("could not read locally, returning None")
                    logging.warning("could not read locally, returning None")
                    return None  # input isn't a basestring nor a np.ndarray....so what is it?
    else:
        logging.warning("input is neither an ndarray nor a string, so I don't know what to do")
        return None
    # After we're done with all the above, this should be true - final check that we're outputting a good array
    # (the img_array[0][0] check requires a multi-channel, at-least-3-d image)
    if not (isinstance(img_array, np.ndarray) and isinstance(img_array[0][0], np.ndarray)):
        print("Bad image coming into get_cv2_img_array - check url/path/array:" + str(
            url_or_path_to_image_file_or_cv2_image_array) + 'try locally' + str(
            convert_url_to_local_filename) + ' dl:' + str(
            download) + ' dir:' + str(download_directory))
        logging.warning("Bad image - check url/path/array:" + str(
            url_or_path_to_image_file_or_cv2_image_array) + 'try locally' + str(
            convert_url_to_local_filename) + ' dl:' + str(
            download) + ' dir:' + str(download_directory))
        return (None)
    # if we got good image and need to save locally :
    if download:
        if not got_locally:  # only download if we didn't get file locally
            if not os.path.isdir(download_directory):
                os.makedirs(download_directory)
            if "://" in url_or_path_to_image_file_or_cv2_image_array:  # its a url, get the bifnocho
                if replace_https_with_http:
                    url_or_path_to_image_file_or_cv2_image_array = url_or_path_to_image_file_or_cv2_image_array.replace(
                        "https", "http")
                filename = \
                    url_or_path_to_image_file_or_cv2_image_array.split('/')[-1].split('#')[0].split('?')[-1].split(':')[
                        -1]
                filename = os.path.join(download_directory, filename)
            else:  # its not a url so use straight
                filename = os.path.join(download_directory, url_or_path_to_image_file_or_cv2_image_array)
            if filename.endswith('jpg') or filename.endswith('jpeg') or filename.endswith('.bmp') or filename.endswith(
                    'tiff'):
                pass
            else:  # there's no 'normal' filename ending
                filename = filename + '.jpg'
            try:  # write file then open it
                # print('filename for local write:' + str(filename))
                write_status = imwrite(filename, img_array)
                max_i = 50  # wait until file is readable before continuing
                gotfile = False
                # NOTE(review): ``xrange`` is Python-2-only; use ``range`` on Python 3
                for i in xrange(max_i):
                    try:
                        with open(filename, 'rb') as _:
                            gotfile = True
                    except IOError:
                        time.sleep(10)
                if gotfile == False:
                    print('Could not access {} after {} attempts'.format(filename, str(max_i)))
                    raise IOError('Could not access {} after {} attempts'.format(filename, str(max_i)))
            except:  # this is prob unneeded given the 'else' above
                print('unexpected error in Utils calling imwrite')
    return img_array
def count_human_bbs_in_doc(dict_of_images, skip_if_marked_to_skip=True):
n = 0
for | |
<gh_stars>0
#!/usr/bin/python3
"""
http://gitlab.naktuinbouw.net/bioinformatics/gt-seq/issues/18
adding a Sam RG tag to each read containing the flowcell ID, flowcell lane and Sample ID
`RG:Z:C5M8DANXX_3_Mini_Stars_2`
Input is
1: fastq file coming from a GT_seq run
2: xlsx file with barcodes and corresponding sample ID's
Output is
1: fastq file with the tags now attached.
"""
from argparse import ArgumentParser
from openpyxl import load_workbook
import re
import unicodedata
import sys
def argument_parser():
    """Build and parse the command-line options for the fastq RG tagger."""
    parser = ArgumentParser(description='Options for fastq RG tagger')
    parser.add_argument('-f', '--fastq', required=True,
                        help="the fastq file that will be receiving the tags")
    parser.add_argument('-i', '--info', required=True,
                        help="the .csv or .xlsx file with information on the read groups")
    parser.add_argument('-o', '--fastq_out',
                        help="the new fastq file with tags")
    parser.add_argument('-s', '--split', action='store_true',
                        help="add this option if you want a fastq file for each read group")
    parser.add_argument('-u', '--fuzz', action='store_true',
                        help="fuzzy matching of barcodes")
    return parser.parse_args()
def add_header_tag(header_line, readgroups, return_barcode=False):
    """Append SAM-style barcode/read-group tags to a fastq header line.

    ``readgroups`` is a sequence of barcode->readgroup dicts ordered by
    mismatch count (index 0 = exact match); the last dict containing the
    read's trimmed barcode wins.  When a read group matches, the header
    gains bl/br/bm/RG tags, otherwise only bl/br.  With
    ``return_barcode=True`` a ``(header, canonical barcode or None)``
    pair is returned instead of just the header.
    """
    bc_len = 6
    bc_split = '+'
    sequencer_dict, read_dict = obtain_read_info(header_line)
    halves = [bc[:bc_len] for bc in read_dict['barcode'].strip().split(bc_split)]
    barcode = bc_split.join(halves)
    # scan every mismatch tier; the highest tier containing the barcode wins
    mismatch = None
    matched_rg = None
    for tier, tier_dict in enumerate(readgroups):
        if barcode in tier_dict:
            matched_rg = tier_dict[barcode]
            mismatch = tier
    if matched_rg:
        # sanity-check the barcode sheet against what the sequencer reported
        if not matched_rg['Flowcell'] == sequencer_dict['flowcell_id']:
            print(header_line + " has different flowcell then expected form barcode file")
        if not str(matched_rg['Lane']) == sequencer_dict['flowcell_lane']:
            print(header_line + " has different lane then expected form barcode file")
        header_line = header_line.split()[0] + ' bl:Z:{}\tbr:Z:{}\tbm:Z:{}\tRG:Z:{}\n'.format(
            halves[0],
            halves[1],
            mismatch,
            matched_rg['ID']
        )
        barcode = matched_rg['barcode']
    else:
        header_line = header_line.split()[0] + ' bl:Z:{}\tbr:Z:{}\n'.format(
            halves[0],
            halves[1],
        )
        barcode = None
    if return_barcode:
        return header_line, barcode
    return header_line
def remove_accents(input_str):
    """
    Strip combining accent marks from a string.
    Decomposes to NFKD and drops every combining codepoint, keeping only
    the plain base characters (taken from https://stackoverflow.com/a/517974/3375944).
    :param input_str: string with accented letters
    :return: string without accented letters
    """
    decomposed = unicodedata.normalize('NFKD', input_str)
    kept = [ch for ch in decomposed if not unicodedata.combining(ch)]
    return u"".join(kept)
def obtain_read_info(header_line):
    """Split an Illumina fastq header into sequencer and read dictionaries.

    The whitespace-separated first token maps (colon-split, in order) to
    instrument/run/flowcell/lane/tile/x/y; the second token maps to
    pair/filtered/bit_flag/barcode.  All values are returned as strings.
    """
    machine_part = header_line.split()[0]
    read_part = header_line.split()[1]
    # get_sequencer info from the read header
    sequencer_keys = [
        'instrument_name',
        'run_id',
        'flowcell_id',
        'flowcell_lane',
        'tile_nr',
        'cluster_x',
        'cluster_y'
    ]
    sequencer_dict = {sequencer_keys[i]: part for i, part in enumerate(machine_part.split(':'))}
    # get read info from the read header
    read_keys = [
        'pair',
        'filtered',
        'bit_flag',
        'barcode']
    read_dict = {read_keys[i]: part for i, part in enumerate(read_part.split(':'))}
    return sequencer_dict, read_dict
def obtain_barcode_info_xlsx(info_file):
    """Read sample/barcode rows from an xlsx workbook.

    Uses the 'Library' sheet when present (otherwise the active sheet),
    takes row 1 as headers and returns one dict per fully-filled data row.
    A row that names a sample but lacks Flowcell or Lane raises ValueError.
    """
    workbook = load_workbook(info_file, data_only=True)
    if 'Library' in workbook.sheetnames:
        sheet = workbook['Library']
    else:
        sheet = workbook.active
    headers = [column[0].value for column in sheet.iter_cols(max_row=1)]
    rows = []
    for row in sheet.iter_rows(min_row=2):
        record = {headers[i]: cell.value for i, cell in enumerate(row)}
        if all(v for v in record.values()):
            # only rows with every cell filled are kept
            rows.append(record)
        elif record['Sample_name']:
            # a named sample with missing run info is a hard error
            if not record['Flowcell'] or not record['Lane']:
                raise ValueError ("please fill in Flowcell and Lane from sequence run.")
    return rows
def obtain_barcode_info_csv(info_file):
    '''
    takes a csv file containing information of the sequencing library
    Puts each line in a dictionary with the headers as value
    each line is added to a list and this list is returned
    :param info_file: path to the csv file
    :return: list of dictionaries of samples
    '''
    # expected headers:
    # Flowcell, Lane, i7_barcode, i5_barcode, Species, Sample_name
    delimiters = '[; ]+|[, ]+'
    records = []
    with open(info_file, 'r') as handle:
        headers = re.split(delimiters, handle.readline().strip())
        for raw_line in handle:
            fields = re.split(delimiters, raw_line.strip())
            records.append({headers[i]: field for i, field in enumerate(fields)})
    return records
def create_all_barcode_varients(barcode):
    """Return the 1-mismatch neighborhood of a barcode.

    The list starts with the barcode whose first base is replaced by 'N',
    followed, position by position, by every single-base substitution
    (A/C/T/G) that differs from the original base at that position.
    """
    bases = ['A', 'C', 'T', 'G']
    neighborhood = ['N' + barcode[1:]]
    for pos, original in enumerate(barcode):
        neighborhood.extend(
            barcode[:pos] + sub + barcode[pos + 1:] for sub in bases if sub != original
        )
    return neighborhood
def make_readgroups_dict(library_info_lines,make_var=False):
    '''
    reads the lines of the info file and returns a python dictionary
    :param library_info_lines: lines read from a data sheet
    :param make_var: when True, also index every 1-base barcode permutation
        so reads with a sequencing error can still be assigned
    :return: tuple of three dicts keyed by "i7+i5" barcode:
        (exact matches, one end with 1 mismatch, both ends with 1 mismatch)
    '''
    readgroups_dict = {}
    readgroups_dict_1 = {}
    readgroups_dict_2 = {}
    for library_info in library_info_lines:
        readgroup = make_readgroup(library_info, clean_dict=False)
        #check if Sample_name used for ReadGroup_id is duplicated
        if readgroup['SM'] in [x['SM'] for x in readgroups_dict.values()]:
            raise ValueError ("Two samples have the same Sample_name, {}.".format(readgroup['SM']))
        readgroup['barcode'] = library_info['i7_barcode']+ '+' + library_info['i5_barcode']
        #creating all 1 base permutations so the readgroup can still be found.
        if make_var:
            # i5 variants that would collide with other real barcodes are excluded
            i5_overlap_list = ['ACTCGT','AAACGT','AATCTT','AATCGG']
            i5_barcodes = create_all_barcode_varients(library_info['i5_barcode'])
            i5_barcodes = [bc for bc in i5_barcodes if (bc not in i5_overlap_list)]
            for i5_barcode in i5_barcodes:
                # one mismatch
                barcode = library_info['i7_barcode'] + '+' + i5_barcode
                #check if barcodes (and 1 base permutations) are unique
                if barcode in readgroups_dict or barcode in readgroups_dict_1 or barcode in readgroups_dict_2:
                    print(library_info)
                    raise ValueError("Two samples with the same barcode combination, {}.".format(barcode))
                readgroups_dict_1[barcode] = readgroup
            for i7_barcode in create_all_barcode_varients(library_info['i7_barcode']):
                #one mismatch
                barcode = i7_barcode + '+' + library_info['i5_barcode']
                #check if barcodes (and 1 base permutations) are unique
                if barcode in readgroups_dict or barcode in readgroups_dict_1 or barcode in readgroups_dict_2:
                    print(library_info)
                    raise ValueError("Two samples with the same barcode combination, {}.".format(barcode))
                readgroups_dict_1[barcode] = readgroup
                #both ends have 1 mismatch
                for i5_barcode in i5_barcodes:
                    barcode = i7_barcode + '+' + i5_barcode
                    #check if barcodes (and 1 base permutations) are unique
                    if barcode in readgroups_dict or barcode in readgroups_dict_1 or barcode in readgroups_dict_2:
                        print(library_info)
                        raise ValueError ("Two samples with the same barcode combination, {}.".format(barcode))
                    readgroups_dict_2[barcode] = readgroup
        barcode = library_info['i7_barcode'] + '+' + library_info['i5_barcode']
        #check if barcodes are unique
        if barcode in readgroups_dict:
            raise ValueError ("Two samples with the same barcode combination, {}.".format(barcode))
        readgroups_dict[barcode] = readgroup
    return readgroups_dict,readgroups_dict_1,readgroups_dict_2
def make_readgroup(library_info, clean_dict=True):
    '''
    Build a SAM @RG dictionary for one library-info row.
    The populated keys follow the SAM read-group tags:
    ID  - unique read-group identifier (flowcell_lane_sample); used in the
          RG tags of alignment records, must be unique in the header
    KS  - key sequence of each read (i7+i5 barcode pair)
    LB  - library (flowcell_lane)
    PL  - platform/technology, always ILLUMINA here
    SM  - sample name (accents stripped so it is header-safe)
    :param library_info: line containing relevant information on a sample
    :param clean_dict: will add readgroup to a clean dictionary instead of using the library_info input.
    :return: dictionary with info of readgroup
    '''
    readgroup_dict = {} if clean_dict else library_info
    # strip accented characters so the sample name is safe for SAM headers
    sample_name = remove_accents(str(library_info['Sample_name']))
    flowcell_lane = [
        library_info['Flowcell'],
        str(library_info['Lane'])
    ]
    # ID: flowcell_lane_sample
    readgroup_dict['ID'] = '_'.join(flowcell_lane + [sample_name])
    # KS: the i7/i5 barcode pair used as the key sequence
    readgroup_dict['KS'] = '+'.join([
        library_info['i7_barcode'],
        library_info['i5_barcode']
    ])
    # LB: flowcell_lane
    readgroup_dict['LB'] = '_'.join(flowcell_lane)
    # PL: sequencing platform
    readgroup_dict['PL'] = 'ILLUMINA'
    # SM: sample name
    readgroup_dict['SM'] = sample_name
    return readgroup_dict
def main(args):
# establish the readgroups
info_filetype = args.info.split('.')[-1]
if info_filetype == "csv":
library_info_list = obtain_barcode_info_csv(args.info)
elif info_filetype in ["xlsx", "xlsm"]:
library_info_list = obtain_barcode_info_xlsx(args.info)
else:
raise("barcode file extention ."+info_filetype+" is not supported, needs to be .csv or .xlsx")
readgroups,readgroups_1,readgroups_2 = make_readgroups_dict(library_info_list,args.fuzz)
# groups used for split
outfile_groups = {}
for library_info in library_info_list:
barcode = library_info['i7_barcode'] + '+' + library_info['i5_barcode']
if 'Species' in library_info_list[0]:
outfile_groups[barcode] = library_info['Species']
else:
outfile_groups[barcode] = 'main'
# prepare the outfile
outfiles = {}
if args.split:
if args.fastq_out:
out_split = args.fastq_out.split('.')
for group in set(outfile_groups.values()):
group_outname = '.'.join(out_split[:-1])+"."+group.lower()+"."+out_split[-1]
outfiles[group] = open(group_outname, 'w')
other_outname = '.'.join(out_split[:-1])+".nogroup."+out_split[-1]
outfiles['nogroup'] = open(other_outname, 'w')
else:
raise("can't have split withought specyfying the outfile location")
else:
if args.fastq_out:
outfiles['main'] = open(args.fastq_out, 'w')
else:
outfiles['main'] = sys.stdout
# read the entire fastq and fix the lines and write it in the new one
with open(args.fastq, 'r') as fastqfile:
i = 0
read = {}
outfile = sys.stdout
for line | |
0, imh - h))
# insert the crop into src.data[0]
src.data[0].copy_(torch.Tensor(image[:, ox:ox + w, oy:oy + h]))
else:
ox = int((imw - w) / 2)
oy = int((imh - h) / 2)
src.data[0].copy_(torch.Tensor(image[:, ox:ox + w, oy:oy + h]))
else:
ox = 0
oy = 0
src.data[0].copy_(torch.Tensor(image))
sigma = o['start_sigma'] + ((o['end_sigma'] - o['start_sigma']) * i) / o['iter_n']
step_size = o['start_step_size'] + ((o['end_step_size'] - o['start_step_size']) * i) / o['iter_n']
make_step(net, src, bias=bias, scale=scale, sigma=sigma, step_size=step_size, **step_params)
if i % 10 == 0:
print('finished step %d in octave %d' % (i, e))
# insert modified image back into original image (if necessary)
image[:, ox:ox + w, oy:oy + h] = src.data[0].cpu().numpy()
# returning the resulting image
return unprocess(image, mu=bias, sigma=scale)
# Default octave schedule for deepdraw: a single 600-iteration pass on the
# 'conv5' layer with blur sigma annealed 1.5 -> 0.01 and gradient step size
# annealed 3.0 (12*0.25) -> 0.125 (0.5*0.25).
octaves0 = [
    {
        'layer':'conv5',
        'iter_n':600,
        'start_sigma':1.5,
        'end_sigma':0.01,
        'start_step_size': 12.*0.25,
        'end_step_size':0.5*0.25,
    },
]
def diverse_deepdraw(net, base_img, octaves, random_crop=True, original_size=None,
                     bias=None, scale=None, device='cuda', div_metric='correlation',
                     div_linkage='minimum', div_weight=0, div_mask=1, **step_params):
    """ Similar to deepdraw() but including a diversity term among all images a la
    Cadena et al., 2018.

    Arguments (only those additional to deepdraw):
        base_img: (CHANGED) Expects a 4-d array (num_images x height x width x channels).
        div_metric (str): What metric to use when computing pairwise differences.
        div_linkage (str): How to agglomerate pairwise distances.
        div_weight (float): Weight given to the diversity term in the objective function.
        div_mask (np.array): Array (height x width) used to mask irrelevant parts of the
            image before calculating diversity.
    """
    if len(base_img) < 2:
        raise ValueError('You need to pass at least two initial images. Did you mean to '
                         'use deepdraw()?')

    # prepare base image
    image = process(base_img, mu=bias, sigma=scale)  # (num_batches, num_channels, h, w)

    # get input dimensions from net
    if original_size is None:
        print('getting image size:')
        c, w, h = image.shape[-3:]
    else:
        c, w, h = original_size

    print("starting drawing")
    # src is the tensor being optimized; its contents are refreshed from
    # `image` (or a crop of it) at the start of every iteration
    src = torch.zeros(len(image), c, w, h, requires_grad=True, device=device)
    mask = torch.tensor(div_mask, dtype=torch.float32, device=device)

    for e, o in enumerate(octaves):
        if 'scale' in o:
            # resize by o['scale'] if it exists
            image = ndimage.zoom(image, (1, 1, o['scale'], o['scale']))
        imw, imh = image.shape[-2:]
        for i in range(o['iter_n']):
            if imw > w:
                if random_crop:
                    # randomly select a crop
                    # ox = random.randint(0,imw-224)
                    # oy = random.randint(0,imh-224)
                    # gaussian-distributed crop origin, biased toward the center
                    mid_x = (imw - w) / 2.
                    width_x = imw - w
                    ox = np.random.normal(mid_x, width_x * 0.3, 1)
                    ox = int(np.clip(ox, 0, imw - w))
                    mid_y = (imh - h) / 2.
                    width_y = imh - h
                    oy = np.random.normal(mid_y, width_y * 0.3, 1)
                    oy = int(np.clip(oy, 0, imh - h))
                    # insert the crop into src.data (all images at once)
                    src.data[:].copy_(torch.Tensor(image[..., ox:ox + w, oy:oy + h]))
                else:
                    # deterministic center crop
                    ox = int((imw - w) / 2)
                    oy = int((imh - h) / 2)
                    src.data[:].copy_(torch.Tensor(image[..., ox:ox + w, oy:oy + h]))
            else:
                ox = 0
                oy = 0
                src.data[:].copy_(torch.Tensor(image))

            # linearly anneal blur sigma and gradient step size over the octave
            sigma = o['start_sigma'] + ((o['end_sigma'] - o['start_sigma']) * i) / o['iter_n']
            step_size = o['start_step_size'] + ((o['end_step_size'] - o['start_step_size']) * i) / o['iter_n']

            div_term = 0
            if div_weight > 0:
                # Compute distance matrix
                images = (src * mask).view(len(src), -1)  # num_images x num_pixels
                if div_metric == 'correlation':
                    # computations restricted to the mask
                    means = (images.sum(dim=-1) / mask.sum()).view(len(images), 1, 1, 1)
                    residuals = ((src - means) * torch.sqrt(mask)).view(len(src), -1)
                    ssr = (((src - means) ** 2) * mask).sum(-1).sum(-1).sum(-1)
                    # NOTE(review): the 1e-12 epsilon is added to the whole quotient
                    # here, not to the denominator as in the cosine branch below —
                    # confirm this placement is intentional.
                    distance_matrix = -(torch.mm(residuals, residuals.t()) /
                                        torch.sqrt(torch.ger(ssr, ssr)) + 1e-12)
                elif div_metric == 'cosine':
                    image_norms = torch.norm(images, dim=-1)
                    distance_matrix = -(torch.mm(images, images.t()) /
                                        (torch.ger(image_norms, image_norms) + 1e-12))
                elif div_metric == 'euclidean':
                    distance_matrix = torch.norm(images.unsqueeze(0) -
                                                 images.unsqueeze(1), dim=-1)
                else:
                    raise ValueError('Invalid distance metric {} for the diversity term'.format(div_metric))

                # Compute overall distance in this image set
                # (strict upper triangle: each unordered pair counted once)
                triu_idx = torch.triu(torch.ones(len(distance_matrix),
                                                 len(distance_matrix)), diagonal=1) == 1
                if div_linkage == 'minimum':
                    distance = distance_matrix[triu_idx].min()
                elif div_linkage == 'average':
                    distance = distance_matrix[triu_idx].mean()
                else:
                    raise ValueError('Invalid linkage for the diversity term: {}'.format(div_linkage))

                div_term = div_weight * distance

            make_step(net, src, bias=bias, scale=scale, sigma=sigma, step_size=step_size,
                      add_loss=div_term, **step_params)

            # TODO: Maybe save the MEIs every number of iterations and return all MEIs.
            if i % 10 == 0:
                print('finished step %d in octave %d' % (i, e))

        # insert modified image back into original image (if necessary)
        image[..., ox:ox + w, oy:oy + h] = src.detach().cpu().numpy()

    # returning the resulting image
    return unprocess(image, mu=bias, sigma=scale)
def create_gabor(height=36, width=64, phase=0, wavelength=10, orientation=0, sigma=5,
                 dy=0, dx=0):
    """ Create a gabor patch (sinusoidal + gaussian).

    Arguments:
        height (int): Height of the image in pixels.
        width (int): Width of the image in pixels.
        phase (float): Angle at which to start the sinusoid in degrees.
        wavelength (float): Wavelength of the sinusoid (1 / spatial frequency) in pixels.
        orientation (float): Counterclockwise rotation to apply (0 is horizontal) in
            degrees.
        sigma (float): Sigma of the gaussian mask used in pixels.
        dy (float): Amount of translation in y (positive moves down) in pixels/height.
        dx (float): Amount of translation in x (positive moves right) in pixels/height.

    Returns:
        Array of height x width shape (float32) with the required gabor.

    Raises:
        ValueError: If dx/dy are outside [-1, 1], or the translated crop does not
            match the requested dimensions.
    """
    # Validate translation factors before doing any heavy computation.
    if abs(dx) > 1 or abs(dy) > 1:
        raise ValueError('Please express translations as factors of the height/width,'
                         'i.e, a number in interval [-1, 1] ')

    # Compute image size to avoid translation or rotation producing black spaces
    padding = max(height, width)
    imheight = height + 2 * padding
    imwidth = width + 2 * padding
    # we could have diff pad sizes per dimension = max(dim_size, sqrt((h/2)^2 + (w/2)^2))
    # but this simplifies the code for just a bit of inefficiency

    # Create sinusoid with right wavelength and phase
    start_sample = phase
    step_size = 360 / wavelength
    samples = start_sample + step_size * np.arange(imheight)
    samples = np.mod(samples, 360)  # in degrees
    rad_samples = samples * (np.pi / 180)  # radians
    sin = np.sin(rad_samples)

    # Create Gabor by stacking the sinusoid along the cols
    gabor = np.tile(sin, (imwidth, 1)).T

    # Rotate around center
    gabor = ndimage.rotate(gabor, orientation, reshape=False)

    # Apply gaussian mask.
    # BUG FIX: scipy.signal.gaussian was deprecated and removed in SciPy 1.13;
    # signal.windows.gaussian has been its canonical location since SciPy 1.1.
    gaussy = signal.windows.gaussian(imheight, std=sigma)
    gaussx = signal.windows.gaussian(imwidth, std=sigma)
    mask = np.outer(gaussy, gaussx)
    gabor = gabor * mask

    # Translate (this is only approximate but it should be good enough)
    dy = int(dy * height)  # int is the approximation
    dx = int(dx * width)
    gabor = gabor[padding - dy: -padding - dy, padding - dx: -padding - dx]

    if gabor.shape != (height, width):
        raise ValueError('Dimensions of gabor do not match desired dimensions.')

    return gabor.astype(np.float32)
def grouper(n, iterable, fillvalue=None):
    """Collect items of `iterable` into fixed-length tuples of size `n`,
    padding the final tuple with `fillvalue` if needed."""
    iterators = [iter(iterable)] * n
    return zip_longest(*iterators, fillvalue=fillvalue)
def rename(rel, prefix='new_', exclude=()):
    """ Project a relation, prefixing the name of every attribute not in `exclude`.

    Arguments:
        rel: DataJoint-style relation exposing `heading.attributes` and `proj()`.
        prefix (str): Prefix prepended to each renamed attribute.
        exclude (sequence of str): Attribute names to keep under their original
            name (only those actually present in the heading are projected).

    Returns:
        The projection of `rel` with the renamed attributes.
    """
    # BUG FIX: default changed from the mutable [] to an immutable () — the
    # argument is only iterated, so behavior is identical, but this avoids the
    # shared-mutable-default pitfall.
    attrs = list(rel.heading.attributes.keys())
    original = [x for x in attrs if x not in exclude]
    keys = [k for k in exclude if k in attrs]
    name_map = {prefix + x: x for x in original}
    return rel.proj(*keys, **name_map)
def plot_images(df, prefixes, names=None, brain_area='V1', n_rows=15, order_by='pearson',
                panels=('normed_rf', 'normed_mei'), panel_names=('RF', 'MEI'), cmaps=('coolwarm', 'gray'),
                y_infos=('{prefix}test_corr', 'pearson'), save_path=None):
    """ Plot a grid of per-cell panels (e.g. RF and MEI images) for one brain area.

    Arguments:
        df (pd.DataFrame): One row per cell; must contain 'brain_area', the
            `order_by` column, and a '<prefix><panel>' image column for every
            prefix/panel combination.
        prefixes (sequence of str): Column-name prefixes, one per model/dataset.
        names (sequence of str): Display names for the prefixes; defaults to the
            prefixes themselves.
        brain_area (str): Which brain area to select from df.
        n_rows (int): Maximum number of cells (grid rows) to plot.
        order_by (str): Column used to sort cells (descending).
        panels, panel_names, cmaps, y_infos: Per-panel column suffixes, titles,
            colormaps, and y-label format strings ('{prefix}' is substituted).
        save_path (str): If given, the figure is saved to this path.
    """
    if names is None:
        names = prefixes

    # restrict to the requested brain area, best cells first
    f = (df['brain_area'] == brain_area)
    area_data = df[f]
    area_data = area_data.sort_values(order_by, ascending=False)

    n_rows = min(n_rows, len(area_data))
    n_panels = len(panels)
    cols = len(prefixes) * n_panels
    with sns.axes_style('white'):
        # BUG FIX: the figure height used an undefined name `n_cells`
        # (NameError at runtime); the intended value is n_rows.
        fig, axs = plt.subplots(n_rows, cols, figsize=(4 * cols, round(2 * n_rows)))
        st = fig.suptitle('MEIs on Shuffled {} dataset: {}'.format(brain_area, ', '.join(names)))
        [ax.set_xticks([]) for ax in axs.ravel()]
        [ax.set_yticks([]) for ax in axs.ravel()]
        for ax_row, (_, data_row), row_index in zip(axs, area_data.iterrows(), count()):
            for ax_group, prefix, name in zip(grouper(n_panels, ax_row), prefixes, names):
                for ax, panel, panel_name, y_info, cm in zip(ax_group, panels, panel_names, y_infos, cmaps):
                    if row_index == 0:
                        ax.set_title('{}: {}'.format(panel_name, name))
                    ax.imshow(data_row[prefix + panel].squeeze(), cmap=cm)
                    if y_info is not None:
                        ax.set_ylabel('{:0.2f}%'.format(data_row[y_info.format(prefix=prefix)] * 100))
        fig.tight_layout()
        # shift subplots down so the suptitle does not overlap the first row:
        st.set_y(0.98)
        st.set_fontsize(20)
        fig.subplots_adjust(top=0.95)
    # BUG FIX: the original tested an undefined name `path`; the parameter
    # is `save_path`.
    if save_path is not None:
        fig.savefig(save_path)
def gen_gif(images, output_path, duration=5, scale=1, adj_single=False):
    """Resize a stack of frames, normalize them to uint8 [0, 255], and save
    as an animated GIF.

    Arguments:
        images: Sequence of 2-d arrays (all the same shape).
        output_path (str): Destination path; '.gif' is appended if missing.
        duration (float): Total animation length in seconds.
        scale (int): Integer upscaling factor for each frame.
        adj_single (bool): If True, normalize each frame independently;
            otherwise normalize across the whole stack.
    """
    height, width = images[0].shape
    target_size = (width * scale, height * scale)
    frames = np.stack([cv2.resize(frame, target_size) for frame in images])

    norm_axis = (1, 2) if adj_single else None
    frames = frames - frames.min(axis=norm_axis, keepdims=True)
    frames = frames / frames.max(axis=norm_axis, keepdims=True) * 255
    frames = frames.astype('uint8')

    frame_duration = duration / len(frames)
    if not output_path.endswith('.gif'):
        output_path = output_path + '.gif'
    imageio.mimsave(output_path, frames, duration=frame_duration)
def rescale_images(images, low=0, high=1, together=True):
axis = None | |
#8683 in Alexa global
'http://www.record.com.mx/',
# Why: #8684 in Alexa global
'http://www.121ware.com/',
# Why: #8685 in Alexa global
'http://www.inkfrog.com/',
# Why: #8686 in Alexa global
'http://cnstock.com/',
# Why: #8687 in Alexa global
'http://www.marineaquariumfree.com/',
# Why: #8688 in Alexa global
'http://www.encuentra24.com/',
# Why: #8689 in Alexa global
'http://www.mixturecloud.com/',
# Why: #8690 in Alexa global
'http://www.yninfo.com/',
# Why: #8691 in Alexa global
'http://www.lesnumeriques.com/',
# Why: #8692 in Alexa global
'http://www.autopartswarehouse.com/',
# Why: #8693 in Alexa global
'http://www.lijit.com/',
# Why: #8694 in Alexa global
'http://www.ti.com/',
# Why: #8695 in Alexa global
'http://www.umd.edu/',
# Why: #8696 in Alexa global
'http://www.zdnet.co.uk/',
# Why: #8697 in Alexa global
'http://www.begin-download.com/',
# Why: #8698 in Alexa global
'http://www.showsiteinfo.us/',
# Why: #8699 in Alexa global
'http://www.uchicago.edu/',
# Why: #8700 in Alexa global
'http://www.whatsmyserp.com/',
# Why: #8701 in Alexa global
'http://www.asos.fr/',
# Why: #8702 in Alexa global
'http://www.ibosocial.com/',
# Why: #8703 in Alexa global
'http://www.amorenlinea.com/',
# Why: #8704 in Alexa global
'http://www.videopremium.tv/',
# Why: #8705 in Alexa global
'http://www.trkjmp.com/',
# Why: #8706 in Alexa global
'http://www.creativecow.net/',
# Why: #8707 in Alexa global
'http://www.webartex.ru/',
# Why: #8708 in Alexa global
'http://www.olx.com.ng/',
# Why: #8709 in Alexa global
'http://www.overclockzone.com/',
# Why: #8710 in Alexa global
'http://www.rongbay.com/',
# Why: #8711 in Alexa global
'http://www.maximustube.com/',
# Why: #8712 in Alexa global
'http://www.priberam.pt/',
# Why: #8713 in Alexa global
'http://www.comsenz.com/',
# Why: #8714 in Alexa global
'http://www.prensaescrita.com/',
# Why: #8715 in Alexa global
'http://www.gameslist.com/',
# Why: #8716 in Alexa global
'http://www.lingualeo.com/',
# Why: #8717 in Alexa global
'http://www.epfoservices.in/',
# Why: #8718 in Alexa global
'http://www.webbirga.net/',
# Why: #8719 in Alexa global
'http://www.pb.com/',
# Why: #8720 in Alexa global
'http://www.fineco.it/',
# Why: #8721 in Alexa global
'http://www.highrisehq.com/',
# Why: #8722 in Alexa global
'http://www.hotgoo.com/',
# Why: #8723 in Alexa global
'http://www.netdoctor.co.uk/',
# Why: #8725 in Alexa global
'http://domain.com/',
# Why: #8726 in Alexa global
'http://www.aramex.com/',
# Why: #8727 in Alexa global
'http://www.google.co.uz/',
# Why: #8728 in Alexa global
'http://www.savings.com/',
# Why: #8729 in Alexa global
'http://www.airtelbroadband.in/',
# Why: #8730 in Alexa global
'http://www.postimees.ee/',
# Why: #8731 in Alexa global
'http://www.wallsave.com/',
# Why: #8732 in Alexa global
'http://www.df.gob.mx/',
# Why: #8733 in Alexa global
'http://www.flashgames247.com/',
# Why: #8735 in Alexa global
'http://www.libsyn.com/',
# Why: #8736 in Alexa global
'http://www.goobike.com/',
# Why: #8737 in Alexa global
'http://www.trivago.com/',
# Why: #8738 in Alexa global
'http://www.mt.co.kr/',
# Why: #8739 in Alexa global
'http://www.android-hilfe.de/',
# Why: #8740 in Alexa global
'http://www.anquan.org/',
# Why: #8741 in Alexa global
'http://www.dota2.com/',
# Why: #8742 in Alexa global
'http://www.vladtv.com/',
# Why: #8743 in Alexa global
'http://www.oovoo.com/',
# Why: #8744 in Alexa global
'http://www.mybrowsercash.com/',
# Why: #8745 in Alexa global
'http://www.stafaband.info/',
# Why: #8746 in Alexa global
'http://www.vsao.vn/',
# Why: #8747 in Alexa global
'http://www.smithsonianmag.com/',
# Why: #8748 in Alexa global
'http://www.feedblitz.com/',
# Why: #8749 in Alexa global
'http://www.kibeloco.com.br/',
# Why: #8750 in Alexa global
'http://www.burningcamel.com/',
# Why: #8751 in Alexa global
'http://www.northwestern.edu/',
# Why: #8752 in Alexa global
'http://www.tucows.com/',
# Why: #8753 in Alexa global
'http://www.porn-granny-tube.com/',
# Why: #8754 in Alexa global
'http://www.linksys.com/',
# Why: #8755 in Alexa global
'http://www.avea.com.tr/',
# Why: #8756 in Alexa global
'http://www.ams.se/',
# Why: #8757 in Alexa global
'http://www.canadanepalvid.com/',
# Why: #8758 in Alexa global
'http://www.venmobulo.com/',
# Why: #8759 in Alexa global
'http://www.levi.com/',
# Why: #8760 in Alexa global
'http://www.freshome.com/',
# Why: #8761 in Alexa global
'http://www.loja2.com.br/',
# Why: #8762 in Alexa global
'http://www.gameduell.de/',
# Why: #8763 in Alexa global
'http://www.reserveamerica.com/',
# Why: #8764 in Alexa global
'http://www.fakings.com/',
# Why: #8765 in Alexa global
'http://www.akb48newstimes.jp/',
# Why: #8766 in Alexa global
'http://www.polygon.com/',
# Why: #8767 in Alexa global
'http://www.mtwebcenters.com.tw/',
# Why: #8768 in Alexa global
'http://www.news.mn/',
# Why: #8769 in Alexa global
'http://www.addictinginfo.org/',
# Why: #8770 in Alexa global
'http://www.bonanza.com/',
# Why: #8771 in Alexa global
'http://www.adlock.in/',
# Why: #8772 in Alexa global
'http://www.apni.tv/',
# Why: #8773 in Alexa global
'http://www.3m.com/',
# Why: #8774 in Alexa global
'http://www.gendama.jp/',
# Why: #8775 in Alexa global
'http://www.usingenglish.com/',
# Why: #8776 in Alexa global
'http://www.sammsoft.com/',
# Why: #8777 in Alexa global
'http://www.pedaily.cn/',
# Why: #8778 in Alexa global
'http://www.thevault.bz/',
# Why: #8779 in Alexa global
'http://www.groupon.my/',
# Why: #8780 in Alexa global
'http://www.banamex.com/',
# Why: #8781 in Alexa global
'http://hualongxiang.com/',
# Why: #8782 in Alexa global
'http://www.bodis.com/',
# Why: #8783 in Alexa global
'http://www.dqx.jp/',
# Why: #8784 in Alexa global
'http://www.io.ua/',
# Why: #8785 in Alexa global
'http://joy.cn/',
# Why: #8786 in Alexa global
'http://www.minglebox.com/',
# Why: #8787 in Alexa global
'http://www.forumspecialoffers.com/',
# Why: #8788 in Alexa global
'http://www.remax.com/',
# Why: #8789 in Alexa global
'http://www.makaan.com/',
# Why: #8790 in Alexa global
'http://www.voglioporno.com/',
# Why: #8791 in Alexa global
'http://www.chinaluxus.com/',
# Why: #8792 in Alexa global
'http://www.parenting.com/',
# Why: #8793 in Alexa global
'http://www.superdownloads.com.br/',
# Why: #8794 in Alexa global
'http://www.aeon.co.jp/',
# Why: #8795 in Alexa global
'http://www.nettavisen.no/',
# Why: #8796 in Alexa global
'http://www.21cbh.com/',
# Why: #8797 in Alexa global
'http://www.mobilestan.net/',
# Why: #8798 in Alexa global
'http://www.cheathappens.com/',
# Why: #8799 in Alexa global
'http://www.azxeber.com/',
# Why: #8800 in Alexa global
'http://www.foodgawker.com/',
# Why: #8801 in Alexa global
'http://www.miitbeian.gov.cn/',
# Why: #8802 in Alexa global
'http://www.eb80.com/',
# Why: #8803 in Alexa global
'http://www.dudamobile.com/',
# Why: #8804 in Alexa global
'http://www.sahafah.net/',
# Why: #8805 in Alexa global
'http://www.ait-themes.com/',
# Why: #8806 in Alexa global
'http://www.house.gov/',
# Why: #8807 in Alexa global
'http://www.ffffound.com/',
# Why: #8808 in Alexa global
'http://sssc.cn/',
# Why: #8809 in Alexa global
'http://www.khanwars.ir/',
# Why: #8810 in Alexa global
'http://www.wowslider.com/',
# Why: #8811 in Alexa global
'http://www.fashionara.com/',
# Why: #8812 in Alexa global
'http://www.pornxxxhub.com/',
# Why: #8813 in Alexa global
'http://www.minhavida.com.br/',
# Why: #8814 in Alexa global
'http://www.senzapudore.it/',
# Why: #8815 in Alexa global
'http://www.extra.cz/',
# Why: #8816 in Alexa global
'http://www.cinemark.com/',
# Why: #8817 in Alexa global
'http://www.career.ru/',
# Why: #8818 in Alexa global
'http://www.realself.com/',
# Why: #8819 in Alexa global
'http://www.i4455.com/',
# Why: #8820 in Alexa global
'http://www.ntlworld.com/',
# Why: #8821 in Alexa global
'http://chinaw3.com/',
# Why: #8822 in Alexa global
'http://www.berliner-sparkasse.de/',
# Why: #8823 in Alexa global
'http://www.autoscout24.be/',
# Why: #8824 in Alexa global
'http://www.heureka.sk/',
# Why: #8825 in Alexa global
'http://tienphong.vn/',
# Why: #8826 in Alexa global
'http://www.1001freefonts.com/',
# Why: #8827 in Alexa global
'http://www.bluestacks.com/',
# Why: #8828 in Alexa global
'http://www.livesports.pl/',
# Why: #8829 in Alexa global
'http://www.bd-pratidin.com/',
# Why: #8831 in Alexa global
'http://www.es.tl/',
# Why: #8832 in Alexa global
'http://www.backcountry.com/',
# Why: #8833 in Alexa global
'http://www.fourhourworkweek.com/',
# Why: #8834 in Alexa global
'http://ebay.cn/',
# Why: #8835 in Alexa global
'http://www.pointclicktrack.com/',
# Why: #8836 in Alexa global
'http://www.joomlacode.org/',
# Why: #8837 in Alexa global
'http://www.fantage.com/',
# Why: #8838 in Alexa global
'http://www.seowizard.ru/',
# Why: #8839 in Alexa global
'http://military38.com/',
# Why: #8840 in Alexa global
'http://www.wenkang.cn/',
# Why: #8842 in Alexa global
'http://www.swedbank.lt/',
# Why: #8843 in Alexa global
'http://www.govoyages.com/',
# Why: #8844 in Alexa global
'http://www.fgov.be/',
# Why: #8845 in Alexa global
'http://www.dengeki.com/',
# Why: #8846 in Alexa global
'http://www.3773.com.cn/',
# Why: #8847 in Alexa global
'http://www.ed4.net/',
# Why: #8848 in Alexa global
'http://www.mql5.com/',
# Why: #8849 in Alexa global
'http://www.gottabemobile.com/',
# Why: #8850 in Alexa global
'http://www.kdslife.com/',
# Why: #8851 in Alexa global
'http://5yi.com/',
# Why: #8852 in Alexa global
'http://www.bforex.com/',
# Why: #8853 in Alexa global
'http://www.eurogamer.net/',
# Why: #8854 in Alexa global
'http://www.az.pl/',
# Why: #8855 in Alexa global
'http://www.partypoker.com/',
# Why: #8856 in Alexa global
'http://www.cinapalace.com/',
# Why: #8857 in Alexa global
'http://www.sbt.com.br/',
# Why: #8858 in Alexa global
'http://www.nanos.jp/',
# Why: #8859 in Alexa global
'http://www.phpcms.cn/',
# Why: #8860 in Alexa global
'http://www.weatherzone.com.au/',
# Why: #8861 in Alexa global
'http://www.cutv.com/',
# Why: #8862 in Alexa global
'http://www.sweetwater.com/',
# Why: #8863 in Alexa global
'http://www.vodacom.co.za/',
# Why: #8864 in Alexa global
'http://www.hostgator.in/',
# Why: #8865 in Alexa global
'http://www.mojim.com/',
# Why: #8866 in Alexa global
'http://www.getnews.jp/',
# Why: #8868 in Alexa global
'http://www.eklablog.com/',
# Why: #8869 in Alexa global
'http://www.divaina.com/',
# Why: #8870 in Alexa global
'http://www.acces-charme.com/',
# | |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/piecharts.py
# experimental pie chart script. Two types of pie - one is a monolithic
#widget with all top-level properties, the other delegates most stuff to
#a wedges collection which lets you customize the group or every individual
#wedge.
"""Basic Pie Chart class.
This permits you to customize and pop out individual wedges;
supports elliptical and circular pies.
"""
__version__=''' $Id: piecharts.py,v 1.1 2006/05/26 19:19:39 thomas Exp $ '''
import copy
from math import sin, cos, pi
from reportlab.lib import colors
from reportlab.lib.validators import isColor, isNumber, isListOfNumbersOrNone,\
isListOfNumbers, isColorOrNone, isString,\
isListOfStringsOrNone, OneOf, SequenceOf,\
isBoolean, isListOfColors, isNumberOrNone,\
isNoneOrListOfNoneOrStrings, isTextAnchor,\
isNoneOrListOfNoneOrNumbers, isBoxAnchor,\
isStringOrNone
from reportlab.lib.attrmap import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics.shapes import Group, Drawing, Ellipse, Wedge, String, STATE_DEFAULTS, ArcPath, Polygon
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from textlabels import Label
_ANGLE2BOXANCHOR={0:'w', 45:'sw', 90:'s', 135:'se', 180:'e', 225:'ne', 270:'n', 315: 'nw', -45: 'nw'}
class WedgeLabel(Label):
    # Pie-chart label whose box anchor is derived from the label's angular
    # position around the pie (the caller stores that angle in self._pmv).
    def _checkDXY(self,ba):
        # Hook for subclasses to adjust dx/dy for a given box anchor;
        # the base wedge label needs no adjustment.
        pass
    def _getBoxAnchor(self):
        # Snap the label angle to the nearest multiple of 45 degrees.
        na = (int((self._pmv%360)/45.)*45)%360
        if not (na % 90): # we have a right angle case
            # Near (but not exactly on) a 0/90/180/270 axis: shift to the
            # adjacent diagonal anchor so the text clears the wedge.
            da = (self._pmv - na) % 360
            if abs(da)>5:
                na = na + (da>0 and 45 or -45)
        ba = _ANGLE2BOXANCHOR[na]
        self._checkDXY(ba)
        return ba
class WedgeProperties(PropHolder):
    """This holds descriptive information about the wedges in a pie chart.

    It is not to be confused with the 'wedge itself'; this just holds
    a recipe for how to format one, and does not allow you to hack the
    angles. It can format a genuine Wedge object for you with its
    format method.
    """
    # Validated property schema: the attributes a wedge style may carry.
    _attrMap = AttrMap(
        strokeWidth = AttrMapValue(isNumber),
        fillColor = AttrMapValue(isColorOrNone),
        strokeColor = AttrMapValue(isColorOrNone),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
        popout = AttrMapValue(isNumber),
        fontName = AttrMapValue(isString),
        fontSize = AttrMapValue(isNumber),
        fontColor = AttrMapValue(isColorOrNone),
        labelRadius = AttrMapValue(isNumber),
        label_dx = AttrMapValue(isNumber),
        label_dy = AttrMapValue(isNumber),
        label_angle = AttrMapValue(isNumber),
        label_boxAnchor = AttrMapValue(isBoxAnchor),
        label_boxStrokeColor = AttrMapValue(isColorOrNone),
        label_boxStrokeWidth = AttrMapValue(isNumber),
        label_boxFillColor = AttrMapValue(isColorOrNone),
        label_strokeColor = AttrMapValue(isColorOrNone),
        label_strokeWidth = AttrMapValue(isNumber),
        label_text = AttrMapValue(isStringOrNone),
        label_leading = AttrMapValue(isNumberOrNone),
        label_width = AttrMapValue(isNumberOrNone),
        label_maxWidth = AttrMapValue(isNumberOrNone),
        label_height = AttrMapValue(isNumberOrNone),
        label_textAnchor = AttrMapValue(isTextAnchor),
        label_visible = AttrMapValue(isBoolean,desc="True if the label is to be drawn"),
        label_topPadding = AttrMapValue(isNumber,'padding at top of box'),
        label_leftPadding = AttrMapValue(isNumber,'padding at left of box'),
        label_rightPadding = AttrMapValue(isNumber,'padding at right of box'),
        label_bottomPadding = AttrMapValue(isNumber,'padding at bottom of box'),
        )
    def __init__(self):
        # Wedge outline/fill defaults.
        self.strokeWidth = 0
        self.fillColor = None
        self.strokeColor = STATE_DEFAULTS["strokeColor"]
        self.strokeDashArray = STATE_DEFAULTS["strokeDashArray"]
        # Distance (points) to pop the wedge out from the pie center.
        self.popout = 0
        # Label font defaults come from the global graphics state.
        self.fontName = STATE_DEFAULTS["fontName"]
        self.fontSize = STATE_DEFAULTS["fontSize"]
        self.fontColor = STATE_DEFAULTS["fillColor"]
        # Label distance as a fraction of the pie radius (>1 places it outside).
        self.labelRadius = 1.2
        self.label_dx = self.label_dy = self.label_angle = 0
        # None means: use the chart-supplied label text.
        self.label_text = None
        self.label_topPadding = self.label_leftPadding = self.label_rightPadding = self.label_bottomPadding = 0
        self.label_boxAnchor = 'c'
        self.label_boxStrokeColor = None #boxStroke
        self.label_boxStrokeWidth = 0.5 #boxStrokeWidth
        self.label_boxFillColor = None
        self.label_strokeColor = None
        self.label_strokeWidth = 0.1
        self.label_leading = self.label_width = self.label_maxWidth = self.label_height = None
        self.label_textAnchor = 'start'
        self.label_visible = 1
def _addWedgeLabel(self,text,add,angle,labelX,labelY,wedgeStyle,labelClass=WedgeLabel):
    # now draw a label
    # `self` is the owning chart (provides simpleLabels); `add` is a callable
    # (typically group.add) that receives the finished label shape.
    if self.simpleLabels:
        # Cheap path: a plain String centered on the label point.
        theLabel = String(labelX, labelY, text)
        theLabel.textAnchor = "middle"
    else:
        # Fancy path: a WedgeLabel whose box anchor follows the wedge angle.
        theLabel = labelClass()
        theLabel._pmv = angle  # angular position; drives _getBoxAnchor
        theLabel.x = labelX
        theLabel.y = labelY
        # Copy all label_* styling straight from the wedge style.
        theLabel.dx = wedgeStyle.label_dx
        theLabel.dy = wedgeStyle.label_dy
        theLabel.angle = wedgeStyle.label_angle
        theLabel.boxAnchor = wedgeStyle.label_boxAnchor
        theLabel.boxStrokeColor = wedgeStyle.label_boxStrokeColor
        theLabel.boxStrokeWidth = wedgeStyle.label_boxStrokeWidth
        theLabel.boxFillColor = wedgeStyle.label_boxFillColor
        theLabel.strokeColor = wedgeStyle.label_strokeColor
        theLabel.strokeWidth = wedgeStyle.label_strokeWidth
        _text = wedgeStyle.label_text
        if _text is None: _text = text  # fall back to the chart-supplied text
        theLabel._text = _text
        theLabel.leading = wedgeStyle.label_leading
        theLabel.width = wedgeStyle.label_width
        theLabel.maxWidth = wedgeStyle.label_maxWidth
        theLabel.height = wedgeStyle.label_height
        theLabel.textAnchor = wedgeStyle.label_textAnchor
        theLabel.visible = wedgeStyle.label_visible
        theLabel.topPadding = wedgeStyle.label_topPadding
        theLabel.leftPadding = wedgeStyle.label_leftPadding
        theLabel.rightPadding = wedgeStyle.label_rightPadding
        theLabel.bottomPadding = wedgeStyle.label_bottomPadding
    # Font settings apply to both simple and fancy labels.
    theLabel.fontSize = wedgeStyle.fontSize
    theLabel.fontName = wedgeStyle.fontName
    theLabel.fillColor = wedgeStyle.fontColor
    add(theLabel)
def _fixLabels(labels,n):
if labels is None:
labels = [''] * n
else:
i = n-len(labels)
if i>0: labels = labels + ['']*i
return labels
class Pie(Widget):
    """Monolithic pie chart widget.

    All top-level properties live on the chart itself; per-wedge formatting
    is delegated to the `slices` collection of WedgeProperties.
    """
    _attrMap = AttrMap(
        x = AttrMapValue(isNumber, desc='X position of the chart within its container.'),
        y = AttrMapValue(isNumber, desc='Y position of the chart within its container.'),
        width = AttrMapValue(isNumber, desc='width of pie bounding box. Need not be same as width.'),
        height = AttrMapValue(isNumber, desc='height of pie bounding box. Need not be same as height.'),
        data = AttrMapValue(isListOfNumbers, desc='list of numbers defining wedge sizes; need not sum to 1'),
        labels = AttrMapValue(isListOfStringsOrNone, desc="optional list of labels to use for each data point"),
        startAngle = AttrMapValue(isNumber, desc="angle of first slice; like the compass, 0 is due North"),
        direction = AttrMapValue( OneOf('clockwise', 'anticlockwise'), desc="'clockwise' or 'anticlockwise'"),
        slices = AttrMapValue(None, desc="collection of wedge descriptor objects"),
        simpleLabels = AttrMapValue(isBoolean, desc="If true(default) use String not super duper WedgeLabel"),
        other_threshold = AttrMapValue(isNumber, desc='A value for doing thresh holding, not used yet.'),
        )
    other_threshold=None

    def __init__(self):
        self.x = 0
        self.y = 0
        self.width = 100
        self.height = 100
        self.data = [1]
        self.labels = None  # or list of strings
        self.startAngle = 90
        self.direction = "clockwise"
        self.simpleLabels = 1
        # Per-wedge styles; indexing past the explicit entries yields defaults.
        self.slices = TypedPropertyCollection(WedgeProperties)
        self.slices[0].fillColor = colors.darkcyan
        self.slices[1].fillColor = colors.blueviolet
        self.slices[2].fillColor = colors.blue
        self.slices[3].fillColor = colors.cyan

    def demo(self):
        """Return a small sample Drawing showing off the main features."""
        d = Drawing(200, 100)
        pc = Pie()
        pc.x = 50
        pc.y = 10
        pc.width = 100
        pc.height = 80
        pc.data = [10,20,30,40,50,60]
        pc.labels = ['a','b','c','d','e','f']
        pc.slices.strokeWidth=0.5
        pc.slices[3].popout = 10
        pc.slices[3].strokeWidth = 2
        pc.slices[3].strokeDashArray = [2,2]
        pc.slices[3].labelRadius = 1.75
        pc.slices[3].fontColor = colors.red
        pc.slices[0].fillColor = colors.darkcyan
        pc.slices[1].fillColor = colors.blueviolet
        pc.slices[2].fillColor = colors.blue
        pc.slices[3].fillColor = colors.cyan
        pc.slices[4].fillColor = colors.aquamarine
        pc.slices[5].fillColor = colors.cadetblue
        pc.slices[6].fillColor = colors.lightcoral
        d.add(pc)
        return d

    def normalizeData(self):
        """Return self.data rescaled so the values sum to 360 (degrees).

        Also stores the raw total on self._sum. An (effectively) all-zero
        data set yields a list of zeros rather than dividing by zero.
        """
        # BUG FIX (Python 3 compatibility): the original used `reduce(add, ...)`
        # without importing reduce (a builtin only in Python 2) and relied on
        # `map()` returning a list.  `sum` and a list comprehension behave
        # identically on both Python 2 and 3.
        data = self.data
        self._sum = total = float(sum(data))
        if abs(total) >= 1e-8:
            scale = 360.0 / total
            return [x * scale for x in data]
        return [0] * len(data)

    def makeWedges(self):
        """Build and return a Group containing one wedge (plus optional
        label) per data point."""
        # normalize slice data
        normData = self.normalizeData()
        n = len(normData)
        labels = _fixLabels(self.labels,n)

        xradius = self.width/2.0
        yradius = self.height/2.0
        centerx = self.x + xradius
        centery = self.y + yradius

        if self.direction == "anticlockwise":
            whichWay = 1
        else:
            whichWay = -1

        g = Group()
        i = 0
        styleCount = len(self.slices)

        startAngle = self.startAngle #% 360
        for angle in normData:
            endAngle = (startAngle + (angle * whichWay)) #% 360
            # skip degenerate (zero-size) wedges entirely
            if abs(startAngle-endAngle)>=1e-5:
                if startAngle < endAngle:
                    a1 = startAngle
                    a2 = endAngle
                else:
                    a1 = endAngle
                    a2 = startAngle

                #if we didn't use %stylecount here we'd end up with the later wedges
                #all having the default style
                wedgeStyle = self.slices[i%styleCount]

                # is it a popout?
                cx, cy = centerx, centery
                # BUG FIX: `<>` is Python-2-only syntax (SyntaxError on
                # Python 3); `!=` is equivalent on both.
                if wedgeStyle.popout != 0:
                    # pop out the wedge
                    averageAngle = (a1+a2)/2.0
                    aveAngleRadians = averageAngle * pi/180.0
                    popdistance = wedgeStyle.popout
                    cx = centerx + popdistance * cos(aveAngleRadians)
                    cy = centery + popdistance * sin(aveAngleRadians)

                if n > 1:
                    theWedge = Wedge(cx, cy, xradius, a1, a2, yradius=yradius)
                elif n==1:
                    # a single data point is drawn as a full ellipse
                    theWedge = Ellipse(cx, cy, xradius, yradius)

                theWedge.fillColor = wedgeStyle.fillColor
                theWedge.strokeColor = wedgeStyle.strokeColor
                theWedge.strokeWidth = wedgeStyle.strokeWidth
                theWedge.strokeDashArray = wedgeStyle.strokeDashArray

                g.add(theWedge)
                text = labels[i]
                if text:
                    averageAngle = (a1+a2)/2.0
                    aveAngleRadians = averageAngle*pi/180.0
                    labelRadius = wedgeStyle.labelRadius
                    labelX = cx + (0.5 * self.width * cos(aveAngleRadians) * labelRadius)
                    labelY = cy + (0.5 * self.height * sin(aveAngleRadians) * labelRadius)
                    _addWedgeLabel(self,text,g.add,averageAngle,labelX,labelY,wedgeStyle)
            startAngle = endAngle
            i = i + 1

        return g

    def draw(self):
        """Draw the chart: just the wedges group."""
        g = Group()
        g.add(self.makeWedges())
        return g
class LegendedPie(Pie):
"""Pie with a two part legend (one editable with swatches, one hidden without swatches)."""
_attrMap = AttrMap(BASE=Pie,
drawLegend = AttrMapValue(isBoolean, desc="If true then create and draw legend"),
legend1 = AttrMapValue(None, desc="Handle to legend for pie"),
legendNumberFormat = AttrMapValue(None, desc="Formatting routine for number on right hand side of legend."),
legendNumberOffset = AttrMapValue(isNumber, desc="Horizontal space between legend and numbers on r/hand side"),
pieAndLegend_colors = AttrMapValue(isListOfColors, desc="Colours used for both swatches and pie"),
legend_names = AttrMapValue(isNoneOrListOfNoneOrStrings, desc="Names used in legend (or None)"),
legend_data = AttrMapValue(isNoneOrListOfNoneOrNumbers, desc="Numbers used on r/hand side of legend (or None)"),
leftPadding = AttrMapValue(isNumber, desc='Padding on left of drawing'),
rightPadding = AttrMapValue(isNumber, desc='Padding on right of drawing'),
topPadding = AttrMapValue(isNumber, desc='Padding at top of drawing'),
bottomPadding = AttrMapValue(isNumber, desc='Padding at bottom of drawing'),
)
def __init__(self):
Pie.__init__(self)
self.x = 0
self.y = 0
self.height = 100
self.width = 100
self.data = [38.4, 20.7, 18.9, 15.4, 6.6]
self.labels = None
self.direction = 'clockwise'
PCMYKColor, black = colors.PCMYKColor, colors.black
self.pieAndLegend_colors = [PCMYKColor(11,11,72,0,spotName='PANTONE 458 CV'),
PCMYKColor(100,65,0,30,spotName='PANTONE 288 CV'),
PCMYKColor(11,11,72,0,spotName='PANTONE 458 CV',density=75),
PCMYKColor(100,65,0,30,spotName='PANTONE 288 CV',density=75),
PCMYKColor(11,11,72,0,spotName='PANTONE 458 CV',density=50),
PCMYKColor(100,65,0,30,spotName='PANTONE 288 CV',density=50)]
#Allows us up to six 'wedges' to be coloured
self.slices[0].fillColor=self.pieAndLegend_colors[0]
self.slices[1].fillColor=self.pieAndLegend_colors[1]
self.slices[2].fillColor=self.pieAndLegend_colors[2]
self.slices[3].fillColor=self.pieAndLegend_colors[3]
self.slices[4].fillColor=self.pieAndLegend_colors[4]
self.slices[5].fillColor=self.pieAndLegend_colors[5]
self.slices.strokeWidth = 0.75
self.slices.strokeColor = black
legendOffset = 17
self.legendNumberOffset = | |
matrices
run_test(3, 2, 2)
run_test(3, 3, 3)
run_test(3, 4, 4)
run_test(3, 5, 5)
# large batch of matrices
run_test(3, 3, 2, 2)
run_test(3, 3, 3, 3)
run_test(3, 3, 4, 4)
run_test(3, 3, 5, 5)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_slogdet(self, device, dtype):
    """Check torch.linalg.slogdet against numpy.linalg.slogdet on batches of
    matrices with assorted characteristics, including the out= variant."""
    from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
                                                      random_hermitian_pd_matrix, random_square_matrix_of_rank)

    def run_case(matsize, batchdims, mat_chars):
        # One factory per matrix characteristic; each call produces a fresh
        # matsize x matsize matrix of the requested kind.
        factories = {
            'hermitian': lambda: random_hermitian_matrix(matsize, dtype=dtype, device=device),
            'hermitian_psd': lambda: random_hermitian_psd_matrix(matsize, dtype=dtype, device=device),
            'hermitian_pd': lambda: random_hermitian_pd_matrix(matsize, dtype=dtype, device=device),
            'singular': lambda: torch.ones(matsize, matsize, dtype=dtype, device=device),
            'non_singular': lambda: random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device),
        }

        total = np.prod(batchdims)
        if total != 0:
            # Cycle through the requested characteristics across the batch.
            mats = [factories[mat_chars[idx % len(mat_chars)]]() for idx in range(total)]
            batch = torch.stack(mats, dim=0).reshape(batchdims + (matsize, matsize))
        else:
            # Empty batch: contents are irrelevant, only the shape matters.
            batch = torch.randn(*batchdims, matsize, matsize, dtype=dtype, device=device)

        actual = torch.linalg.slogdet(batch)
        expected = np.linalg.slogdet(batch.cpu().numpy())
        self.assertEqual(expected[0], actual[0], atol=self.precision, rtol=self.precision)
        self.assertEqual(expected[1], actual[1], atol=self.precision, rtol=self.precision)

        # The out= variant must both fill the supplied tensors and return
        # values identical to the plain call.
        sign_out = torch.empty_like(actual[0])
        logabsdet_out = torch.empty_like(actual[1])
        ans = torch.linalg.slogdet(batch, out=(sign_out, logabsdet_out))
        self.assertEqual(ans[0], sign_out)
        self.assertEqual(ans[1], logabsdet_out)
        self.assertEqual(sign_out, actual[0])
        self.assertEqual(logabsdet_out, actual[1])

    for matsize, batchdims in itertools.product([0, 3, 5], [(0,), (3,), (5, 3)]):
        run_case(matsize, batchdims, mat_chars=['hermitian_pd'])
        run_case(matsize, batchdims, mat_chars=['singular'])
        run_case(matsize, batchdims, mat_chars=['non_singular'])
        run_case(matsize, batchdims, mat_chars=['hermitian', 'hermitian_pd', 'hermitian_psd'])
        run_case(matsize, batchdims, mat_chars=['singular', 'non_singular'])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_slogdet_errors_and_warnings(self, device, dtype):
    """Exercise the error and warning paths of torch.linalg.slogdet."""
    # slogdet requires the input to be a square matrix or batch of square matrices
    a = torch.randn(2, 3, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
        torch.linalg.slogdet(a)

    # slogdet requires the input to be at least 2 dimensional tensor
    a = torch.randn(2, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
        torch.linalg.slogdet(a)

    # slogdet requires the input to be of float, double, cfloat or cdouble types
    a = torch.randn(2, 2, device=device, dtype=torch.bfloat16)
    with self.assertRaisesRegex(RuntimeError, r'of float, double, cfloat or cdouble types'):
        torch.linalg.slogdet(a)

    # if non-empty out tensor with wrong shape is passed a warning is given
    a = torch.randn(2, 3, 3, device=device, dtype=dtype)
    sign_out = torch.empty(1, device=device, dtype=dtype)
    # logabsdet is real-valued even for complex inputs, hence the real dtype.
    real_dtype = a.real.dtype if dtype.is_complex else dtype
    logabsdet_out = torch.empty(1, device=device, dtype=real_dtype)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should be safely castable
    # Both outputs are Int here, so the error reports the first one (sign).
    sign_out = torch.empty_like(a).to(torch.int)
    logabsdet_out = torch.empty_like(a).to(torch.int)
    with self.assertRaisesRegex(RuntimeError, "but got sign with dtype Int"):
        torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
    # With sign fixed, the Int logabsdet tensor is now the offending output.
    sign_out = torch.empty(0, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, "but got logabsdet with dtype Int"):
        torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        sign_out = torch.empty(0, device=wrong_device, dtype=dtype)
        logabsdet_out = torch.empty(0, device=wrong_device, dtype=real_dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
@skipCUDAIf(torch.version.cuda is not None
            and torch.version.cuda.split(".") < ["11", "3"], "There's a bug in cuSOLVER < 11.3")
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_det_logdet_slogdet(self, device, dtype):
    """Cross-check det / logdet / slogdet / linalg.slogdet against each other
    and against NumPy on matrices whose determinants are known analytically
    or follow from elementary row/column operations."""

    def reference_slogdet(M):
        # NumPy is the source of truth for the expected (sign, logabsdet).
        sdet, logabsdet = np.linalg.slogdet(M.detach().cpu().numpy())
        return M.new_tensor(sdet), M.new_tensor(logabsdet)

    def test_single_det(M, target, desc):
        # Check all four determinant entry points on M against the expected
        # (sign, logabsdet) pair in `target`.
        target_sdet, target_logabsdet = target

        det = M.det()
        logdet = M.logdet()
        sdet, logabsdet = M.slogdet()
        linalg_sdet, linalg_logabsdet = torch.linalg.slogdet(M)

        # Test det
        self.assertEqual(det, target_sdet * target_logabsdet.exp(),
                         atol=1e-6, rtol=0, msg='{} (det)'.format(desc))

        # Test slogdet
        # Compare the overall value rather than individual parts because of
        # precision issues when det is near zero.
        self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),
                         atol=1e-6, rtol=0, msg='{} (slogdet)'.format(desc))
        self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),
                         atol=1e-6, rtol=0, msg='{} (linalg_slogdet)'.format(desc))

        # Test logdet
        # Compare logdet against our own pytorch slogdet because they should
        # be consistent, while it may behave slightly differently with other
        # slogdet implementations when det is near zero due to precision
        # issues.
        if sdet.item() < 0:
            # logdet of a negative-determinant matrix is NaN, and NaN != NaN.
            self.assertTrue(logdet.item() != logdet.item(), '{} (logdet negative case)'.format(desc))
        else:
            self.assertEqual(logdet.exp(), target_logabsdet.exp(),
                             atol=1e-6, rtol=0, msg='{} (logdet non-negative case)'.format(desc))

    eye = torch.eye(5, dtype=dtype, device=device)
    test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')
    # Testing bug in #34061 (https://github.com/pytorch/pytorch/issues/34061)
    for n in range(250, 551, 100):
        mat = torch.randn(n, n, dtype=dtype, device=device)
        q, _ = torch.qr(mat)
        ref_det, ref_logabsdet = reference_slogdet(q)
        test_single_det(q, (ref_det, ref_logabsdet), 'orthogonal')

    def test(M):
        assert M.size(0) >= 5, 'this helper fn assumes M to be at least 5x5'
        M = M.to(device)

        ref_M_sdet, ref_M_logabsdet = reference_slogdet(M)

        test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'basic')
        if ref_M_logabsdet.exp().item() >= 1e-6:  # skip singular
            M_inv = M.inverse()
            test_single_det(M_inv, reference_slogdet(M_inv), 'inverse')

        # BUGFIX: previously M itself was passed here, so the 'transpose'
        # case re-tested the untransposed matrix.  det(M) == det(M.t()),
        # so the expected target is unchanged.
        test_single_det(M.t(), (ref_M_sdet, ref_M_logabsdet), 'transpose')

        # Scaling a single row or column scales the determinant by `scale`.
        for x in [0, 2, 4]:
            for scale in [-2, -0.1, 0, 10]:
                if scale > 0:
                    target = ref_M_sdet, ref_M_logabsdet + math.log(scale)
                elif scale == 0:
                    target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                else:
                    target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-scale)

                # dim 0
                M_clone = M.clone()
                M_clone[:, x] *= scale
                test_single_det(M_clone, target, 'scale a row')
                # dim 1
                M_clone = M.clone()
                M_clone[x, :] *= scale
                test_single_det(M_clone, target, 'scale a column')

        for x1, x2 in [(0, 3), (4, 1), (3, 2)]:
            assert x1 != x2, 'x1 and x2 needs to be different for this test'
            # Duplicating a row/column makes the matrix singular.
            target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
            # dim 0
            M_clone = M.clone()
            M_clone[:, x2] = M_clone[:, x1]
            test_single_det(M_clone, target, 'two rows are same')
            # dim 1
            M_clone = M.clone()
            M_clone[x2, :] = M_clone[x1, :]
            test_single_det(M_clone, target, 'two columns are same')

            # A combined scale-and-swap of two rows/columns rescales the
            # determinant by -scale1 * scale2.
            for scale1, scale2 in [(0.3, -1), (0, 2), (10, 0.1)]:
                det_scale = scale1 * scale2 * -1
                if det_scale > 0:
                    target = ref_M_sdet, ref_M_logabsdet + math.log(det_scale)
                elif det_scale == 0:
                    target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                else:
                    target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-det_scale)

                # dim 0
                M_clone = M.clone()
                t = M_clone[:, x1] * scale1
                M_clone[:, x1] += M_clone[:, x2] * scale2
                M_clone[:, x2] = t
                test_single_det(M_clone, target, 'exchanging rows')
                # dim 1
                M_clone = M.clone()
                t = M_clone[x1, :] * scale1
                M_clone[x1, :] += M_clone[x2, :] * scale2
                M_clone[x2, :] = t
                test_single_det(M_clone, target, 'exchanging columns')

    def get_random_mat_scale(n):
        # For matrices with values i.i.d. with 0 mean, unit variance, and
        # subexponential tail, we have:
        #   E[log det(A^2)] \approx log((n-1)!)
        #
        # Notice:
        #   log Var[det(A)] = log E[det(A^2)] >= E[log det(A^2)]
        #
        # So:
        #   stddev[det(A)] >= sqrt( (n-1)! )
        #
        # We use this as an intuitive guideline to scale random generated
        # matrices so our closeness tests can work more robustly:
        #   scale by sqrt( (n-1)! )^(-1/n) = ( (n-1)! )^(-1/(2n))
        #
        # source: https://arxiv.org/pdf/1112.0752.pdf
        # TODO: technically we need subexponential distn for this to hold,
        #       but we mostly use gaussian entries below. Consider switching
        #       to Chi-sq if this turns out not stable enough, since Chi-sq
        #       is easy enough to sample from.
        return math.factorial(n - 1) ** (-1.0 / (2 * n))

    for n in [5, 10, 25]:
        scale = get_random_mat_scale(n)
        test(torch.randn(n, n, dtype=dtype, device=device) * scale)
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        # symmetric psd
        test(r.mm(r.t()))
        # symmetric pd
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        test(r.mm(r.t()) + torch.eye(n, dtype=dtype, device=device) * 1e-6)
        # symmetric
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        for i in range(n):
            for j in range(i):
                r[i, j] = r[j, i]
        test(r)
        # non-contiguous
        test((torch.randn(n, n, n + 1, dtype=dtype, device=device) * scale)[:, 2, 1:])
        # det = 0 (flip one singular vector's sign, zero the smallest value)
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        u, s, v = r.svd()
        if reference_slogdet(u)[0] < 0:
            u = -u
        if reference_slogdet(v)[0] < 0:
            v = -v
        s[0] *= -1
        s[-1] = 0
        test(u.mm(s.diag()).mm(v))

    # Small values to test numerical stability. Note that we don't scale
    # this matrix.
    r = torch.randn(512, 512, dtype=dtype, device=device)
    u, s, v = r.svd()
    s.fill_(1. / (100 * s.numel()))
    test(u.mm(s.diag()).mm(v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_det_logdet_slogdet_batched(self, device, dtype):
from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, random_square_matrix_of_rank)
# mat_chars denotes matrix characteristics
# possible values are: sym, sym_psd, sym_pd, sing, non_sym
def run_test(matsize, batchdims, mat_chars):
num_matrices = reduce(lambda x, y: x * y, batchdims, 1)
list_of_matrices = []
for idx in range(num_matrices):
mat_type = idx % len(mat_chars)
if mat_chars[mat_type] == 'sym':
list_of_matrices.append(random_symmetric_matrix(matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == 'sym_psd':
list_of_matrices.append(random_symmetric_psd_matrix(matsize, dtype=dtype, device=device))
elif mat_chars[mat_type] == | |
cons2,
cons3,
cons8,
cons682,
cons228,
cons150,
cons20,
cons697,
)
rule1148 = ReplacementRule(pattern1148, replacement1148)
pattern1149 = Pattern(
Integral(
(x_ * WC("d", S(1))) ** m_
/ (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons682,
cons228,
cons150,
cons33,
cons531,
)
rule1149 = ReplacementRule(pattern1149, replacement1149)
pattern1150 = Pattern(
Integral(
x_ ** S(2) / (a_ + x_ ** S(4) * WC("c", S(1)) + x_ ** S(2) * WC("b", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons698,
cons699,
)
rule1150 = ReplacementRule(pattern1150, With1150)
pattern1151 = Pattern(
Integral(
x_ ** WC("m", S(1))
/ (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons48,
cons228,
cons700,
cons701,
cons415,
)
rule1151 = ReplacementRule(pattern1151, With1151)
pattern1152 = Pattern(
Integral(
x_ ** WC("m", S(1))
/ (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons48,
cons228,
cons700,
cons702,
cons415,
)
rule1152 = ReplacementRule(pattern1152, With1152)
pattern1153 = Pattern(
Integral(
(x_ * WC("d", S(1))) ** m_
/ (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons682,
cons228,
cons150,
cons33,
cons703,
)
rule1153 = ReplacementRule(pattern1153, With1153)
pattern1154 = Pattern(
Integral(
(x_ * WC("d", S(1))) ** WC("m", S(1))
/ (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons19,
cons682,
cons228,
cons150,
)
rule1154 = ReplacementRule(pattern1154, With1154)
pattern1155 = Pattern(
Integral(
x_ ** S(2)
/ sqrt(a_ + x_ ** S(4) * WC("c", S(1)) + x_ ** S(2) * WC("b", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons677,
cons295,
)
rule1155 = ReplacementRule(pattern1155, With1155)
pattern1156 = Pattern(
Integral(
x_ ** S(2)
/ sqrt(a_ + x_ ** S(4) * WC("c", S(1)) + x_ ** S(2) * WC("b", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons677,
cons678,
cons679,
)
rule1156 = ReplacementRule(pattern1156, With1156)
pattern1157 = Pattern(
Integral(
x_ ** S(2)
/ sqrt(a_ + x_ ** S(4) * WC("c", S(1)) + x_ ** S(2) * WC("b", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons677,
cons486,
cons179,
)
rule1157 = ReplacementRule(pattern1157, With1157)
pattern1158 = Pattern(
Integral(
x_ ** S(2)
/ sqrt(a_ + x_ ** S(4) * WC("c", S(1)) + x_ ** S(2) * WC("b", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons677,
CustomConstraint(With1158),
)
rule1158 = ReplacementRule(pattern1158, replacement1158)
pattern1159 = Pattern(
Integral(
x_ ** S(2)
/ sqrt(a_ + x_ ** S(4) * WC("c", S(1)) + x_ ** S(2) * WC("b", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons677,
CustomConstraint(With1159),
)
rule1159 = ReplacementRule(pattern1159, replacement1159)
pattern1160 = Pattern(
Integral(
x_ ** S(2)
/ sqrt(a_ + x_ ** S(4) * WC("c", S(1)) + x_ ** S(2) * WC("b", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons677,
CustomConstraint(With1160),
)
rule1160 = ReplacementRule(pattern1160, replacement1160)
pattern1161 = Pattern(
Integral(
x_ ** S(2)
/ sqrt(a_ + x_ ** S(4) * WC("c", S(1)) + x_ ** S(2) * WC("b", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons677,
CustomConstraint(With1161),
)
rule1161 = ReplacementRule(pattern1161, replacement1161)
pattern1162 = Pattern(
Integral(
x_ ** S(2)
/ sqrt(a_ + x_ ** S(4) * WC("c", S(1)) + x_ ** S(2) * WC("b", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons228,
cons680,
)
rule1162 = ReplacementRule(pattern1162, With1162)
pattern1163 = Pattern(
Integral(
x_ ** S(2)
/ sqrt(a_ + x_ ** S(4) * WC("c", S(1)) + x_ ** S(2) * WC("b", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons228,
cons681,
)
rule1163 = ReplacementRule(pattern1163, With1163)
pattern1164 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons5,
cons682,
cons228,
cons198,
cons20,
)
rule1164 = ReplacementRule(pattern1164, replacement1164)
pattern1165 = Pattern(
Integral(
(x_ * WC("d", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons5,
cons682,
cons228,
cons198,
cons369,
)
rule1165 = ReplacementRule(pattern1165, With1165)
pattern1166 = Pattern(
Integral(
(x_ * WC("d", S(1))) ** m_
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons19,
cons5,
cons682,
cons228,
cons198,
cons358,
)
rule1166 = ReplacementRule(pattern1166, replacement1166)
pattern1167 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons19,
cons5,
cons682,
cons228,
cons491,
)
rule1167 = ReplacementRule(pattern1167, With1167)
pattern1168 = Pattern(
Integral(
(d_ * x_) ** m_
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons19,
cons5,
cons682,
cons228,
cons491,
)
rule1168 = ReplacementRule(pattern1168, replacement1168)
pattern1169 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons19,
cons4,
cons5,
cons682,
cons228,
cons543,
cons25,
)
rule1169 = ReplacementRule(pattern1169, replacement1169)
pattern1170 = Pattern(
Integral(
(d_ * x_) ** m_
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons19,
cons4,
cons5,
cons682,
cons228,
cons543,
cons25,
)
rule1170 = ReplacementRule(pattern1170, replacement1170)
pattern1171 = Pattern(
Integral(
(x_ * WC("d", S(1))) ** WC("m", S(1))
/ (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons19,
cons4,
cons682,
cons228,
)
rule1171 = ReplacementRule(pattern1171, With1171)
pattern1172 = Pattern(
Integral(
(x_ * WC("d", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons19,
cons4,
cons682,
cons228,
cons704,
)
rule1172 = ReplacementRule(pattern1172, replacement1172)
pattern1173 = Pattern(
Integral(
(x_ * WC("d", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons19,
cons4,
cons5,
cons682,
)
rule1173 = ReplacementRule(pattern1173, replacement1173)
pattern1174 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + x_ ** mn_ * WC("b", S(1)) + x_ ** WC("n", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons19,
cons4,
cons587,
cons40,
cons683,
)
rule1174 = ReplacementRule(pattern1174, replacement1174)
pattern1175 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + x_ ** mn_ * WC("b", S(1)) + x_ ** WC("n", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons19,
cons4,
cons5,
cons587,
cons149,
cons683,
)
rule1175 = ReplacementRule(pattern1175, replacement1175)
pattern1176 = Pattern(
Integral(
(d_ * x_) ** WC("m", S(1))
* (a_ + x_ ** mn_ * WC("b", S(1)) + x_ ** WC("n", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons19,
cons4,
cons5,
cons587,
)
rule1176 = ReplacementRule(pattern1176, replacement1176)
pattern1177 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (
v_ ** n_ * WC("b", S(1))
+ v_ ** WC("n2", S(1)) * WC("c", S(1))
+ WC("a", S(0))
)
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons4,
cons5,
cons682,
cons554,
cons20,
cons555,
)
rule1177 = ReplacementRule(pattern1177, replacement1177)
pattern1178 = Pattern(
Integral(
u_ ** WC("m", S(1))
* (
v_ ** n_ * WC("b", S(1))
+ v_ ** WC("n2", S(1)) * WC("c", S(1))
+ WC("a", S(0))
)
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons19,
cons4,
cons5,
cons682,
cons556,
)
rule1178 = ReplacementRule(pattern1178, replacement1178)
pattern1179 = Pattern(
Integral(
(d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
| |
data_frame if header.find(" Value ") >= 0
]
# Removes the columns for properties which are not of interest.
for header in property_headers:
property_type = header.split(" ")[0]
if property_type in schema.property_types:
continue
data_frame = data_frame.drop(header, axis=1)
uncertainty_header = header.replace(" Value ", " Uncertainty ")
if uncertainty_header in data_frame:
data_frame = data_frame.drop(uncertainty_header, axis=1)
# Drop any rows which do not contain any values for the property types of
# interest.
property_headers = [
header
for header in property_headers
if header.split(" ")[0] in schema.property_types
]
data_frame = data_frame.dropna(subset=property_headers, how="all")
# Apply a more specific filter which only retain which contain values
# for the specific property types, and which were measured for the
# specified number of components.
for property_type, n_components in schema.n_components.items():
property_header = next(
iter(x for x in property_headers if x.find(f"{property_type} ") == 0),
None,
)
if property_header is None:
continue
data_frame = data_frame[
data_frame[property_header].isna()
| data_frame["N Components"].isin(n_components)
]
# Apply the strict filter if requested
if schema.strict:
reordered_data_frame = reorder_data_frame(data_frame)
# Build a dictionary of which properties should be present partitioned
# by the number of components they should have been be measured for.
property_types = defaultdict(list)
if len(schema.n_components) > 0:
for property_type, n_components in schema.n_components.items():
for n_component in n_components:
property_types[n_component].append(property_type)
min_n_components = min(property_types)
max_n_components = max(property_types)
else:
min_n_components = reordered_data_frame["N Components"].min()
max_n_components = reordered_data_frame["N Components"].max()
for n_components in range(min_n_components, max_n_components + 1):
property_types[n_components].extend(schema.property_types)
substances_with_data = set()
components_with_data = {}
# For each N component find substances which have data points for
# all of the specified property types.
for n_components in range(min_n_components, max_n_components + 1):
component_data = reordered_data_frame[
reordered_data_frame["N Components"] == n_components
]
if n_components not in property_types or len(component_data) == 0:
continue
n_component_headers = [
header
for header in property_headers
if header.split(" ")[0] in property_types[n_components]
and header in component_data
]
if len(n_component_headers) != len(property_types[n_components]):
continue
n_component_substances = set.intersection(
*[
data_frame_to_substances(
component_data[component_data[header].notna()]
)
for header in n_component_headers
]
)
substances_with_data.update(n_component_substances)
components_with_data[n_components] = {
component
for substance in n_component_substances
for component in substance
}
if len(schema.n_components) > 0:
components_with_all_data = set.intersection(
*components_with_data.values()
)
# Filter out any smiles for don't appear in all of the N component
# substances.
data_frame = FilterBySmiles.apply(
data_frame,
FilterBySmilesSchema(smiles_to_include=[*components_with_all_data]),
)
# Filter out any substances which (within each N component) don't have
# all of the specified data types.
data_frame = FilterBySubstances.apply(
data_frame,
FilterBySubstancesSchema(substances_to_include=[*substances_with_data]),
)
data_frame = data_frame.dropna(axis=1, how="all")
return data_frame
class FilterByStereochemistrySchema(CurationComponentSchema):
    """Schema (no extra options) for the ``FilterByStereochemistry`` component."""

    # Discriminator used when (de)serializing this schema.
    type: Literal["FilterByStereochemistry"] = "FilterByStereochemistry"
class FilterByStereochemistry(CurationComponent):
    """A component which filters out data points measured for systems whereby the
    stereochemistry of a number of components is undefined."""

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByStereochemistrySchema,
        n_processes,
    ) -> pandas.DataFrame:
        from openff.toolkit.topology import Molecule
        from openff.toolkit.utils import UndefinedStereochemistryError

        def row_has_defined_stereo(row):
            # Keep a row only when every one of its components parses without
            # raising an undefined-stereochemistry error.
            for position in range(row["N Components"]):
                try:
                    Molecule.from_smiles(row[f"Component {position + 1}"])
                except UndefinedStereochemistryError:
                    return False

            return True

        # noinspection PyTypeChecker
        retained_mask = data_frame.apply(row_has_defined_stereo, axis=1)
        return data_frame[retained_mask]
class FilterByChargedSchema(CurationComponentSchema):
    """Schema (no extra options) for the ``FilterByCharged`` component."""

    # Discriminator used when (de)serializing this schema.
    type: Literal["FilterByCharged"] = "FilterByCharged"
class FilterByCharged(CurationComponent):
    """A component which filters out data points measured for substances where any of
    the constituent components have a net non-zero charge.
    """

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterByChargedSchema, n_processes
    ) -> pandas.DataFrame:
        from openff.toolkit.topology import Molecule

        def row_is_neutral(row):
            for position in range(row["N Components"]):
                molecule = Molecule.from_smiles(
                    row[f"Component {position + 1}"], allow_undefined_stereo=True
                )

                # Formal charges may be plain ints or unit-tagged quantities
                # depending on the toolkit version in use.
                net_charge = 0
                # noinspection PyUnresolvedReferences
                for atom in molecule.atoms:
                    charge = atom.formal_charge
                    net_charge += (
                        charge
                        if isinstance(charge, int)
                        else charge.m_as(unit.elementary_charge)
                    )

                if not numpy.isclose(net_charge, 0.0):
                    return False

            return True

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(row_is_neutral, axis=1)]
class FilterByIonicLiquidSchema(CurationComponentSchema):
    """Schema (no extra options) for the ``FilterByIonicLiquid`` component."""

    # Discriminator used when (de)serializing this schema.
    type: Literal["FilterByIonicLiquid"] = "FilterByIonicLiquid"
class FilterByIonicLiquid(CurationComponent):
    """A component which filters out data points measured for substances which
    contain or are classed as an ionic liquids.
    """

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByIonicLiquidSchema,
        n_processes,
    ) -> pandas.DataFrame:
        def row_is_not_ionic(row):
            # A '.' in a SMILES pattern denotes a multi-molecule (e.g. salt)
            # species; reject the row if any component contains one.
            return not any(
                "." in row[f"Component {position + 1}"]
                for position in range(row["N Components"])
            )

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(row_is_not_ionic, axis=1)]
class FilterBySmilesSchema(CurationComponentSchema):
    """Schema for ``FilterBySmiles``: retain or exclude measurements based on
    the SMILES patterns of the substance's components."""

    # Discriminator used when (de)serializing this schema.
    type: Literal["FilterBySmiles"] = "FilterBySmiles"

    smiles_to_include: Optional[List[str]] = Field(
        None,
        description="The smiles patterns to retain. This option is mutually "
        "exclusive with `smiles_to_exclude`",
    )
    smiles_to_exclude: Optional[List[str]] = Field(
        None,
        description="The smiles patterns to exclude. This option is mutually "
        "exclusive with `smiles_to_include`",
    )
    allow_partial_inclusion: bool = Field(
        False,
        description="If False, all the components in a substance must appear in "
        "the `smiles_to_include` list, otherwise, only some must appear. "
        "This option only applies when `smiles_to_include` is set.",
    )

    @root_validator
    def _validate_mutually_exclusive(cls, values):
        """Ensure exactly one of the two pattern lists is provided."""
        smiles_to_include = values.get("smiles_to_include")
        smiles_to_exclude = values.get("smiles_to_exclude")

        # Exactly one of the two options must be set.
        assert smiles_to_include is not None or smiles_to_exclude is not None
        assert smiles_to_include is None or smiles_to_exclude is None

        return values
class FilterBySmiles(CurationComponent):
    """A component which filters the data set so that it only contains either a
    specific set of smiles, or does not contain any of a set of specifically excluded
    smiles.
    """

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterBySmilesSchema, n_processes
    ) -> pandas.DataFrame:
        # Exactly one of the two lists is set (enforced by the schema); treat
        # the unset one as empty so the row predicate can handle both modes.
        include_list = schema.smiles_to_include
        exclude_list = schema.smiles_to_exclude

        if include_list is not None:
            exclude_list = []
        elif exclude_list is not None:
            include_list = []

        def retain_row(row):
            components = [
                row[f"Component {position + 1}"]
                for position in range(row["N Components"])
            ]

            # Exclusion mode: drop the row if any component is excluded.
            if any(smiles in exclude_list for smiles in components):
                return False
            if len(exclude_list) > 0:
                return True

            # Inclusion mode: require all of the components (or at least one,
            # when partial inclusion is allowed) to be in the include list.
            included = [smiles in include_list for smiles in components]
            return any(included) if schema.allow_partial_inclusion else all(included)

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(retain_row, axis=1)]
class FilterBySmirksSchema(CurationComponentSchema):
    """Schema for ``FilterBySmirks``: retain or exclude measurements based on
    whether components match a set of SMIRKS patterns."""

    # Discriminator used when (de)serializing this schema.
    type: Literal["FilterBySmirks"] = "FilterBySmirks"

    smirks_to_include: Optional[List[str]] = Field(
        None,
        description="The smirks patterns which must be matched by a substance in "
        "order to retain a measurement. This option is mutually exclusive with "
        "`smirks_to_exclude`",
    )
    smirks_to_exclude: Optional[List[str]] = Field(
        None,
        description="The smirks patterns which must not be matched by a substance in "
        "order to retain a measurement. This option is mutually exclusive with "
        "`smirks_to_include`",
    )
    allow_partial_inclusion: bool = Field(
        False,
        description="If False, all the components in a substance must match at least "
        "one pattern in `smirks_to_include` in order to retain a measurement, "
        "otherwise, only a least one component must match. This option only applies "
        "when `smirks_to_include` is set.",
    )

    @root_validator
    def _validate_mutually_exclusive(cls, values):
        """Ensure exactly one of the two pattern lists is provided."""
        smirks_to_include = values.get("smirks_to_include")
        smirks_to_exclude = values.get("smirks_to_exclude")

        # Exactly one of the two options must be set.
        assert smirks_to_include is not None or smirks_to_exclude is not None
        assert smirks_to_include is None or smirks_to_exclude is None

        return values
class FilterBySmirks(CurationComponent):
    """A component which filters a data set so that it only contains measurements made
    for molecules which contain (or don't) a set of chemical environments
    represented by SMIRKS patterns.
    """

    @staticmethod
    @functools.lru_cache(1000)
    def _find_smirks_matches(smiles_pattern, *smirks_patterns):
        """Determines which (if any) of the specified smirks match the specified
        molecule.

        Parameters
        ----------
        smiles_pattern: str
            The SMILES representation to try and match against.
        smirks_patterns: str
            The smirks patterns to try and match.

        Returns
        -------
        list of str
            The matched smirks patterns.
        """
        from openff.toolkit.topology import Molecule

        if len(smirks_patterns) == 0:
            return []

        molecule = Molecule.from_smiles(smiles_pattern, allow_undefined_stereo=True)

        return [
            pattern
            for pattern in smirks_patterns
            if len(molecule.chemical_environment_matches(pattern)) > 0
        ]

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterBySmirksSchema, n_processes
    ) -> pandas.DataFrame:
        # The schema guarantees that exactly one of the two lists is provided.
        patterns_of_interest = (
            schema.smirks_to_include
            if schema.smirks_to_include
            else schema.smirks_to_exclude
        )

        def retain_row(row):
            components = [
                row[f"Component {position + 1}"]
                for position in range(row["N Components"])
            ]

            is_matched = [
                len(cls._find_smirks_matches(smiles, *patterns_of_interest)) > 0
                for smiles in components
            ]

            if schema.smirks_to_exclude is not None:
                # Exclusion mode: no component may match any of the patterns.
                return not any(is_matched)

            # Inclusion mode: all components (or at least one, when partial
            # inclusion is allowed) must match one of the patterns.
            return any(is_matched) if schema.allow_partial_inclusion else all(is_matched)

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(retain_row, axis=1)]
class FilterByNComponentsSchema(CurationComponentSchema):
    """Schema for ``FilterByNComponents``."""

    # Discriminator used when (de)serializing this schema.
    type: Literal["FilterByNComponents"] = "FilterByNComponents"

    n_components: List[PositiveInt] = Field(
        ...,
        description="The number of components that measurements should have been "
        "measured for in order to be retained.",
    )
class FilterByNComponents(CurationComponent):
    """A component which filters out data points measured for systems with specified
    number of components.
    """

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByNComponentsSchema,
        n_processes,
    ) -> pandas.DataFrame:
        # Retain only rows whose component count is one of the allowed values.
        retained_mask = data_frame["N Components"].isin(schema.n_components)
        return data_frame[retained_mask]
class FilterBySubstancesSchema(CurationComponentSchema):
type: Literal["FilterBySubstances"] = "FilterBySubstances"
substances_to_include: | |
<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Model includes the :class:`Model` class, the main structure of an optimization model
'''
import inspect
from math import inf
from types import GeneratorType
import warnings
import numpy as np
import pandas as pd
import sasoptpy.components
import sasoptpy.utils
class Model:
'''
Creates an optimization model
Parameters
----------
name : string
Name of the model
session : :class:`swat.cas.connection.CAS` object or \
:class:`saspy.SASsession` object, optional
CAS or SAS Session object
Examples
--------
>>> from swat import CAS
>>> import sasoptpy as so
>>> s = CAS('cas.server.address', port=12345)
>>> m = so.Model(name='my_model', session=s)
NOTE: Initialized model my_model
>>> mip = so.Model(name='mip')
NOTE: Initialized model mip
'''
def __init__(self, name, session=None):
    """Initialize an empty optimization model registered under *name*."""
    # Sanitize/uniquify the model name and register it globally.
    self._name = sasoptpy.utils.check_name(name, 'model')
    self._session = session
    # Core model components.
    self._variables = []
    self._constraints = []
    self._vargroups = []
    self._congroups = []
    # Objective defaults to the constant 0, with a minimization sense.
    self._objective = sasoptpy.components.Expression(0, name=name+'_obj')
    self._datarows = []
    self._sense = sasoptpy.utils.MIN
    # Name -> object lookup tables for fast access.
    self._variableDict = {}
    self._constraintDict = {}
    self._vcid = {}
    # Solve bookkeeping: time, objective value and solver status.
    self._soltime = 0
    self._objval = None
    self._status = ''
    self._castablename = None
    self._mpsmode = 0
    self._problemSummary = None
    self._solutionSummary = None
    self._primalSolution = pd.DataFrame()
    self._dualSolution = pd.DataFrame()
    # Solver-option dictionaries for MILP and LP solve calls.
    self._milp_opts = {}
    self._lp_opts = {}
    # Abstract (OPTMODEL) modeling elements: sets, parameters,
    # implicit variables and raw statements.
    self._sets = []
    self._parameters = []
    self._impvars = []
    self._statements = []
    # Global creation-order index, used when generating model code.
    self._objorder = sasoptpy.utils.register_name(name, self)
    self.response = None
    print('NOTE: Initialized model {}.'.format(name))
def __eq__(self, other):
    """Equality check; only another :class:`Model` can compare equal.

    A RuntimeWarning is emitted and ``False`` returned when *other* is
    not a Model; otherwise the default (identity-based) comparison from
    the superclass is used.
    """
    if isinstance(other, sasoptpy.Model):
        return super().__eq__(other)
    warnings.warn(
        'Cannot compare Model object with {}'.format(type(other)),
        RuntimeWarning, stacklevel=2)
    return False
def add_variable(self, var=None, vartype=sasoptpy.utils.CONT, name=None,
                 lb=-inf, ub=inf, init=None):
    '''
    Adds a new variable to the model

    New variables can be created via this method or existing variables
    can be added to the model.

    Parameters
    ----------
    var : :class:`Variable` object, optional
        Existing variable to be added to the problem
    vartype : string, optional
        Type of the variable, either 'BIN', 'INT' or 'CONT'
    name : string, optional
        Name of the variable to be created
    lb : float, optional
        Lower bound of the variable
    ub : float, optional
        Upper bound of the variable
    init : float, optional
        Initial value of the variable

    Returns
    -------
    :class:`Variable` object
        Variable that is added to the model

    Examples
    --------
    Adding a variable on the fly

    >>> m = so.Model(name='demo')
    >>> x = m.add_variable(name='x', vartype=so.INT, ub=10, init=2)
    >>> print(repr(x))
    NOTE: Initialized model demo
    sasoptpy.Variable(name='x', lb=0, ub=10, init=2, vartype='INT')

    Adding an existing variable to a model

    >>> y = so.Variable(name='y', vartype=so.BIN)
    >>> m = so.Model(name='demo')
    >>> m.add_variable(var=y)

    Notes
    -----
    * If argument *var* is not None, then all other arguments are ignored.
    * A generic variable name is generated if name argument is None.

    See also
    --------
    :class:`Variable`, :func:`Model.include`
    '''
    # name = check_name(name, 'var')
    # Check bounds
    # None bounds fall back to the defaults: lb 0, ub +infinity.
    if lb is None:
        lb = 0
    if ub is None:
        ub = inf
    # Existing or new variable
    if var is not None:
        if isinstance(var, sasoptpy.components.Variable):
            self._variables.append(var)
        else:
            # NOTE(review): execution still falls through to the dict
            # update below, so a non-Variable 'var' will raise
            # AttributeError on '_name' right after this message.
            print('ERROR: Use the appropriate argument name for variable.')
    else:
        var = sasoptpy.components.Variable(name, vartype, lb, ub, init)
        self._variables.append(var)
    # Index the variable by its (possibly auto-generated) name.
    self._variableDict[var._name] = var
    return var
def add_variables(self, *argv, vg=None, name=None,
                  vartype=sasoptpy.utils.CONT,
                  lb=None, ub=None, init=None, abstract=None):
    '''
    Adds a group of variables to the model

    Parameters
    ----------
    argv : list, dict, :class:`pandas.Index`
        Loop index for variable group
    vg : :class:`VariableGroup` object, optional
        An existing object if it is being added to the model
    name : string, optional
        Name of the variables
    vartype : string, optional
        Type of variables, `BIN`, `INT`, or `CONT`
    lb : list, dict, :class:`pandas.Series`
        Lower bounds of variables
    ub : list, dict, :class:`pandas.Series`
        Upper bounds of variables
    init : list, dict, :class:`pandas.Series`
        Initial values of variables

    See also
    --------
    :class:`VariableGroup`, :meth:`Model.include`

    Notes
    -----
    If `vg` argument is passed, all other arguments are ignored.

    Examples
    --------
    >>> production = m.add_variables(PERIODS, vartype=so.INT,
                                    name='production', lb=min_production)
    >>> print(production)
    >>> print(repr(production))
    Variable Group (production) [
      [Period1: production['Period1',]]
      [Period2: production['Period2',]]
      [Period3: production['Period3',]]
    ]
    sasoptpy.VariableGroup(['Period1', 'Period2', 'Period3'],
    name='production')
    '''
    if vg is not None:
        # An existing group: register its member variables.
        if isinstance(vg, sasoptpy.components.VariableGroup):
            for i in vg:
                self._variables.append(i)
        else:
            print('ERROR: Cannot add variable group of type {}'.format(
                type(vg)))
    else:
        # Create a fresh group over the given index set(s).
        name = sasoptpy.utils.check_name(name, 'var')
        # Abstract groups are indexed by an OPTMODEL Set rather than
        # concrete Python data.
        if abstract is None:
            abstract = isinstance(argv[0], sasoptpy.data.Set)
        vg = sasoptpy.components.VariableGroup(*argv, name=name,
                                               vartype=vartype,
                                               lb=lb, ub=ub, init=init,
                                               abstract=abstract)
        for i in vg:
            self._variables.append(i)
    # Index every member variable by name and record the group itself.
    for i in vg:
        self._variableDict[i._name] = i
    self._vargroups.append(vg)
    return vg
def add_constraint(self, c, name=None):
    '''
    Adds a single constraint to the model

    Parameters
    ----------
    c : Constraint
        Constraint to be added to the model
    name : string, optional
        Name of the constraint

    Returns
    -------
    :class:`Constraint` object

    Examples
    --------
    >>> x = m.add_variable(name='x', vartype=so.INT, lb=0, ub=5)
    >>> y = m.add_variables(3, name='y', vartype=so.CONT, lb=0, ub=10)
    >>> c1 = m.add_constraint(x + y[0] >= 3, name='c1')
    >>> print(c1)
    x  +  y[0]  >=  3

    >>> c2 = m.add_constraint(x - y[2] == [4, 10], name='c2')
    >>> print(c2)
    -  y[2]  +  x  =  [4, 10]

    See also
    --------
    :class:`Constraint`, :meth:`Model.include`
    '''
    if isinstance(c, sasoptpy.components.Constraint):
        # Do not add if the constraint is not valid
        # (an infinite constant on a <= / >= constraint makes it vacuous).
        if ((c._direction == 'L' and c._linCoef['CONST']['val'] == -inf) or
           (c._direction == 'G' and c._linCoef['CONST']['val'] == inf)):
            return None
        self._constraints.append(c)
        # Name the constraint if the caller supplied a name, or if it was
        # never named; register it globally in either case.
        if name is not None or (name is None and c._name is None):
            name = sasoptpy.utils.check_name(name, 'con')
            c._name = name
            c._objorder = sasoptpy.utils.register_name(name, c)
        self._constraintDict[c._name] = c
    else:
        raise Exception('Expression is not a constraint!')
    # Return reference to the Constraint object
    return c
def add_constraints(self, argv, cg=None, name=None):
    '''
    Adds a set of constraints to the model

    Parameters
    ----------
    argv : Generator type objects
        List of constraints as a Generator-type object
    cg : :class:`ConstraintGroup` object, optional
        An existing list of constraints if an existing group is being added
    name : string, optional
        Name for the constraint group and individual constraint prefix

    Returns
    -------
    :class:`ConstraintGroup` object
        A group object for all constraints added

    Examples
    --------
    >>> x = m.add_variable(name='x', vartype=so.INT, lb=0, ub=5)
    >>> y = m.add_variables(3, name='y', vartype=so.CONT, lb=0, ub=10)
    >>> c = m.add_constraints((x + 2 * y[i] >= 2 for i in [0, 1, 2]),
                              name='c')
    >>> print(c)
    Constraint Group (c) [
      [0:  2.0 * y[0]  +  x  >=  2]
      [1:  2.0 * y[1]  +  x  >=  2]
      [2:  2.0 * y[2]  +  x  >=  2]
    ]

    >>> t = m.add_variables(3, 4, name='t')
    >>> ct = m.add_constraints((t[i, j] <= x for i in range(3)
                               for j in range(4)), name='ct')
    >>> print(ct)
    Constraint Group (ct) [
      [(0, 0):  -  x  +  t[0, 0]  <=  0]
      [(0, 1):  t[0, 1]  -  x  <=  0]
      [(0, 2):  -  x  +  t[0, 2]  <=  0]
      [(0, 3):  t[0, 3]  -  x  <=  0]
      [(1, 0):  t[1, 0]  -  x  <=  0]
      [(1, 1):  t[1, 1]  -  x  <=  0]
      [(1, 2):  -  x  +  t[1, 2]  <=  0]
      [(1, 3):  -  x  +  t[1, 3]  <=  0]
      [(2, 0):  -  x  +  t[2, 0]  <=  0]
      [(2, 1):  t[2, 1]  -  x  <=  0]
      [(2, 2):  t[2, 2]  -  x  <=  0]
      [(2, 3):  t[2, 3]  -  x  <=  0]
    ]

    See also
    --------
    :class:`ConstraintGroup`, :meth:`Model.include`
    '''
    if cg is not None:
        # Register an existing group's members. isinstance() (instead of
        # a strict type() comparison) also accepts subclasses.
        if isinstance(cg, sasoptpy.components.ConstraintGroup):
            for i in cg:
                self._constraints.append(i)
                self._constraintDict[i._name] = i
        else:
            print('ERROR: Cannot add constraint group of type {}'.format(
                type(cg)))
        self._congroups.append(cg)
        return cg
    if isinstance(argv, (list, GeneratorType)):
        # Build a new group from a list/generator of constraints.
        name = sasoptpy.utils.check_name(name, 'con')
        cg = sasoptpy.components.ConstraintGroup(argv, name=name)
        for i in cg:
            self._constraints.append(i)
            self._constraintDict[i._name] = i
        self._congroups.append(cg)
        return cg
    if isinstance(argv, sasoptpy.components.Constraint):
        print('WARNING: add_constraints argument is a single' +
              ' constraint, inserting as a single constraint')
        name = sasoptpy.utils.check_name(name, 'con')
        return self.add_constraint(c=argv, name=name)
    # Previously an unsupported argument type fell through and returned
    # None silently; report it explicitly instead.
    print('ERROR: add_constraints cannot handle argument of type {}'.format(
        type(argv)))
    return None
def add_set(self, name, init=None, settype=['num']):
'''
Adds a set to the model
Parameters
----------
name | |
showdate = False, prefix = '', suffix = ''):
return plog('<I> ', msg, showtime, showdate, prefix, suffix, TermColor.Green)
def pdbg(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
    # Debug-level log line: tagged '<D> ' and rendered in cyan.
    return plog('<D> ', msg, showtime, showdate, prefix, suffix, TermColor.Cyan)
def askc(msg, enter = True):
    # Prompt the user with 'msg'; when 'enter' is True, also ask for an
    # explicit [Enter] before reading. Returns the typed line.
    pr(msg)
    if enter:
        pr('Press [Enter] when you are done')
    # Python 2: raw_input() reads a line from stdin as a str.
    return raw_input()

# Short alias for askc().
ask = askc
# print progress
# https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
def pprgrc(finish, total, start_time = None, existing = 0,
           prefix = '', suffix = '', seg = 20):
    # Render/refresh a one-line text progress bar of 'seg' characters,
    # e.g. "[=====_______] 42% (12MB/28MB) ETA: ...".
    # we don't want this goes to the log, so we use stderr
    if total > 0:
        segth = seg * finish // total
        percent = 100 * finish // total
    else:
        # Unknown/zero total: draw a full bar at 100%.
        segth = seg
        percent = 100
    eta = ''
    now = time.time()
    # Only show an ETA once >5% is done, so the speed sample is meaningful.
    # 'existing' is work already present before this run (excluded from speed).
    if start_time is not None and percent > 5 and finish > 0:
        finishf = float(finish) - float(existing)
        totalf = float(total)
        remainf = totalf - float(finish)
        elapsed = now - start_time
        speed = human_speed(finishf / elapsed)
        eta = 'ETA: ' + human_time_short(elapsed * remainf / finishf) + \
            ' (' + speed + ', ' + \
            human_time_short(elapsed) + ' gone)'
    # '\r' returns to the start of the line so each call overwrites the last.
    msg = '\r' + prefix + '[' + segth * '=' + (seg - segth) * '_' + ']' + \
        " {}% ({}/{})".format(percent, si_size(finish), si_size(total)) + \
        ' ' + eta + suffix
    sys.stderr.write(msg + ' ') # space is used as a clearer
    sys.stderr.flush()

# Short alias for pprgrc().
pprgr = pprgrc
# marshalling
def str2bool(s):
    """Interpret a string as a boolean; non-strings pass through unchanged.

    Truthy forms start with 't', 'y' or a non-zero digit (case-insensitive);
    everything else (including the empty string) is False.
    """
    if not isinstance(s, basestring):
        # don't change non-string values
        return s
    if not s:
        return False
    first = s.lower()[0]
    return first == 't' or first == 'y' or ('1' <= first <= '9')
def str2int(s):
    """Convert a string to int; non-string values pass through unchanged."""
    return int(s) if isinstance(s, basestring) else s
def str2float(s):
    """Convert a string to float; non-string values pass through unchanged."""
    return float(s) if isinstance(s, basestring) else s
def si_size(num, precision = 3):
    ''' Format a byte count with a binary-SI suffix (KB/MB/GB/TB/PB).

    DocTests:
    >>> si_size(1000)
    u'1000B'
    >>> si_size(1025)
    u'1.001KB'
    '''
    # Walk up the unit ladder; anything below 1K, or at/above 1E, is
    # reported in plain bytes.
    magnitude = abs(num)
    ladder = [
        (OneM, OneK, 'KB'),
        (OneG, OneM, 'MB'),
        (OneT, OneG, 'GB'),
        (OneP, OneT, 'TB'),
        (OneE, OneP, 'PB')]
    if magnitude >= OneK:
        for upper, divisor, unit in ladder:
            if magnitude < upper:
                return str(round(float(num) / float(divisor), precision)) + unit
    return str(num) + 'B'
# Multipliers for size-suffix letters used by interpret_size().
# NOTE: 'P' (petabyte) was missing even though si_size() can emit 'PB',
# so e.g. interpret_size('10P') raised KeyError; it is now included.
si_table = {
    'K' : OneK,
    'M' : OneM,
    'G' : OneG,
    'T' : OneT,
    'P' : OneP,
    'E' : OneE }
def interpret_size(si):
    '''Parse a size given as an int, or a string with an optional SI suffix.

    >>> interpret_size(10)
    10
    >>> interpret_size('10')
    10
    >>> interpret_size('10b')
    10
    >>> interpret_size('10k')
    10240
    >>> interpret_size('10K')
    10240
    >>> interpret_size('10kb')
    10240
    >>> interpret_size('10kB')
    10240
    >>> interpret_size('a10')
    Traceback (most recent call last):
    ValueError
    >>> interpret_size('10a')
    Traceback (most recent call last):
    KeyError: 'A'
    '''
    # Group 1: digits; group 2: optional suffix letter ('b' excluded so a
    # bare trailing 'b'/'B' lands in group 3 and means plain bytes).
    m = re.match(r"\s*(\d+)\s*([ac-z]?)(b?)\s*$", str(si), re.I)
    if not m:
        raise ValueError
    suffix = m.group(2)
    # No suffix letter means a multiplier of 1; unknown letters raise
    # KeyError from the si_table lookup.
    multiplier = si_table[suffix.upper()] if suffix else 1
    return int(m.group(1)) * multiplier
def human_time(seconds):
    '''Format a duration in seconds as a compact string such as '2m13s'.

    Zero-valued units are omitted entirely (so human_time(0) is '').

    DocTests:
    >>> human_time(0)
    u''
    >>> human_time(122.1)
    u'2m2s'
    >>> human_time(133)
    u'2m13s'
    >>> human_time(12345678)
    u'20W2D21h21m18s'
    '''
    # Use floor division ('//') so the unit values stay integral under
    # Python 3 as well; plain '/' would yield floats there and corrupt
    # the output (e.g. '2.2166666666666668m13s').
    isec = int(seconds)
    s = isec % 60
    m = isec // 60 % 60
    h = isec // 60 // 60 % 24
    d = isec // 60 // 60 // 24 % 7
    w = isec // 60 // 60 // 24 // 7
    result = ''
    # Append only the non-zero units, most significant first.
    for t in [ ('W', w), ('D', d), ('h', h), ('m', m), ('s', s) ]:
        if t[1]:
            result += str(t[1]) + t[0]
    return result
def limit_unit(timestr, num = 2):
    ''' Truncate a human_time()-style string to at most 'num' units.

    DocTests:
    >>> limit_unit('1m2s', 1)
    u'1m'
    >>> limit_unit('1m2s')
    u'1m2s'
    >>> limit_unit('1m2s', 4)
    u'1m2s'
    >>> limit_unit('1d2h3m2s')
    u'1d2h'
    >>> limit_unit('1d2h3m2s', 1)
    u'1d'
    '''
    # Scan the string as alternating <digits><unit-letters> groups and
    # return the prefix containing at most 'num' complete groups.
    l = len(timestr)
    i = 0  # number of groups consumed so far
    p = 0  # current scan position
    while i < num and p <= l:
        # Per-group state: 0 = consuming digits, 1 = consuming the unit
        # characters, 2 = group finished.
        at = 0
        while p < l:
            c = timestr[p]
            if at == 0:
                if c.isdigit():
                    p += 1
                else:
                    at += 1
            elif at == 1:
                if not c.isdigit():
                    p += 1
                else:
                    at += 1
            else:
                break
        i += 1
    return timestr[:p]
def human_time_short(seconds):
    # Like human_time(), but truncated to the two most significant units.
    return limit_unit(human_time(seconds))
def human_speed(speed, precision = 0):
    ''' Format a transfer speed (bytes/second) with a B/s..GB/s suffix.

    DocTests:
    '''
    # https://stackoverflow.com/questions/15263597/python-convert-floating-point-number-to-certain-precision-then-copy-to-string/15263885#15263885
    # Build a fixed-precision float format, e.g. '{:.0f}'.
    numfmt = '{{:.{}f}}'.format(precision)
    if speed < OneK:
        return numfmt.format(speed) + 'B/s'
    elif speed < OneM:
        return numfmt.format(speed / float(OneK)) + 'KB/s'
    elif speed < OneG:
        return numfmt.format(speed / float(OneM)) + 'MB/s'
    elif speed < OneT:
        return numfmt.format(speed / float(OneG)) + 'GB/s'
    else:
        # NOTE(review): speeds >= 1 TB/s deliberately return the joke
        # string 'HAHA' -- presumably considered unreachable in practice.
        return 'HAHA'
def remove_backslash(s):
    """Unescape backslash-escaped forward slashes: ``\\/`` becomes ``/``."""
    return s.replace('\\/', '/')
def rb(s):
    """Short alias of remove_backslash(): unescape ``\\/`` to ``/``."""
    return s.replace('\\/', '/')
# no leading, trailing '/'
# remote path rule:
# - all public methods of ByPy shall accept remote path as "partial path"
# (before calling get_pcs_path())
# - all private methods of ByPy shall accept remote path as "full path"
# (after calling get_pcs_path())
def get_pcs_path(path):
    # Map a user-supplied "partial" remote path onto the full PCS path
    # rooted at AppPcsPath; empty or bare-slash paths map to the app root.
    if not path or path == '/' or path == '\\':
        return AppPcsPath
    return (AppPcsPath + '/' + path.strip('/')).rstrip('/')
# guarantee no-exception
def copyfile(src, dst):
    # Copy 'src' to 'dst'. Never raises: failures are reported via perr()
    # and signalled with the EFailToCreateLocalFile error code.
    result = ENoError
    try:
        shutil.copyfile(src, dst)
    except (shutil.Error, IOError) as ex:
        perr("Fail to copy '{}' to '{}'.\nException:\n{}\nStack:{}\n".format(
            src, dst, ex, traceback.format_exc()))
        result = EFailToCreateLocalFile
    return result
def movefile(src, dst):
    # Move 'src' to 'dst'. Never raises: failures are reported via perr()
    # and signalled with the EFailToCreateLocalFile error code.
    result = ENoError
    try:
        shutil.move(src, dst)
    except (shutil.Error, OSError) as ex:
        perr("Fail to move '{}' to '{}'.\nException:\n{}\nStack:\n{}\n".format(
            src, dst, ex, traceback.format_exc()))
        result = EFailToCreateLocalFile
    return result
def removefile(path, verbose = False):
    """Delete a local file; never raises.

    Failures are reported via perr() and signalled by returning
    EFailToDeleteFile; success returns ENoError. An empty/falsy path is
    silently treated as success.
    """
    result = ENoError
    try:
        if verbose:
            pr("Removing local file '{}'".format(path))
        if path:
            os.remove(path)
    except Exception as ex:
        # Fixed typo in the error message: 'fle' -> 'file'.
        perr("Fail to remove local file '{}'.\nException:\n{}\nStack:{}\n".format(
            path, ex, traceback.format_exc()))
        result = EFailToDeleteFile
    return result
def removedir(path, verbose = False):
    # Recursively delete a local directory. Never raises: failures are
    # reported via perr() and signalled with EFailToDeleteDir.
    result = ENoError
    try:
        if verbose:
            pr("Removing local directory '{}'".format(path))
        if path:
            shutil.rmtree(path)
    except Exception as ex:
        perr("Fail to remove local directory '{}'.\nException:\n{}\nStack:{}\n".format(
            path, ex, traceback.format_exc()))
        result = EFailToDeleteDir
    return result
def makedir(path, mode = 0o777, verbose = False):
    # Create a local directory tree (no-op if it already exists).
    # Never raises: failures are reported via perr() and signalled with
    # EFailToCreateLocalDir. 'mode' is passed through to os.makedirs().
    result = ENoError
    if verbose:
        pr("Creating local directory '{}'".format(path))
    if path and not os.path.exists(path):
        try:
            os.makedirs(path, mode)
        except os.error as ex:
            perr("Failed at creating local dir '{}'.\nException:\n{}\nStack:{}\n".format(
                path, ex, traceback.format_exc()))
            result = EFailToCreateLocalDir
    return result
# guarantee no-exception
def getfilesize(path):
    # Return the file size in bytes, or -1 on failure (error is logged).
    size = -1
    try:
        size = os.path.getsize(path)
    except os.error:
        perr("Exception occured while getting size of '{}'. Exception:\n{}".format(path, traceback.format_exc()))
    return size
# guarantee no-exception
def getfilemtime(path):
    # Return the file modification time, or -1 on failure (error is logged).
    mtime = -1
    try:
        mtime = os.path.getmtime(path)
    except os.error:
        perr("Exception occured while getting modification time of '{}'. Exception:\n{}".format(path, traceback.format_exc()))
    return mtime
# seems os.path.join() doesn't handle Unicode well
def joinpath(first, second, sep = os.sep):
    """Glue two path fragments together with exactly one separator.

    Unlike os.path.join(), this simply concatenates, collapsing any
    separators at the seam; empty fragments contribute nothing
    (historically used because os.path.join() handled Unicode poorly
    on Python 2).
    """
    head = (first.rstrip(sep) + sep) if first else ''
    tail = second.lstrip(sep) if second else ''
    return head + tail
def donothing():
    """No-op placeholder callback; returns None."""
    return None
# https://urllib3.readthedocs.org/en/latest/security.html#insecurerequestwarning
def disable_urllib3_warning():
    # Best-effort: silence urllib3's InsecureRequestWarning when the
    # bundled requests/urllib3 is available. The bare except is deliberate
    # (older requests versions lack this API; failure is harmless).
    try:
        import requests.packages.urllib3
        requests.packages.urllib3.disable_warnings()
    except:
        pass
# https://stackoverflow.com/questions/10883399/unable-to-encode-decode-pprint-output
class MyPrettyPrinter(pprint.PrettyPrinter):
    # Python 2 pretty-printer that leaves unicode objects untouched and
    # hex-dumps byte strings that cannot be decoded (e.g. raw MD5 digests),
    # so binary data does not corrupt the terminal output.
    def format(self, obj, context, maxlevels, level):
        """Return the (formatted, readable, recursive) triple for obj."""
        if isinstance(obj, unicode):
            #return (obj.encode('utf8'), True, False)
            return (obj, True, False)
        if isinstance(obj, str):
            convert = False
            #for c in obj:
            #    if ord(c) >= 128:
            #        convert = True
            #        break
            # If the bytes are not decodable text, render them as hex.
            try:
                codecs.decode(obj)
            except:
                convert = True
            if convert:
                return ("0x{}".format(binascii.hexlify(obj)), True, False)
        return pprint.PrettyPrinter.format(self, obj, context, maxlevels, level)
# there is room for more space optimization (like using the tree structure),
# but it's not added at the moment. for now, it's just simple pickle.
# SQLite might be better for portability
# NOTE: file names are case-sensitive
class cached(object):
    ''' simple decorator for hash caching (using pickle) '''
    # All state is class-level and therefore SHARED by every decorated
    # function (md5, slice_md5, crc32, ...): one cache dict, one dirty flag.
    # Cache layout: {abs_dir: {file_name: {'size': ..., 'mtime': ...,
    #                                      <func_name>: <value>, ...}}}
    usecache = True      # when False, always recompute (but still store)
    verbose = False
    debug = False
    cache = {}
    cacheloaded = False
    dirty = False        # True when there are unsaved changes
    # we don't do cache loading / unloading here because it's an decorator,
    # and probably multiple instances are created for md5, crc32, etc
    # it's a bit complex, and i thus don't have the confidence to do it in ctor/dtor
    def __init__(self, f):
        # 'f' is the wrapped hash function; its __name__ keys the cache.
        self.f = f
    def __call__(self, *args):
        # args[0] must be the file path; a cached value is valid only if
        # the file's size and mtime are unchanged since it was stored.
        assert len(args) > 0
        result = None
        path = args[0]
        dir, file = os.path.split(path) # the 'filename' parameter
        absdir = os.path.abspath(dir)
        if absdir in cached.cache:
            entry = cached.cache[absdir]
            if file in entry:
                info = entry[file]
                # NOTE(review): the `self.f.__name__ in info` test appears
                # twice in this condition -- redundant but harmless.
                if self.f.__name__ in info \
                    and info['size'] == getfilesize(path) \
                    and info['mtime'] == getfilemtime(path) \
                    and self.f.__name__ in info \
                    and cached.usecache:
                    result = info[self.f.__name__]
                    if cached.debug:
                        pdbg("Cache hit for file '{}',\n{}: {}\nsize: {}\nmtime: {}".format(
                            path, self.f.__name__,
                            result if isinstance(result, (int, long, float, complex)) else binascii.hexlify(result),
                            info['size'], info['mtime']))
                else:
                    # Stale or disabled cache: recompute and overwrite.
                    result = self.f(*args)
                    self.__store(info, path, result)
            else:
                # Known directory, new file.
                result = self.f(*args)
                entry[file] = {}
                info = entry[file]
                self.__store(info, path, result)
        else:
            # First file seen in this directory.
            result = self.f(*args)
            cached.cache[absdir] = {}
            entry = cached.cache[absdir]
            entry[file] = {}
            info = entry[file]
            self.__store(info, path, result)
        return result
    def __store(self, info, path, value):
        # Record the value along with the file's current size/mtime, which
        # are used later to validate the cached entry.
        cached.dirty = True
        info['size'] = getfilesize(path)
        info['mtime'] = getfilemtime(path)
        info[self.f.__name__] = value
        if cached.debug:
            situation = "Storing cache"
            if cached.usecache:
                situation = "Cache miss"
            pdbg((situation + " for file '{}',\n{}: {}\nsize: {}\nmtime: {}").format(
                path, self.f.__name__,
                value if isinstance(value, (int, long, float, complex)) else binascii.hexlify(value),
                info['size'], info['mtime']))
        # periodically save to prevent loss in case of system crash
        global last_cache_save
        now = time.time()
        if now - last_cache_save >= CacheSavePeriodInSec:
            cached.savecache()
            last_cache_save = now
            if cached.debug:
                pdbg("Periodically saving Hash Cash")
    @staticmethod
    def loadcache():
        # load cache even we don't use cached hash values,
        # because we will save (possibly updated) and hash values
        if not cached.cacheloaded: # no double-loading
            if cached.verbose:
                pr("Loading Hash Cache File '{}'...".format(HashCachePath))
            if os.path.exists(HashCachePath):
                try:
                    with open(HashCachePath, 'rb') as f:
                        cached.cache = pickle.load(f)
                    cached.cacheloaded = True
                    if cached.verbose:
                        pr("Hash Cache File loaded.")
                except (
                    pickle.PickleError,
                    # the following is for dealing with corrupted cache file
                    EOFError, TypeError, ValueError):
                    # Corrupt cache: report and continue without caching.
                    perr("Fail to load the Hash Cache, no caching. Exception:\n{}".format(traceback.format_exc()))
                    cached.cache = {}
            else:
                if cached.verbose:
                    pr("Hash Cache File not found, no caching")
        else:
            if cached.verbose:
                pr("Not loading Hash Cache since 'cacheloaded' is '{}'".format( cached.cacheloaded))
        return cached.cacheloaded
    @staticmethod
    def savecache(force_saving = False):
        # Persist the cache with pickle when there are unsaved changes
        # (or when forced). Returns True if a save actually happened.
        saved = False
        # even if we were unable to load the cache, we still save it.
        if cached.dirty or force_saving:
            if cached.verbose:
                pr("Saving Hash Cache...")
            try:
                with open(HashCachePath, 'wb') as f:
                    pickle.dump(cached.cache, f)
                    f.close()
                if cached.verbose:
                    pr("Hash Cache saved.")
                saved = True
                cached.dirty = False
            except Exception:
                perr("Failed to save Hash Cache. Exception:\n{}".format(traceback.format_exc()))
        else:
            if cached.verbose:
                pr("Not saving Hash Cache since 'dirty' is '{}' and 'force_saving' is '{}'".format(
                    cached.dirty, force_saving))
        return saved
    @staticmethod
    def cleancache():
        # Drop cache entries whose directories/files no longer exist,
        # then save. NOTE(review): deleting from the dict while iterating
        # .keys() is only safe on Python 2, where keys() returns a list --
        # confirm before running under Python 3.
        if cached.loadcache():
            for absdir in cached.cache.keys():
                if not os.path.exists(absdir):
                    if cached.verbose:
                        pr("Directory: '{}' no longer exists, removing the cache entries".format(absdir))
                    cached.dirty = True
                    del cached.cache[absdir]
                else:
                    oldfiles = cached.cache[absdir]
                    files = {}
                    needclean = False
                    for f in oldfiles.keys():
                        #p = os.path.join(absdir, f)
                        p = joinpath(absdir, f)
                        if os.path.exists(p):
                            files[f] = oldfiles[f]
                        else:
                            if cached.verbose:
                                needclean = True
                                pr("File '{}' no longer exists, removing the cache entry".format(p))
                    if needclean:
                        cached.dirty = True
                        cached.cache[absdir] = files
        cached.savecache()
@cached
def md5(filename, slice = OneM):
    # Full-file MD5 digest, read in 'slice'-byte chunks to bound memory.
    # NOTE: 'slice' shadows the builtin; kept for interface compatibility.
    m = hashlib.md5()
    with open(filename, "rb") as f:
        while True:
            buf = f.read(slice)
            if buf:
                m.update(buf)
            else:
                break
    return m.digest()
# slice md5 for baidu rapidupload
@cached
def slice_md5(filename):
    # MD5 of only the first 256KB of the file, as required by Baidu's
    # rapid-upload protocol.
    m = hashlib.md5()
    with open(filename, "rb") as f:
        buf = f.read(256 * OneK)
        m.update(buf)
    return m.digest()
@cached
def | |
"""
Definitions:
A Python module is a Python file ending in .py, which is imported into another script to be used there.
These are used to bring in functions, classes, etc. that are in the module. This allows for organizing of classes
and functions into files, and then used in your main script. Note that if you do this, the imported file must be
findable by your Python environment, i.e. in this case they are contained within the project folder.
Note: there isn't a functional difference between a module and a script--the only difference is intention. A module
is a script imported so you can use the stuff inside of it.
A Python package is a particular way of creating a folder structure that contains Python modules (or sub-folders
that contain Python modules), set up in such a way that it can be treated somewhat like a Python package. A large
set of classes and functions can be organized into multiple modules, sorted into different parts of a package.
An attribute is a characteristic that a Python object has. Things like height, ID, weight, hair color, etc.
A method is a thing that a Python object can do. Things like rowing a boat, adding things together, barking, whatever.
An instance is an object created from a particular class. Dog is a class, and a particular dog named
'Fido' is an instance of the Dog class.
Classes are the templates for an object. To go back to the Dog example, every Dog may have a class attribute 'species'
that has the value 'Canis familiaris'. Every dog can refer to this class attribute as its own. Every dog can also
bark(), wag_tail(), bite(), eat(), drink(), and move(). Since every dog can do this, these are made into methods for
the class.
Now, dogs aren't all the same. We may also want to give instance attributes, where a dog has its own name, age, breed,
height, and weight. These aren't the same for every dog, so we have to tell Python what those values are when we make
the instance of a Dog.
Example of an implementation of this sort of thing can be found in dog.py, with some more notes on classes vs.
instances.
----
When importing specific functions from packages into the environment directly, e.g.
> from <module> import <function>
would recommend using format:
> from <module> import <function1>, <function2>, <function3>, etc.
In cases where you are importing a module within a package, you can give it a shortened name to use in your script
like so:
> import <package_name>.<sub_package_name>.<module_name> as short_name
And then, when you want to, say, call a function in the particular module that was imported, you can do it like this:
> short_name.do_stuff(foo, bar, spam)
You may also see import statements that look like:
> from <module> import *
What this does, is it imports everything that's inside the module into the main script, so instead of using a function
like:
> module.foo()
you can instead just do foo() and get the function that way, even though it's defined in a different module.
This is not really a very good way of doing things. Usually it's okay, but...
Imagine that you have a function named foo(). Now, the module you're trying to import also contains a function
called foo(). When you import the module, the foo() you have in the MAIN script will be overwritten by the foo()
in the MODULE script. This can lead to confusion and weird behavior, since the two functions may do completely
different things.
Note: I'm putting comments immediately before the line that they're relevant to. You might want to do it differently.
Doesn't really matter, mostly a personal preference thing. Good to be consistent though, whatever you choose.
"""
import os
import datetime
import logging
# This is a local file that is imported into the main script.
import logger_initializer
"""
You can use triple quotes (either single quote ' or double quote ") to make multi-line strings. If you don't assign
these strings to anything, but just have them floating like this, then you can also use them as multi-line comments.
Configuration variables such as these should be in all caps, to distinguish them from variables that are used and
edited by the script as it runs.
Generally, static filepaths, non-changing numbers like the number of days in a week, etc. should be constants.
These variables are not officially constant and unchangeable--it's just a signal to any other people who use
this script that these aren't meant to be changed while the script runs.
Note: filepaths are a pain in the ass. When I'm writing filepaths by hand, I use forward slashes because
it's a little shorter than double backslashes (which you need if you're using backslashes in the Windows filepath, as
it does by default). To join paths safely, use os.path.join() instead of messing with it yourself.
When you have a \<letter> type of thing in a string, like '\n', that's interpreted as something specific by the
computer. These are what we're talking about when we're talking about special characters. The example I have here
is a newline.
Example: Compare 'a\nb' and 'a\\nb'.
In the first one, the computer will print:
a
b
^ Notice the newline! :D
In the second one, the computer will print:
a\nb
Normally, \ symbolizes the start of a special character, but when we use it before another \, it means that
the computer should treat the special character '\' like a regular character.
"""
class ScriptConstants:
    """
    This is how you make a class in Python. Normally, classes are made to be used as a sort of template for objects.
    But, in this case, I'm using it to wrap up the constants used when running this file as a script. By doing it this
    way, I can avoid having these variables overwrite same-named variables that are in the script that imports this
    file.
    """
    # Location files/folders are read from.
    # NOTE(review): both paths are hard-coded to one user's desktop --
    # adjust before running on another machine.
    SOURCE_FILEPATH = 'C:/Users/curio/Desktop/test_source_folder'
    # Location where files/folders should be placed--this is where we want files to go. We may want to make a folder
    # inside of this, and then put the files THERE instead. Either way works.
    TARGET_FILEPATH = 'C:/Users/curio/Desktop/test_target_folder'
    WINDOWS_DEFAULT_FILE_PATH_SEPARATOR = '\\'
    NORMAL_PERSON_FILE_PATH_SEPARATOR = '/'
    FILE_EXTENSION_SEPARATOR = '.'  # Not its only use, but oh well. Let's be explicit here.
    NUM_FILE_EXTENSIONS_IN_FILE_NAME = 1  # I mean, there may be more, but we really only care about one.
class FilePair:
    """Pairs a file's original location with its destination.

    Keeping both paths inside a single object (instead of two parallel
    lists) guarantees that a source path can never be accidentally
    matched with the wrong destination path.
    """

    # Class attribute: shared by every FilePair instance, which all see it
    # as if it were their own. Rebinding it on the class changes what ALL
    # instances observe -- unlike the per-instance attributes assigned in
    # __init__(), which belong to one object each.
    _my_class_variable = "Hi! I am a class variable!"

    def __init__(self, old_file_path, new_file_path):
        """Record the source path and destination path for one file.

        __init__() is the special method Python runs when an instance is
        created; it is the place to capture the values the object needs.
        """
        self.old_file_path = old_file_path
        self.new_file_path = new_file_path

    def say_hello(self):
        # Instances resolve _my_class_variable through the class.
        print(self._my_class_variable)
def get_path_contents(folder_path):
"""
Get the entire contents of a path. Returns both files and folders at this path, and does not return
filenames contained within sub-folders at this path.
:param folder_path: a string containing a valid path to a computer directory
:return: a list containing names of files and folders in | |
<reponame>seckcoder/lang-learn
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: <NAME> <<EMAIL>>
# Minor fixes by <NAME>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# (parts based on earlier work by <NAME>)
#
# License: BSD Style.
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize, LabelBinarizer
from .utils import array2d, atleast2d_or_csr
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils import check_arrays
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(BaseEstimator, ClassifierMixin):
    """Abstract base class for naive Bayes estimators"""
    # Python 2 style abstract base class declaration.
    __metaclass__ = ABCMeta

    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X

        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like
        of shape [n_samples, n_classes].

        Input is passed to _joint_log_likelihood as-is by predict,
        predict_proba and predict_log_proba.
        """

    def predict(self, X):
        """
        Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X
        """
        jll = self._joint_log_likelihood(X)
        # Pick, per sample, the class with the highest joint log-likelihood.
        return self.classes_[np.argmax(jll, axis=1)]

    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the sample for each class
            in the model, where classes are ordered arithmetically.
        """
        jll = self._joint_log_likelihood(X)
        # normalize by P(x) = P(f_1, ..., f_n)
        # logsumexp computes log(sum(exp(...))) in a numerically stable way.
        log_prob_x = logsumexp(jll, axis=1)
        return jll - np.atleast_2d(log_prob_x).T

    def predict_proba(self, X):
        """
        Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in
            the model, where classes are ordered arithmetically.
        """
        return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
    """
    Gaussian Naive Bayes (GaussianNB)

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples in the number of samples and
        n_features is the number of features.
    y : array, shape = [n_samples]
        Target vector relative to X

    Attributes
    ----------
    `class_prior_` : array, shape = [n_classes]
        probability of each class.
    `theta_` : array, shape = [n_classes, n_features]
        mean of each feature per class
    `sigma_` : array, shape = [n_classes, n_features]
        variance of each feature per class

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([1, 1, 1, 2, 2, 2])
    >>> from sklearn.naive_bayes import GaussianNB
    >>> clf = GaussianNB()
    >>> clf.fit(X, Y)
    GaussianNB()
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    """

    def fit(self, X, y):
        """Fit Gaussian Naive Bayes according to X, y

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_arrays(X, y, sparse_format='dense')
        n_samples, n_features = X.shape
        if n_samples != y.shape[0]:
            raise ValueError("X and y have incompatible shapes")
        self.classes_ = unique_y = np.unique(y)
        n_classes = unique_y.shape[0]
        self.theta_ = np.zeros((n_classes, n_features))
        self.sigma_ = np.zeros((n_classes, n_features))
        self.class_prior_ = np.zeros(n_classes)
        # Variance floor, so a constant feature does not divide by zero.
        epsilon = 1e-9
        for i, y_i in enumerate(unique_y):
            self.theta_[i, :] = np.mean(X[y == y_i, :], axis=0)
            self.sigma_[i, :] = np.var(X[y == y_i, :], axis=0) + epsilon
            # Bug fix: the np.float alias was removed from numpy; the builtin
            # float is the documented replacement and behaves identically here.
            self.class_prior_[i] = float(np.sum(y == y_i)) / n_samples
        return self

    def _joint_log_likelihood(self, X):
        """Return log P(c) + log P(x|c) for each class, shape [n_samples, n_classes]."""
        X = array2d(X)
        joint_log_likelihood = []
        # Bug fix: xrange is Python 2 only; range works on both.
        for i in range(np.size(self.classes_)):
            jointi = np.log(self.class_prior_[i])
            # Bug fix: the Gaussian normalizer is (2*pi*sigma)^(-1/2); the
            # missing factor of 2 made the reported likelihoods wrong (it
            # cancels in predict/predict_proba, but not in the raw values).
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
            n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
                                 (self.sigma_[i, :]), 1)
            joint_log_likelihood.append(jointi + n_ij)
        joint_log_likelihood = np.array(joint_log_likelihood).T
        return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data

    Any estimator based on this class should provide:

    __init__

    _joint_log_likelihood(X) as per BaseNB
    """

    def fit(self, X, y, sample_weight=None, class_prior=None):
        """Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).
        class_prior : array, shape [n_classes]
            Custom prior probability per class.
            Overrides the fit_prior parameter.

        Returns
        -------
        self : object
            Returns self.
        """
        X = atleast2d_or_csr(X)
        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        n_classes = len(self.classes_)
        if Y.shape[1] == 1:
            # binary problems yield one indicator column; expand to two
            Y = np.concatenate((1 - Y, Y), axis=1)
        if X.shape[0] != Y.shape[0]:
            msg = "X and y have incompatible shapes."
            if issparse(X):
                msg += "\nNote: Sparse matrices cannot be indexed w/ boolean " \
                       "masks (use `indices=True` in CV)."
            raise ValueError(msg)
        if sample_weight is not None:
            Y *= array2d(sample_weight).T
        # Bug fix: compare against None explicitly. `if class_prior:` raises
        # "truth value of an array is ambiguous" when a numpy array of priors
        # is passed for more than one class.
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError(
                    "Number of priors must match number of classes")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            # empirical prior, with sample_weight taken into account
            y_freq = Y.sum(axis=0)
            self.class_log_prior_ = np.log(y_freq) - np.log(y_freq.sum())
        else:
            # uniform prior
            self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
        # N_c_i is the count of feature i in all samples of class c.
        # N_c is the denominator.
        N_c, N_c_i = self._count(X, Y)
        self.feature_log_prob_ = np.log(N_c_i) - np.log(N_c.reshape(-1, 1))
        return self

    # XXX The following is a stopgap measure; we need to set the dimensions
    # of class_log_prior_ and feature_log_prob_ correctly.
    def _get_coef(self):
        return self.feature_log_prob_[1] if len(self.classes_) == 2 \
            else self.feature_log_prob_

    def _get_intercept(self):
        return self.class_log_prior_[1] if len(self.classes_) == 2 \
            else self.class_log_prior_

    coef_ = property(_get_coef)
    intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
    """
    Naive Bayes classifier for multinomial models

    The multinomial Naive Bayes classifier is suitable for classification with
    discrete features (e.g., word counts for text classification). The
    multinomial distribution normally requires integer feature counts. However,
    in practice, fractional counts such as tf-idf may also work.

    Parameters
    ----------
    alpha: float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    fit_prior: boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.

    Attributes
    ----------
    `intercept_`, `class_log_prior_` : array, shape = [n_classes]
        Smoothed empirical log probability for each class.
    `feature_log_prob_`, `coef_` : array, shape = [n_classes, n_features]
        Empirical log probability of features given a class, P(x_i|y).

        (`intercept_` and `coef_` are properties referring to
        `class_log_prior_` and `feature_log_prob_`, respectively.)

    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(5, size=(6, 100))
    >>> Y = np.array([1, 2, 3, 4, 5, 6])
    >>> from sklearn.naive_bayes import MultinomialNB
    >>> clf = MultinomialNB()
    >>> clf.fit(X, Y)
    MultinomialNB(alpha=1.0, fit_prior=True)
    >>> print(clf.predict(X[2]))
    [3]

    Notes
    -----
    For the rationale behind the names `coef_` and `intercept_`, i.e.
    naive Bayes as a linear classifier, see <NAME> et al. (2003),
    Tackling the poor assumptions of naive Bayes text classifiers, ICML.
    """

    def __init__(self, alpha=1.0, fit_prior=True):
        self.alpha = alpha
        self.fit_prior = fit_prior

    def _count(self, X, Y):
        """Count and smooth feature occurrences."""
        feature_values = X.data if issparse(X) else X
        if np.any(feature_values < 0):
            raise ValueError("Input X must be non-negative.")
        # Per-class feature counts, smoothed additively by alpha.
        N_c_i = safe_sparse_dot(Y.T, X) + self.alpha
        # Per-class totals (the normalizing denominator).
        N_c = np.sum(N_c_i, axis=1)
        return N_c, N_c_i

    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        X = atleast2d_or_csr(X)
        log_likelihood = safe_sparse_dot(X, self.feature_log_prob_.T)
        return log_likelihood + self.class_log_prior_
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Parameters
----------
alpha: float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize: float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior: boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
Attributes
----------
`class_log_prior_` : array, shape = [n_classes]
Log probability of each class (smoothed).
`feature_log_prob_` : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
Examples
| |
<reponame>jd-jones/seqtools<gh_stars>1-10
import torch
from mathtools import utils
def log_prob(data_scores, y, max_segs, pw=None):
    """Return ``-y_score - log_Z`` for labelling `y` under the segmental model.

    NOTE(review): `segmental_forward` (defined below) does not accept a
    `semiring` keyword -- that variant is `segmental_forward_eccv` -- so this
    call raises TypeError as written; confirm which forward pass was intended.
    NOTE(review): the negative log-likelihood of `y` would be
    ``log_Z - y_score``, not ``-y_score - log_Z`` -- verify the sign of the
    partition term.
    """
    # Scores has shape (num_segs, num_samples, num_classes),
    # so marginalizing over number of segments and number of classes at the last
    # frame should give us the data likelihood... I think :/
    scores = segmental_forward(data_scores, max_segs, pw=pw, semiring='log')
    log_Z = torch.logsumexp(scores[-1, :], -1)
    if torch.isnan(log_Z).any():
        raise ValueError("log_Z contains NaN values")
    y_score = segmental_score(data_scores, y, pw=pw)
    if torch.isnan(y_score).any():
        raise ValueError("y_score contains NaN values")
    return -y_score - log_Z
def segmental_score(data_scores, y, pw=None):
    """Score a fixed labelling `y`: per-segment frame scores plus transitions.

    :param data_scores: per-frame class scores, shape (T, n_classes)
    :param y: frame labels, run-length decoded via utils.genSegments
    :param pw: pairwise transition scores; only read when `y` has more than
        one segment (a multi-segment `y` with pw=None raises TypeError)
    """
    score = 0
    start_index = 0
    prev_label = None
    for seg_label, seg_len in utils.genSegments(y):
        # NOTE(review): if seg_len is the segment's length, the `+ 1` makes
        # consecutive segments overlap by one frame and drift past the end of
        # data_scores -- confirm genSegments' conventions.
        next_start_index = start_index + seg_len + 1
        score += data_scores[start_index:next_start_index, seg_label].sum()
        if start_index > 0:
            # transition from the previous segment's class into this one
            score += pw[prev_label, seg_label]
        start_index = next_start_index
        prev_label = seg_label
    return score
def segmental_forward(x, max_dur, pw=None):
    """Forward pass of a duration-limited segmental (semi-Markov) DP.

    From S&C NIPS 2004.

    :param x: per-frame class scores, shape (T, n_classes)
    :param max_dur: maximum segment duration considered
    :param pw: pairwise transition scores, shape (n_classes, n_classes);
        defaults to all-zero transitions
    :return: scores, shape (T, n_classes) -- best (max-reduced) score of a
        segmentation ending at each (frame, class)

    NOTE(review): `log_prob` above calls this with a `semiring` keyword this
    function does not accept.
    """
    # From S&C NIPS 2004
    T, n_classes = x.shape
    scores = torch.full([T, n_classes], -float("Inf"), dtype=x.dtype, device=x.device)
    # classes_prev = torch.ones([T, n_classes], np.int)
    if pw is None:
        # NOTE(review): created on the default device -- may differ from x.device
        pw = torch.zeros([n_classes, n_classes], dtype=x.dtype)
    # initialize first segment scores
    integral_scores = torch.cumsum(x, 0)
    scores[0] = integral_scores[0]

    def dur_score(t_end, duration, c):
        # Score of one segment of class c spanning (t_start, t_end], combined
        # with the best prefix ending at t_start for every predecessor class.
        t_start = t_end - duration
        current_segment = integral_scores[t_end, c] - integral_scores[t_start, c]
        # Elementwise semiring times
        dur_scores = scores[t_start, :] + current_segment + pw[:, c]
        # Reduction: semiring plus
        # FIXME: change max to logsumexp
        return dur_scores.max()

    # Compute scores per timestep
    for t_end in range(1, T):
        # Compute scores per class
        for c in range(n_classes):
            # Compute over all durations
            best_dur_scores = torch.tensor(
                [
                    dur_score(t_end, duration, c)
                    for duration in range(1, min(t_end, max_dur) + 1)
                ]
            )
            # FIXME: change max to logsumexp
            best_score = best_dur_scores.max()
            # Add cost of curent frame to best previous cost
            scores[t_end, c] = best_score
    return scores
def segmental_viterbi(x, max_dur, pw=None):
    """Viterbi forward/backward for the duration-limited segmental model.

    From S&C NIPS 2004.

    :param x: per-frame class scores, shape (T, n_classes)
    :param max_dur: maximum segment duration considered
    :param pw: pairwise transition scores, shape (n_classes, n_classes);
        defaults to all-zero transitions
    :return: scores, shape (T, n_classes), with -inf entries zeroed for
        visualization. (The decoded `y_out` is computed but not returned.)
    """
    # From S&C NIPS 2004
    T, n_classes = x.shape
    scores = torch.full([T, n_classes], -float("Inf"), dtype=x.dtype, device=x.device)
    lengths = torch.ones([T, n_classes], dtype=torch.long)
    # classes_prev = torch.ones([T, n_classes], np.int)
    if pw is None:
        pw = torch.zeros([n_classes, n_classes], dtype=x.dtype)
    # initialize first segment scores
    integral_scores = torch.cumsum(x, 0)
    scores[0] = integral_scores[0]
    # -------- Forward -----------
    # Compute scores per timestep
    for t_end in range(1, T):
        # Compute scores per class
        for c in range(n_classes):
            # Compute over all durations
            best_dur = 0
            best_score = -float("Inf")
            # best_class = -1
            for duration in range(1, min(t_end, max_dur) + 1):
                t_start = t_end - duration
                current_segment = integral_scores[t_end, c] - integral_scores[t_start, c]
                if t_start == 0 and current_segment > best_score:
                    # Segment starting at the sequence start: no predecessor.
                    best_dur = duration
                    best_score = current_segment
                    # best_class = -1
                    continue
                # Check if it is cheaper to create a new segment or stay in same class
                for c_prev in range(n_classes):
                    if c_prev == c:
                        continue
                    # Previous segment, other class
                    tmp = scores[t_start, c_prev] + current_segment + pw[c_prev, c]
                    if tmp > best_score:
                        best_dur = duration
                        best_score = tmp
                        # best_class = c_prev
            # Add cost of curent frame to best previous cost
            scores[t_end, c] = best_score
            lengths[t_end, c] = best_dur
            # classes_prev[t_end, c] = best_class
    # Set nonzero entries to 0 for visualization
    # scores[scores<0] = 0
    scores[torch.isinf(scores)] = 0
    # -------- Backward -----------
    classes = [scores[-1].argmax()]
    times = [T]
    t = T - lengths[-1, classes[-1]]
    while t > 0:
        class_prev = scores[t].argmax()
        length = lengths[t, class_prev]
        classes.insert(0, class_prev)
        times.insert(0, t)
        t -= length
    # Bug fix: the dtype must be passed by keyword -- torch.zeros(T, torch.long)
    # raised TypeError because torch.long was parsed as a size element.
    y_out = torch.zeros(T, dtype=torch.long)
    t = 0
    for c, l in zip(classes, times):
        # NOTE(review): `times` holds segment-boundary *timestamps* yet is used
        # as lengths here, and y_out is never returned -- kept as-is.
        y_out[t:t + l] = c
        t += l
    return scores
def segmental_forward_normalized(x, max_segs, pw=None):
    """Segment-count-indexed forward pass. This version maximizes!!!

    :param x: per-frame class scores, shape (T, n_classes)
    :param max_segs: maximum number of segments considered
    :param pw: pairwise transition scores; defaults to all-zero transitions
    :return: scores, shape (max_segs, T, n_classes), -inf entries zeroed
    """
    # Assumes segment function is normalized by duration: f(x)= 1/d sum_t'=t^t+d x_t'
    T, n_classes = x.shape
    scores = torch.full([max_segs, T, n_classes], -float("Inf"), dtype=x.dtype, device=x.device)
    if pw is None:
        pw = torch.zeros([n_classes, n_classes], dtype=x.dtype)
    integral_scores = torch.cumsum(x, 0)
    # Intial scores
    # Bug fix: torch.Tensor has no .copy() (that is the numpy API); .clone()
    # is the torch equivalent.
    scores[0] = integral_scores.clone()
    # Bug fix: the dtype must be passed by keyword; passing torch.long
    # positionally made torch.zeros raise a TypeError.
    starts = torch.zeros([max_segs, n_classes], dtype=torch.long) + 1
    # Compute scores for each segment in sequence
    for m in range(1, max_segs):
        # Compute score for each class
        for c in range(n_classes):
            best_score = -float("Inf")
            for c_prev in range(n_classes):
                if c_prev == c:
                    continue
                # Compute scores for each timestep
                for t in range(1, T):
                    new_segment = integral_scores[t, c] - integral_scores[starts[m, c], c]
                    # Previous segment, other class
                    score_change = scores[m - 1, t, c_prev] + pw[c_prev, c]
                    if score_change > best_score:
                        best_score = score_change
                        starts[m, c] = t
                    # Add cost of curent frame to best previous cost
                    scores[m, t, c] = best_score + new_segment
    # Set nonzero entries to 0 for visualization
    scores[torch.isinf(scores)] = 0
    return scores
def sparsify_incoming_pw(pw):
    """Index the finite entries of `pw` by destination class.

    Output is INCOMING transitions: element i lists the source classes with a
    finite (non -inf) transition score into class i.
    """
    n_classes = pw.shape[0]
    finite_mask = ~torch.isinf(pw.T)
    rows, cols = torch.nonzero(finite_mask, as_tuple=True)  # requires pytorch 1.3
    sparse_idx = [[] for _ in range(n_classes)]
    for dst, src in zip(rows, cols):
        sparse_idx[dst].append(src)
    return sparse_idx
def log_prob_eccv(data_scores, y, max_segs, pw=None):
    """Return ``-y_score - log_Z`` for labelling `y`, using the
    segment-count-indexed forward pass (`segmental_forward_eccv`).

    NOTE(review): the negative log-likelihood of `y` would be
    ``log_Z - y_score`` -- verify the sign of the partition term.
    """
    forward_scores = segmental_forward_eccv(data_scores, max_segs, pw=pw, semiring='log')
    # Marginalize over segment counts, then over classes, at the final frame.
    final_frame = forward_scores[:, -1, :]
    log_Z = torch.logsumexp(torch.logsumexp(final_frame, 0), -1)
    if torch.isnan(log_Z).any():
        raise ValueError("log_Z contains NaN values")
    y_score = segmental_score(data_scores, y, pw=pw)
    if torch.isnan(y_score).any():
        raise ValueError("y_score contains NaN values")
    return -y_score - log_Z
def segmental_forward_eccv(x, max_segs, pw=None, semiring='tropical'):
    """Segment-count-indexed forward DP over a configurable semiring.

    :param x: per-frame class scores, shape (T, n_classes)
    :param max_segs: maximum number of segments considered
    :param pw: pairwise transition scores, shape (n_classes, n_classes);
        defaults to log(1 - I), i.e. uniform transitions with no self-loops
    :param semiring: 'tropical' (max reduction) or 'log' (logsumexp reduction)
    :return: scores, shape (max_segs, T, n_classes)
    :raises ValueError: if x (or the resulting scores) contains NaN
    :raises AssertionError: on an unknown semiring name
    """
    if torch.isnan(x).any():
        raise ValueError("x contains NaN values")
    if semiring == 'tropical':
        def sr_prod(x, y):
            return x + y

        def sr_sum(x):
            return x.max()
    elif semiring == 'log':
        def sr_prod(x, y):
            return x + y

        def sr_sum(x):
            return torch.logsumexp(x, 0)
    else:
        raise AssertionError()
    # Assumes segment function is additive: f(x)=sum_t'=t^t+d x_t'
    T, n_classes = x.shape
    scores = torch.full([max_segs, T, n_classes], -float("Inf"), dtype=x.dtype, device=x.device)
    if pw is None:
        pw = torch.log(1 - torch.eye(n_classes))
        # print("pw is None: Using uniform transition weights (no self-loops)")
    # initialize first segment scores
    scores[0] = torch.cumsum(x, 0)
    # Compute scores per segment
    for m in range(1, max_segs):
        # Compute scores per timestep
        for t in range(1, T):
            # Compute scores per class
            for c in range(n_classes):
                # Elementwise semiring times: either extend the current
                # segment (same m, same class) or close it and start a new one
                # (m - 1, any predecessor class, weighted by pw).
                new_scores = torch.cat(
                    (scores[m, t - 1, c:c + 1], sr_prod(scores[m - 1, t - 1, :], pw[:, c]))
                )
                # Reduction: semiring plus
                best_prev = sr_sum(new_scores)
                # Add cost of curent frame to best previous cost
                scores[m, t, c] = sr_prod(best_prev, x[t, c])
    if torch.isnan(scores).any():
        raise ValueError("scores contains NaN values")
    return scores
def segmental_backward_eccv(scores, pw=None):
    """Decode per-frame labels from segment-count-indexed forward scores.

    :param scores: forward scores, shape (n_segs, T, n_classes), as produced
        by segmental_forward_eccv
    :param pw: pairwise transition scores; defaults to log(1 - I)
    :return: y_out, shape (T,), dtype long -- one class label per frame
    :raises AssertionError: if the traceback does not consume all segments
    """
    n_segs, T, n_classes = scores.shape
    if pw is None:
        pw = torch.log(1 - torch.eye(n_classes))
        # print("pw is None: Using uniform transition weights (no self-loops)")
    # Pick the number of segments whose final-frame score is best.
    # NOTE(review): this rebinds n_segs from a count (size) to an index.
    best_scores = scores[:, -1].max(1).values
    n_segs = torch.argmax(best_scores)
    # Start at end
    seq_c = [scores[n_segs, -1].argmax()]  # Class
    seq_t = [T]  # Time
    m = n_segs
    for t in range(T, -1, -1):
        if m == 0:
            break
        # Scores of previous timestep in current segment
        score_same = scores[m, t - 1, seq_c[0]]
        score_diff = scores[m - 1, t - 1] + pw[:, seq_c[0]]
        # Check if it's better to stay or switch segments
        if any(score_diff > score_same):
            next_class = score_diff.argmax()
            score_diff = score_diff[next_class]
            seq_c.insert(0, next_class)
            seq_t.insert(0, t)
            m -= 1
        elif all(score_diff == score_same):
            # NOTE(review): decrements the segment count without recording a
            # boundary or class -- confirm this tie-handling is intended.
            m -= 1
    seq_t.insert(0, 0)
    if m != 0:
        raise AssertionError("Found " + str(m) + " segments, but expected zero!")
    y_out = torch.full((T,), -1, dtype=torch.long, device=scores.device)
    # Fill each decoded segment [seq_t[i], seq_t[i+1]) with its class.
    for i in range(len(seq_c)):
        y_out[seq_t[i]:seq_t[i + 1]] = seq_c[i]
    return y_out
def segmental_inference(
        x, max_segs=None, pw=None, normalized=False, verbose=False, return_scores=False):
    """Run segmental forward + backward and return the decoded label sequence.

    :param x: per-frame class scores, shape (T, n_classes)
    :param max_segs: maximum number of segments considered
    :param pw: pairwise transition scores (optional)
    :param normalized: unused here -- kept for interface compatibility
    :param verbose: unused here -- kept for interface compatibility
    :param return_scores: if True, also return the per-frame scores of the
        decoded labelling
    :return: y_out, or (y_out, y_scores) when return_scores is True
    """
    # Scores has shape (num_segs, num_samples, num_classes)
    scores = segmental_forward_eccv(x, max_segs, pw)
    y_out = segmental_backward_eccv(scores, pw)
    if return_scores:
        num_segs = len(utils.segment_labels(y_out))
        # Bug fix: numel is a method -- the original passed the bound method
        # object itself to torch.arange, which raises TypeError.
        y_idxs = torch.arange(y_out.numel())
        y_scores = scores[num_segs, y_idxs, y_out]
        return y_out, y_scores
    return y_out
def segmental_forward_oracle(x, max_segs, pw, y_oracle, oracle_valid):
# Assumes segment function is additive: f(x)=sum_t'=t^t+d x_t'
T, n_classes = x.shape
scores = torch.full([max_segs, T, n_classes], -float("Inf"), dtype=x.dtype, device=x.device)
lengths = torch.zeros([max_segs, T, n_classes], torch.long)
if pw is None:
pw = torch.log(1 - torch.eye(n_classes))
# initialize first segment scores
scores[0] = torch.cumsum(x, 0)
# Compute scores per segment
for m in range(1, max_segs):
# scores[m, 0, c] = scores[m-1, 0, c]
# Compute scores per timestep
for t in range(1, T):
# Compute scores per | |
<reponame>yuzeming/angr-management<filename>angrmanagement/ui/widgets/qoperand.py<gh_stars>1-10
from typing import Optional
import logging
from PySide2.QtWidgets import QApplication, QGraphicsSimpleTextItem
from PySide2.QtCore import Qt, QRectF, QPointF
from angr.analyses.disassembly import ConstantOperand, RegisterOperand, MemoryOperand, Value
from ...logic.disassembly.info_dock import OperandDescriptor, OperandHighlightMode
from .qgraph_object import QCachedGraphicsItem
l = logging.getLogger('ui.widgets.qoperand')
class QOperand(QCachedGraphicsItem):
BRANCH_TARGETS_SPACING = 5
LABEL_VARIABLE_SPACING = 5
VARIABLE_IDENT_SPACING = 5
    def __init__(self, workspace, func_addr, disasm_view, disasm, infodock, insn, operand, operand_index,
                 is_branch_target, is_indirect_branch, branch_targets, config, parent=None):
        """A graphics item rendering one operand of a disassembled instruction.

        :param workspace:          The current workspace instance.
        :param func_addr:          Address of the function containing the instruction.
        :param disasm_view:        The disassembly view this item belongs to.
        :param disasm:             The Disassembly analysis result (provides .kb).
        :param infodock:           Shared info-dock state (selection, highlighting).
        :param insn:               The instruction that owns this operand.
        :param operand:            The operand object being rendered.
        :param operand_index:      Index of this operand within the instruction.
        :param is_branch_target:   Whether this operand is a branch target.
        :param is_indirect_branch: Whether the instruction is an indirect branch.
        :param branch_targets:     Known branch target addresses (may be None).
        :param config:             UI configuration (fonts and colors).
        :param parent:             Parent graphics item.
        """
        super().__init__(parent=parent)
        self.workspace = workspace
        self.func_addr = func_addr
        self.disasm_view = disasm_view
        self.disasm = disasm
        self.infodock = infodock
        self.variable_manager = infodock.variable_manager
        self.insn = insn
        self.operand = operand
        self.operand_index = operand_index
        self.is_branch_target = is_branch_target
        self.is_indirect_branch = is_indirect_branch
        self.branch_targets = branch_targets
        # resolved single branch target address, if any
        self._branch_target: Optional[int] = None
        # the variable involved
        self.variable = None
        self._cachy = None
        self._config = config
        # "widgets"
        self._label = None
        self._label_item: Optional[QGraphicsSimpleTextItem] = None
        self._variable_label = None
        self._variable_label_item: Optional[QGraphicsSimpleTextItem] = None
        self._variable_ident = None
        self._variable_ident_item: Optional[QGraphicsSimpleTextItem] = None
        self._branch_targets = None
        self._branch_targets_text = None
        self._branch_targets_item: Optional[QGraphicsSimpleTextItem] = None
        self._is_target_func = None
        self._width = None
        self._height = None
        self._init_widgets()
#
# Properties
#
    @property
    def text(self):
        """The rendered text of this operand."""
        return self._label
@property
def is_constant(self):
return isinstance(self.operand, ConstantOperand)
@property
def constant_value(self):
if self.is_constant:
return self.operand.cs_operand.imm
return None
@property
def is_constant_memory(self):
return (isinstance(self.operand, MemoryOperand) and
len(self.operand.values) == 1 and
isinstance(self.operand.values[0], Value) and
isinstance(self.operand.values[0].val, int)
)
@property
def constant_memory_value(self):
if self.is_constant_memory:
return self.operand.values[0].val
return None
    @property
    def selected(self):
        """Whether this operand is currently selected in the info dock."""
        return self.infodock.is_operand_selected(self.insn.addr, self.operand_index)
@property
def operand_descriptor(self):
return OperandDescriptor(self.text, None,
func_addr=self.func_addr,
variable_ident=self.variable.ident if self.variable is not None else None)
#
# Event handlers
#
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
selected = self.infodock.toggle_operand_selection(
self.insn.addr,
self.operand_index,
self.operand_descriptor,
insn_pos=self.parentItem().scenePos(),
unique=QApplication.keyboardModifiers() != Qt.ControlModifier
)
if selected:
# select the current instruction, too
self.infodock.select_instruction(self.insn.addr, insn_pos=QPointF(self.x(), self.y()), unique=True)
else:
super().mousePressEvent(event)
def mouseDoubleClickEvent(self, event):
button = event.button()
if button == Qt.LeftButton:
if self._branch_target is not None:
self.disasm_view.jump_to(self._branch_target, src_ins_addr=self.insn.addr, use_animation=True)
return
if self.is_constant:
self.disasm_view.jump_to(self.constant_value, src_ins_addr=self.insn.addr, use_animation=True)
return
if self.is_constant_memory:
self.disasm_view.jump_to(self.constant_memory_value, src_ins_addr=self.insn.addr, use_animation=True)
else:
super().mouseDoubleClickEvent(event)
#
# Public methods
#
    def refresh(self):
        """Re-layout child items and recompute the cached size."""
        self._layout_items_and_update_size()
        self.recalculate_size()
def paint(self, painter, option, widget): #pylint: disable=unused-argument
# Background
if self.selected:
painter.setPen(self._config.disasm_view_operand_select_color)
painter.setBrush(self._config.disasm_view_operand_select_color)
painter.drawRect(0, 0, self.width, self.height)
else:
for _, selected_operand_desc in self.infodock.selected_operands.items():
if self._equals_for_highlighting_purposes(selected_operand_desc):
painter.setBrush(self._config.disasm_view_operand_highlight_color)
painter.setPen(self._config.disasm_view_operand_highlight_color)
painter.drawRect(0, 0, self.width, self.height)
break
#
# Private methods
#
def _branch_target_for_operand(self, operand, branch_targets):
if not branch_targets:
return None
if len(branch_targets) == 1:
return next(iter(branch_targets))
# there are more than one targets
# we pick the one that complies with the operand's text
# my solution is pretty hackish...
imm = self.constant_value
if imm is not None and imm in branch_targets:
# problem solved
return imm
else:
# umm why?
pass
# try to render it
rendered = operand.render()[0]
for t in branch_targets:
if rendered in ("%x" % t, "%#x" % t):
return t
if t == rendered:
return t
# ouch not sure what to do
l.warning('Cannot determine branch targets for operand "%s". Please report on GitHub.', rendered)
# return a random one
return next(iter(branch_targets))
@staticmethod
def _first_n_branch_targets(branch_targets, n):
if not branch_targets:
return [ ]
return list(branch_targets)[ : n]
    def _init_widgets(self):
        """Build the text items (label, variable, branch targets, identifier)
        for this operand and lay them out."""
        if self.is_branch_target:
            # a branch instruction
            is_target_func = bool(self.branch_targets is not None
                                  and next(iter(self.branch_targets)) in self.disasm.kb.functions)
            self._label = self.operand.render()[0]
            self._is_target_func = is_target_func
            if self.is_indirect_branch:
                # indirect jump
                self._branch_targets = self.branch_targets
                first_n_targets = self._first_n_branch_targets(self._branch_targets, 3)
                if first_n_targets:
                    targets = [ ]
                    for t in first_n_targets:
                        txt = None
                        if is_target_func:
                            # try to get a function
                            try:
                                target_func = self.disasm.kb.functions.get_by_addr(t)
                                txt = target_func.demangled_name
                            except KeyError:
                                pass
                        # is the address a label?
                        if txt is None and t in self.disasm.kb.labels:
                            txt = self.disasm.kb.labels[t]
                        if txt is None:
                            # use the hex text
                            txt = "%#08x" % t
                        targets.append(txt)
                    self._branch_targets_text = "[ " + ", ".join(targets) + " ]"
                else:
                    self._branch_targets_text = "[ unknown ]"
            # Resolve a single concrete jump-to target when possible.
            if self._branch_targets and len(self._branch_targets) == 1:
                self._branch_target = next(iter(self._branch_targets))
            else:
                self._branch_target = self._branch_target_for_operand(self.operand, self.branch_targets)
        else:
            # not a branch
            formatting = {}
            if isinstance(self.operand, MemoryOperand):
                variable_sort = 'memory'
            elif isinstance(self.operand, RegisterOperand):
                variable_sort = 'register'
            else:
                variable_sort = None
            # without displaying variable
            self._label = self.operand.render(formatting=formatting)[0]
            if variable_sort:
                # try find the corresponding variable
                variable_and_offsets = self.variable_manager[self.func_addr].find_variables_by_insn(self.insn.addr,
                                                                                                   variable_sort
                                                                                                   )
                if variable_and_offsets:
                    variable, offset = self._pick_variable(variable_and_offsets)
                    if variable is not None:
                        self.variable = variable
                        self._variable_ident = "<%s>" % variable.ident
                        if offset is None:
                            offset = 0
                        variable_str = variable.name
                        ident = (self.insn.addr, 'operand', self.operand_index)
                        if 'custom_values_str' not in formatting: formatting['custom_values_str'] = { }
                        if variable_sort == 'memory':
                            if offset == 0: custom_value_str = variable_str
                            else: custom_value_str = "%s[%d]" % (variable_str, offset)
                        else:
                            custom_value_str = ''
                        ##
                        # Hacks
                        ##
                        # Show induction-variable expressions (i*stride+init)
                        # instead of the raw variable name when available.
                        if self.infodock.induction_variable_analysis is not None:
                            r = self.infodock.induction_variable_analysis.variables.get(variable.ident, None)
                            if r is not None and r.expr.__class__.__name__ == "InductionExpr":
                                custom_value_str = "i*%d+%d" % (r.expr.stride, r.expr.init)
                            if r is not None and r.expr.__class__.__name__ == "Add" and r.expr.operands[0].__class__.__name__ == "InductionExpr":
                                custom_value_str = "i*%d+%d" % (r.expr.operands[0].stride, r.expr.operands[0].init + r.expr.operands[1].value)
                        formatting['custom_values_str'][ident] = custom_value_str
                        if 'values_style' not in formatting: formatting['values_style'] = { }
                        formatting['values_style'][ident] = 'curly'
                # with variable displayed
                if variable_sort == 'memory':
                    self._variable_label = self.operand.render(formatting=formatting)[0]
                else:
                    self._variable_label = ''
        # Pick the label color: branch targets, constants, and plain operands
        # each use a dedicated configured color.
        if self._branch_target or self._branch_targets:
            if self._is_target_func:
                label_color = self._config.disasm_view_target_addr_color
            else:
                label_color = self._config.disasm_view_antitarget_addr_color
        else:
            if self.is_constant:
                label_color = self._config.disasm_view_operand_constant_color
            else:
                label_color = self._config.disasm_view_operand_color
        # label
        # [rax]
        self._label_item = QGraphicsSimpleTextItem(self._label, self)
        self._label_item.setFont(self._config.disasm_font)
        self._label_item.setBrush(label_color)
        # variable
        # {s_10}
        if self.disasm_view.show_variable and self._variable_label:
            self._variable_label_item = QGraphicsSimpleTextItem(self._variable_label, self)
            self._variable_label_item.setFont(self._config.disasm_font)
            self._variable_label_item.setBrush(self._config.disasm_view_variable_label_color)
        # additional branch targets
        if self._branch_targets_text:
            self._branch_targets_item = QGraphicsSimpleTextItem(self._branch_targets_text, self)
            self._branch_targets_item.setFont(self._config.disasm_font)
            self._branch_targets_item.setBrush(Qt.darkYellow)  # TODO: Expose as a configuration entry in Config
        # variable identifier
        if self.variable is not None and self.disasm_view.show_variable_identifier:
            self._variable_ident_item = QGraphicsSimpleTextItem(self._variable_ident, self)
            self._variable_ident_item.setFont(self._config.disasm_font)
            self._variable_ident_item.setBrush(Qt.darkGreen)  # TODO: Expose as a configuration entry in Config
        self._layout_items_and_update_size()
def _layout_items_and_update_size(self):
x, y = 0, 0
# label
self._label_item.setPos(x, y)
x += self._label_item.boundingRect().width()
# variable
if self._variable_label_item is not None:
x += self.LABEL_VARIABLE_SPACING
self._variable_label_item.setPos(x, y)
x += self._variable_label_item.boundingRect().width()
# additional branch targets
if self._branch_targets_item is not None:
x += self.BRANCH_TARGETS_SPACING
self._branch_targets_item.setPos(x, y)
x += self._branch_targets_item.boundingRect().width()
# variable identifier
if self._variable_ident_item is not None:
x += self.VARIABLE_IDENT_SPACING
self._variable_ident_item.setPos(x, y)
x += self._variable_ident_item.boundingRect().width()
self._width = x
self._height = self._label_item.boundingRect().height()
self.recalculate_size()
    def _boundingRect(self):
        # Bounds come from the layout computed in _layout_items_and_update_size().
        return QRectF(0, 0, self._width, self._height)
    def _pick_variable(self, variable_and_offsets):
        """
        Pick the corresponding variable for the current operand.

        :param list variable_and_offsets: A list of variables and the offsets into each variable.
        :return: A tuple of variable and the offset.
        :rtype: tuple
        """
        if isinstance(self.operand, MemoryOperand):
            if len(variable_and_offsets) > 1:
                # Unexpected: a memory operand should map to one variable.
                l.error("Instruction %#x has two memory operands. Please report it on GitHub.", self.insn.addr)
            return variable_and_offsets[0]
        elif isinstance(self.operand, RegisterOperand):
            # there might be multiple register-type variables for an instruction. pick the right one is... not easy
            the_reg = self.operand.register
            if the_reg is None:
                # huh, it does not have a Register child
                return None, None
            reg_name = the_reg.reg
            arch = self.workspace.instance.project.arch
            if len(variable_and_offsets) == 1:
                # only one candidate...
                var, offset = variable_and_offsets[0]
                if arch.registers[reg_name][0] == var.reg:
                    return var, offset
                return None, None
            if self.operand_index > 0:
                # this is the source operand
                # which variable is read here?
                for var, offset in variable_and_offsets:
                    if arch.registers[reg_name][0] == var.reg:
                        if self._variable_has_access(var, self.insn.addr, 'read'):
                            return var, offset
                l.debug('Cannot find any source variable for operand %d at instruction %#x.',
                        self.operand_index,
                        self.insn.addr
                        )
                return None, None
            # this is the destination operand
            # which variable is written here?
            for var, offset in variable_and_offsets:
                if arch.registers[reg_name][0] == var.reg:
                    if self._variable_has_access(var, self.insn.addr, 'write'):
                        return var, offset
            l.debug('Cannot find any destination variable for operand %d at instruction %#x.',
                    self.operand_index,
                    self.insn.addr
                    )
            # just return the first one
            # NOTE(review): despite the comment above, (None, None) is returned
            # here, not the first candidate -- possibly a stale comment.
            return None, None
        else:
            # what's this type? why am I here?
            l.error('_pick_variable: Unsupported operand type %s.', self.operand.__class__)
            return None, None
def _variable_has_access(self, variable, ins_addr, access_type):
if variable not in self.variable_manager[self.func_addr]._variable_accesses:
l.error('Variable %s does not have any accessing records.', variable)
return False
accesses = self.variable_manager[self.func_addr]._variable_accesses[variable]
for access in accesses:
if access.location.ins_addr == ins_addr and access.access_type == access_type:
return True
return False
def _equals_for_highlighting_purposes(self, other):
"""
:param OperandDescriptor other: The other operand to compare with.
:return:
"""
if other is None:
return False
highlight_mode = self.infodock.highlight_mode
if highlight_mode == OperandHighlightMode.SAME_TEXT or self.variable is None:
# when there is no related variable, we highlight as long as | |
<filename>test/test_nnapi.py
#!/usr/bin/env python3
# Owner(s): ["oncall: mobile"]
import os
import ctypes
import torch
from typing import Tuple
from torch.backends._nnapi.prepare import convert_model_to_nnapi
from torch.testing._internal.common_utils import TestCase, run_tests
def qpt(t, scale, zero_point, dtype=torch.quint8):
    """Quantize-per-tensor helper: wrap `t` in a tensor and quantize it."""
    return torch.quantize_per_tensor(torch.tensor(t), scale, zero_point, dtype)
def nhwc(t):
    """Return a channels-last copy of `t`, tagged for NNAPI NHWC handling."""
    out = t.clone().contiguous(memory_format=torch.channels_last)
    out.nnapi_nhwc = True
    return out
class TestNNAPI(TestCase):
    def setUp(self):
        """Select the qnnpack engine and decide whether NNAPI models can run."""
        # Avoid saturation in fbgemm
        torch.backends.quantized.engine = 'qnnpack'
        libneuralnetworks_path = os.environ.get("LIBNEURALNETWORKS_PATH")
        if libneuralnetworks_path:
            ctypes.cdll.LoadLibrary(libneuralnetworks_path)
            print("Will attempt to run NNAPI models.")
            self.can_run_nnapi = True
        else:
            # Without libneuralnetworks, tests only verify model conversion.
            self.can_run_nnapi = False
# Created for easy override by subclasses (eg TestNnapiBackend)
    def call_lowering_to_nnapi(self, traced_module, args):
        """Lower a traced module to NNAPI (overridable by subclasses)."""
        return convert_model_to_nnapi(traced_module, args)
# Created for subclasses to set can_run_nnapi (eg TestNnapiBackend)
    def set_can_run_nnapi(self, can_run):
        """Override whether converted models are executed (for subclasses)."""
        self.can_run_nnapi = can_run
    def check(
        self,
        module,
        arg_or_args,
        *,
        trace_args=None,
        convert_args=None,
        atol_rtol=None,
        limit=None,
        expected_memory_format=None
    ):
        """Trace `module`, lower it to NNAPI, and compare eager vs NNAPI outputs.

        :param module: the eager module under test
        :param arg_or_args: a tensor or list of tensors used as example inputs
        :param trace_args: optional distinct inputs to use for tracing
        :param convert_args: optional distinct inputs to use for conversion
        :param atol_rtol: optional (atol, rtol) tolerances for the comparison
        :param limit: max number of quantized-unit mismatches tolerated
        :param expected_memory_format: if set, assert the NNAPI output layout
        """
        with torch.no_grad():
            if isinstance(arg_or_args, torch.Tensor):
                args = [arg_or_args]
            else:
                args = arg_or_args
            module.eval()
            traced = torch.jit.trace(module, trace_args or args)
            nnapi_module = self.call_lowering_to_nnapi(traced, convert_args or args)
            if not self.can_run_nnapi:
                # Only test that the model was converted successfully.
                return
            eager_output = module(*args)
            nnapi_output = nnapi_module(*args)
            kwargs = {}
            if atol_rtol is not None:
                kwargs["atol"] = atol_rtol[0]
                kwargs["rtol"] = atol_rtol[1]
            self.assertEqual(eager_output, nnapi_output, **kwargs)
            if limit is not None:
                # Count elementwise integer mismatches between quantized outputs.
                mismatches = \
                    eager_output.int_repr().to(torch.int32) - \
                    nnapi_output.int_repr().to(torch.int32)
                if mismatches.count_nonzero() > limit:
                    # Too many mismatches. Re-run the check with no tolerance
                    # to get a nice message.
                    self.assertEqual(eager_output, nnapi_output, atol=0, rtol=0)
            if expected_memory_format:
                self.assertTrue(nnapi_output.is_contiguous(memory_format=expected_memory_format))
    def float_and_quant_and_nhwc(self, inp_float, scale, zero_point):
        """Return (name, tensor) variants of ``inp_float``:
        float, float-nhwc, quant, quant-nhwc.

        NOTE(review): the ``scale`` and ``zero_point`` parameters are
        currently ignored — the quantized variants always use
        scale=0.03, zero_point=128. Callers pass (0.3, 128); confirm
        whether this hard-coding is intentional before changing it.
        """
        torch.manual_seed(29)
        inp_quant = qpt(inp_float, 0.03, 128)
        return [
            ("float", inp_float),
            ("float-nhwc", nhwc(inp_float)),
            ("quant", inp_quant),
            ("quant-nhwc", nhwc(inp_quant)),
        ]
def test_prelu(self):
arg = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
single_a = torch.nn.PReLU()
self.check(single_a, arg)
multi_a = torch.nn.PReLU(4)
with torch.no_grad():
multi_a.weight.copy_(torch.tensor([.1, .2, .3, .4]))
self.check(multi_a, nhwc(arg))
# Test flexible size
self.check(
multi_a,
arg,
trace_args=[torch.zeros(1, 4, 3, 3)],
convert_args=[nhwc(torch.zeros(1, 4, 0, 0))],
)
def test_quantize(self):
self.check(
torch.nn.quantized.Quantize(0.25, 2, torch.quint8),
nhwc(torch.tensor([[[[1.0]], [[2.0]]]])))
def test_dequantize(self):
self.check(
torch.nn.quantized.DeQuantize(),
nhwc(qpt([[[[1.0]], [[2.0]]]], 0.25, 2)))
def test_unsqueeze(self):
class UnsqueezeModule(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, arg):
return arg.unsqueeze(self.dim)
self.check(UnsqueezeModule(-2), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(-1), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(0), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(1), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(2), torch.randn(4, 2, 2))
def test_reshape(self):
class ReshapeModule(torch.nn.Module):
def __init__(self, shape):
super().__init__()
self.shape = shape
def forward(self, arg):
return arg.reshape(self.shape)
self.check(
ReshapeModule((2, 4)),
torch.randn(4, 2, 1, 1))
self.check(
ReshapeModule((8, -1)),
nhwc(torch.randn(4, 2, 1, 1)))
with self.assertRaisesRegex(Exception, "target size"):
self.check(
ReshapeModule((2, 4)),
nhwc(torch.randn(4, 2, 1, 1)))
def test_flatten(self):
for mod in [
torch.nn.Flatten(),
torch.nn.Flatten(start_dim=2, end_dim=3),
torch.nn.Flatten(start_dim=2, end_dim=4),
torch.nn.Flatten(start_dim=0, end_dim=-2),
torch.nn.Flatten(start_dim=0, end_dim=4)
]:
self.check(mod, torch.randn(4, 2, 1, 3, 7))
# flex inputs
self.check(
torch.nn.Flatten(),
torch.randn(4, 2, 1, 3, 7),
convert_args=[torch.zeros(0, 2, 1, 3, 7)]
)
# channels last
self.check(
torch.nn.Flatten(),
nhwc(torch.randn(2, 1, 4, 7))
)
self.check(
torch.nn.Flatten(),
nhwc(torch.randn(2, 3, 1, 1))
)
# Exceptions
with self.assertRaisesRegex(Exception, "not supported on NHWC"):
self.check(
torch.nn.Flatten(),
nhwc(torch.randn(1, 3, 4, 4))
)
with self.assertRaisesRegex(Exception, "Flattening flexible dims is not supported yet"):
self.check(torch.nn.Flatten(), torch.randn(4, 2, 0, 0, 7))
with self.assertRaisesRegex(Exception, "Only 1 dim"):
self.check(
torch.nn.Flatten(start_dim=1, end_dim=-2),
torch.randn(0, 2, 1, 3, 0))
def test_slice(self):
class SliceModule(torch.nn.Module):
def __init__(self, start, stop, step):
super().__init__()
self.start = start
self.stop = stop
self.step = step
def forward(self, t):
return t[1:, self.start:self.stop:self.step, :]
class SliceModule2(torch.nn.Module):
def forward(self, t):
return t[3:]
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2)
)
self.check(
SliceModule2(),
torch.randn(5)
)
# flex inputs
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2),
convert_args=[torch.zeros(4, 6, 0)]
)
with self.assertRaisesRegex(Exception, "slice with flexible shape"):
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2),
convert_args=[torch.zeros(0, 0, 0)]
)
def test_cat(self):
class CatModule(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, t1, t2):
return torch.cat([t1, t2], self.dim)
self.check(
CatModule(0),
[
torch.randn(1, 2, 3, 3),
torch.randn(2, 2, 3, 3),
])
self.check(
CatModule(1),
[
torch.randn(1, 2, 3, 3),
torch.randn(1, 4, 3, 3),
])
self.check(
CatModule(1),
[
nhwc(torch.randn(1, 2, 3, 3)),
nhwc(torch.randn(1, 4, 3, 3)),
])
self.check(
CatModule(1),
[
torch.randn(1, 2, 3, 3),
torch.randn(1, 4, 3, 3),
],
convert_args=[
torch.zeros(0, 0, 0, 0),
torch.zeros(0, 0, 0, 0)
])
def test_pointwise_unary(self):
for op in ["relu", "sigmoid"]:
with self.subTest(op):
class UnaryModule(torch.nn.Module):
def forward(self, arg):
if op == "relu":
return torch.nn.functional.relu(arg)
if op == "sigmoid":
return torch.sigmoid(arg)
raise Exception("Bad op")
self.check(UnaryModule(), torch.tensor([-1.0, 1.0]))
self.check(
UnaryModule(),
qpt(torch.tensor([-1.0, 1.0]), 1. / 256, 0),
)
def test_pointwise_binary(self):
for op in ["add", "sub", "mul", "div"]:
with self.subTest(op):
class BinaryModule(torch.nn.Module):
def forward(self, lhs, rhs):
if op == "add":
return lhs + rhs
if op == "sub":
return lhs - rhs
if op == "mul":
return lhs * rhs
if op == "div":
return lhs / rhs
raise Exception("Bad op")
self.check(
BinaryModule(),
[
torch.tensor([1.0, 2.0]),
torch.tensor([3.0, 4.0]),
])
self.check(
BinaryModule(),
[
torch.tensor([[1.0, 2.0]]),
torch.tensor([[3.0, 4.0], [5.0, 6.0]]),
])
with self.assertRaisesRegex(Exception, "Non-equal-rank broadcast"):
self.check(
BinaryModule(),
[
torch.tensor([1.0, 2.0]),
torch.tensor([[3.0, 4.0], [5.0, 6.0]]),
])
def test_pointwise_binary_const(self):
const = torch.randn(1, 4, 6, 6)
class ArgPlusConst(torch.nn.Module):
def forward(self, arg):
return arg + const
class ConstPlusArg(torch.nn.Module):
def forward(self, arg):
return const + arg
arg_contig = torch.randn(2, 4, 6, 6)
arg_nhwc = nhwc(torch.randn(2, 4, 6, 6))
for mod_class in [ArgPlusConst, ConstPlusArg]:
for use_nhwc in [False, True]:
with self.subTest(mod_class=mod_class.__name__, use_nhwc=use_nhwc):
arg = arg_nhwc if use_nhwc else arg_contig
memory_format = torch.channels_last if use_nhwc else torch.contiguous_format
self.check(mod_class(), arg,
expected_memory_format=memory_format)
def test_hardtanh(self):
inp = torch.tensor([-2.0, -0.5, 0.5, 2.0, 7.0])
self.check(torch.nn.Hardtanh(), inp)
self.check(torch.nn.Hardtanh(0.0, 6.0), inp)
with self.assertRaisesRegex(Exception, "hardtanh with args"):
self.check(torch.nn.Hardtanh(0.0, 5.0), inp)
def test_softmax(self):
inp = torch.tensor([[-2.0, -0.5], [0.5, 2.0]])
self.check(torch.nn.Softmax(), inp)
self.check(torch.nn.Softmax(dim=0), inp)
# Test flexible size
self.check(
torch.nn.Softmax(),
inp,
convert_args=[torch.zeros(0, 0)],
)
def test_to(self):
class ToCPU(torch.nn.Module):
def __init__(self):
super().__init__()
self.prelu = torch.nn.PReLU()
def forward(self, x):
y = x.to("cpu")
# add prelu since input operand can't be output
return self.prelu(y)
arg = torch.randn(1, 2, 3, 3)
self.check(ToCPU(), arg)
# Test flexible size
self.check(
ToCPU(),
arg,
convert_args=[torch.zeros(1, 2, 0, 0)],
)
def test_detach(self):
class DetachModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
y = x.detach()
return torch.nn.functional.relu(y)
self.check(DetachModule(), torch.randn(1, 2, 3, 3))
self.check(
DetachModule(), torch.randn(1, 2, 3, 3),
convert_args=[torch.zeros(1, 2, 0, 0)])
def test_log_softmax(self):
inp = torch.randn(3, 10)
self.check(torch.nn.LogSoftmax(), inp)
self.check(torch.nn.LogSoftmax(0), inp)
def test_mean(self):
class MeanModule(torch.nn.Module):
def __init__(self, dim, keep=False):
super().__init__()
self.dim = dim
self.keep = keep
def forward(self, t):
return torch.mean(t, dim=self.dim, keepdim=self.keep)
self.check(MeanModule(0), torch.randn(2, 3))
self.check(MeanModule(1), torch.randn(2, 3))
self.check(MeanModule([2, 3]), torch.randn(2, 3, 6, 6))
self.check(MeanModule([2, 3]), nhwc(torch.randn(2, 3, 6, 6)))
self.check(MeanModule([-1, -2]), nhwc(torch.randn(2, 3, 6, 6)))
self.check(MeanModule([-1, -2], keep=True), nhwc(torch.randn(2, 3, 6, 6)))
def test_max_pool2d(self):
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.MaxPool2d(2), inp)
self.check(torch.nn.MaxPool2d((3, 4)), inp)
self.check(torch.nn.MaxPool2d((3, 4), (1, 2)), inp)
    def test_avg_pool2d(self):
        """AvgPool2d over float/quant x contiguous/NHWC input variants.

        For quantized inputs, allow an off-by-one int-repr difference on up
        to one mismatch per output element (atol_rtol + limit).
        """
        for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
            with self.subTest(name):
                atol_rtol = None
                limit = None
                convert_dims = (2, 3, 0, 0)
                convert_arg = torch.zeros(*convert_dims)
                for model in (
                    torch.nn.AvgPool2d(2),
                    torch.nn.AvgPool2d((3, 4)),
                    torch.nn.AvgPool2d((3, 4), (1, 2))):
                    if "quant" in name:
                        atol_rtol = (1, 0)
                        limit = model(inp).numel()
                        # Quantized flexible-shape arg must itself be quantized.
                        convert_arg = qpt(torch.zeros(*convert_dims), 1.0 / 16, 128)
                    if "nhwc" in name:
                        convert_arg = nhwc(convert_arg)
                    self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
                    self.check(
                        model,
                        inp,
                        convert_args=[convert_arg],
                        atol_rtol=atol_rtol,
                        limit=limit
                    )
def test_adaptive_avg_pool2d(self):
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.AdaptiveAvgPool2d((1, 1)), inp)
with self.assertRaisesRegex(Exception, "with output size"):
self.check(torch.nn.AdaptiveAvgPool2d((2, 2)), inp)
def test_upsample_nearest2d(self):
convert_args = dict(self.float_and_quant_and_nhwc(torch.randn(2, 3, 0, 0), 0.3, 128))
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.UpsamplingNearest2d(size=(16, 20)), inp)
self.check(torch.nn.UpsamplingNearest2d(size=(24, 32)), inp)
self.check(torch.nn.UpsamplingNearest2d(size=(36, 48)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(1.5, 1.5)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(2.0, 2.0)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(3.0, 3.0)), inp)
self.check(
torch.nn.UpsamplingNearest2d(size=(24, 32)), inp,
convert_args=[convert_args[name]]
)
self.check(
torch.nn.UpsamplingNearest2d(scale_factor=(2.0, 2.0)), inp,
convert_args=[convert_args[name]]
)
    def test_linear(self):
        """Dense layer with fixed and flexible batch dimension.

        Two separate Linear instances are created on purpose; the seed makes
        their (different) random weights reproducible.
        """
        torch.manual_seed(29)
        self.check(torch.nn.Linear(16, 32), torch.randn(2, 16))
        self.check(
            torch.nn.Linear(16, 32), torch.randn(2, 16),
            convert_args=[torch.zeros(0, 16)])
def test_conv2d(self):
cases = [
# in_ch, out_ch, kernel, stride, padding, groups, bias, input_dim, name
( 4, 8, (3, 3), 1, 0, 1, 1, (2, 4, 16, 16), "3x3"), # noqa: E201,E241
( 4, 8, (3, 3), 1, 0, 1, 0, (2, 4, 16, 16), "3x3nobias"), # noqa: E201,E241
( 4, 16, (3, 3), 1, 1, 1, 1, (2, 4, 16, 16), "3x3p1"), # noqa: E201,E241
( 8, 8, (3, 3), 2, 0, 1, 1, (2, 8, 16, 16), "3x3s2"), # noqa: E201,E241
( 4, 8, (5, 5), 1, 0, 1, 1, (2, 4, | |
<filename>google/appengine/ext/analytics/stats.py
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Data structures to represent statistics used by analysis library.
Appstats data is loaded into data structures defined in this code.
URLStats holds information about all requests of an URL path,
URLRequestStats holds information about a specific request,
RPCStats holds data about a specific RPC category for each request.
"""
from builtins import range
from builtins import object
import logging
from . import entity
from six.moves import range
def _RPCCategory(rpcstatsproto):
  """Categorize datastore RPCs by entity kind and query details.

  Grouping RPCs only by service and call name is too coarse: two queries on
  different entity kinds should be reported separately. This returns a
  category string (entity kind, plus '_ANC' for ancestor queries) for
  datastore RPCs, and '' for everything else.

  Args:
    rpcstatsproto: IndividualRPCStatsProto from Appstats recording which
      represents statistics for a single RPC in a request.

  Returns:
    Category string for the RPC, or '' when category is not relevant.
  """
  if not rpcstatsproto.has_datastore_details():
    return ''
  details = rpcstatsproto.datastore_details()
  servicecallname = rpcstatsproto.service_call_name()
  if servicecallname == 'datastore_v3.Put':
    return entity.EntityListKind(details.keys_written_list())
  if servicecallname in ('datastore_v3.Get', 'datastore_v3.Next'):
    return entity.EntityListKind(details.keys_read_list())
  if servicecallname == 'datastore_v3.RunQuery':
    if details.has_query_kind():
      kind = details.query_kind()
    else:
      kind = 'NoKind'
    ancestor = '_ANC' if details.has_query_ancestor() else ''
    return '%s%s' %(kind, ancestor)
  return ''
class RPCStats(object):
  """Statistics for one RPC call category within a single request.

  Tracks the number of calls and total time summed across all calls in the
  category. For datastore RPCs, also accumulates the entity keys read,
  written, and missed (failed gets).
  """

  # Short labels for common datastore calls, used in graphs.
  _ABBRV = {
      'datastore_v3.Put': 'ds.Put',
      'datastore_v3.RunQuery': 'ds.Query',
      'datastore_v3.Get': 'ds.Get',
      'datastore_v3.Next': 'ds.Next',
      }

  def __init__(self, rpcstatsproto):
    """Initialize stats from the first RPC seen in this category.

    Args:
      rpcstatsproto: IndividualRPCStatsProto from Appstats recording which
        represents statistics for a single RPC in a request.
    """
    self.servicecallname = rpcstatsproto.service_call_name()
    self.category = _RPCCategory(rpcstatsproto)
    self.time = 0
    self.numcalls = 0
    self.keys_read = []
    self.keys_written = []
    self.keys_failed_get = []
    self.Incr(rpcstatsproto)

  def Incr(self, rpcstatsproto):
    """Fold one more RPC of this category into the totals.

    Adds the RPC's duration, bumps the call count, and extends the lists of
    entity keys read/written/missed.

    Args:
      rpcstatsproto: IndividualRPCStatsProto from Appstats recording which
        represents statistics for a single RPC in a request.
    """
    self.time += int(rpcstatsproto.duration_milliseconds())
    self.numcalls += 1
    if not rpcstatsproto.has_datastore_details():
      return
    details = rpcstatsproto.datastore_details()
    self.keys_read.extend(details.keys_read_list())
    self.keys_written.extend(details.keys_written_list())
    if self.servicecallname == 'datastore_v3.Get':
      # A False entry in the success list marks a failed get for the
      # corresponding key (the two lists are parallel).
      for hit, key in zip(details.get_successful_fetch_list(),
                          details.keys_read_list()):
        if not hit:
          self.keys_failed_get.append(key)

  def GetLabel(self):
    """Return the label used to refer to this RPC category in graphs."""
    label = RPCStats._ABBRV.get(self.servicecallname, self.servicecallname)
    if self.category:
      label = '%s_%s' %(label, self.category)
    return label

  def Match(self, rpcstatsproto):
    """Check whether an RPC belongs to this category.

    Args:
      rpcstatsproto: IndividualRPCStatsProto from Appstats recording which
        represents statistics for a single RPC in a request.

    Returns:
      True if the RPC has the same service call name and category.
    """
    return (rpcstatsproto.service_call_name() == self.servicecallname
            and _RPCCategory(rpcstatsproto) == self.category)
class URLRequestStats(object):
  """Statistics associated with one request of a URL.

  Keeps the list of per-category RPC stats, the request timestamp, the total
  response time, and the total time spent in RPCs.
  """

  def __init__(self, statsproto):
    """Constructor.

    Args:
      statsproto: RequestStatProto from Appstats recording for one request.
    """
    self.rpcstatslist = []
    # Convert start timestamp from milliseconds to seconds.
    self.timestamp = statsproto.start_timestamp_milliseconds() * 0.001
    self.totalresponsetime = int(statsproto.duration_milliseconds())
    for t in statsproto.individual_stats_list():
      self.AddRPCStats(t)
    self.totalrpctime = self.TotalRPCTime()

  def TotalRPCTime(self):
    """Compute total time spent in all RPCs."""
    totalrpctime = 0
    for rpc in self.rpcstatslist:
      totalrpctime += rpc.time
    return totalrpctime

  def AddRPCStats(self, rpcstatsproto):
    """Update statistics for a given RPC called for that URL request."""
    # Fold into an existing category when one matches; otherwise open a new
    # RPCStats bucket for this (service call, category) combination.
    for rpc in self.rpcstatslist:
      if rpc.Match(rpcstatsproto):
        rpc.Incr(rpcstatsproto)
        return
    rpcstats = RPCStats(rpcstatsproto)
    self.rpcstatslist.append(rpcstats)

  def _IncrementCount(self, key_list, group_flag, freq, action):
    """Helper function to increment entity (group) access counts.

    Args:
      key_list: List of entity keys that were accessed.
      group_flag: Boolean. If True, entity group counts are desired.
        If False, entity counts are desired.
      freq: A dictionary keyed on entity (group) kind and name that
        holds counts for reads, writes and misses to that entity (group).
        Mutated in place.
      action: Whether the access was a 'read', 'write' or 'miss'.
    """
    for key in key_list:
      if group_flag:
        name = entity.EntityGroupName(key)
        kind = entity.EntityGroupKind(key)
        kind_name = '%s,%s' %(kind, name)
      else:
        name = entity.EntityFullName(key)
        kind = entity.EntityKind(key)
        kind_name = '%s,%s' %(kind, name)
      if not kind_name in freq:
        freq[kind_name] = {'read': 0, 'write': 0, 'miss': 0}
      freq[kind_name][action] += 1

  def EntityGroupCount(self):
    """Computes reads/writes/failed gets to each entity group for the request.

    Returns:
      freq: Dictionary keyed on entity group. Key is of the form
        'entitygroupkind,entitygroupname' which allows organizing statistics
        of entity groups by their kind. Value is an inner dictionary with 3
        keys: 'read', 'write', and 'miss'. Value of each inner dictionary
        item is the number of reads/writes/failed gets to that entity group
        for the request.
    """
    freq = {}
    for rpcstats in self.rpcstatslist:
      self._IncrementCount(rpcstats.keys_read, True, freq, 'read')
      self._IncrementCount(rpcstats.keys_written, True, freq, 'write')
      self._IncrementCount(rpcstats.keys_failed_get, True, freq, 'miss')
    return freq

  def EntityCount(self):
    """Computes number of reads/writes to each entity for that request.

    Returns:
      freq: Dictionary keyed on entity ('entitykind,entityfullname'), with
        value being an inner dictionary of 'read'/'write'/'miss' counts for
        that entity in this request.
    """
    freq = {}
    for rpcstats in self.rpcstatslist:
      self._IncrementCount(rpcstats.keys_read, False, freq, 'read')
      self._IncrementCount(rpcstats.keys_written, False, freq, 'write')
      self._IncrementCount(rpcstats.keys_failed_get, False, freq, 'miss')
    return freq
class URLStats(object):
"""Statistics associated with a given URL.
For each request of that URL, keep track of statistics associated
with that request such as response time, RPCs called, and
statistics associated with the RPC.
"""
  def __init__(self, url):
    """Constructor.

    Args:
      url: URL path whose request statistics are aggregated here.
    """
    self.url = url
    self.urlrequestlist = []
def AddRequest(self, statsproto):
"""Add stats about new request to that URL."""
requeststats = URLRequestStats(statsproto)
self.urlrequestlist.append(requeststats)
def GetResponseTimeList(self):
"""Returns list of response times across all requests of URL."""
responsetimelist = []
for urlrequest in self.urlrequestlist:
responsetimelist.append(urlrequest.totalresponsetime)
return responsetimelist
  def GetTotalRPCTimes(self):
    """Returns list of total RPC times across all requests of URL."""
    totalrpctimes = []
    for request in self.urlrequestlist:
      totalrpctimes.append(request.totalrpctime)
    return totalrpctimes
def _Count(self, group_flag):
"""Helper function to count accesses to entities (entity groups).
Args:
group_flag: Boolean. If true, count entity groups. If false, count
entities.
Returns:
Dictionary keyed on names of entities (entity groups) with values
corresponding to their access counts.
"""
freq_total = {}
for request in self.urlrequestlist:
if group_flag:
freq_request = request.EntityGroupCount()
else:
freq_request = request.EntityCount()
for name, freq in list(freq_request.items()):
if not name in freq_total:
freq_total[name] = {'read': 0, 'write': 0, 'miss': 0}
freq_total[name]['read'] += freq['read']
freq_total[name]['write'] += freq['write']
freq_total[name]['miss'] += freq['miss']
return freq_total
def EntityGroupCount(self):
"""Get reads/writes/failed gets to each entity group over all URL requests.
Returns:
freq_total: Dict keyed on entity group, with | |
<gh_stars>1-10
'''
Created on Dec 20, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
import math, re, sre_constants
from arelle.ModelObject import ModelObject, ModelAttribute
from arelle.ModelValue import (qname, dateTime, DateTime, DATE, DATETIME, dayTimeDuration,
YearMonthDuration, DayTimeDuration, time, Time)
from arelle.FunctionUtil import anytypeArg, atomicArg, stringArg, numericArg, integerArg, qnameArg, nodeArg
from arelle import FunctionXs, XPathContext, XbrlUtil, XmlUtil, UrlUtil, ModelDocument, XmlValidate
from arelle.Locale import format_picture
from arelle.XmlValidate import VALID_NO_CONTENT
from decimal import Decimal
from lxml import etree
DECIMAL_5 = Decimal(.5)
class fnFunctionNotAvailable(Exception):
    """Raised internally when an fn: function is not implemented."""

    _MESSAGE = "fn function not available"

    def __init__(self):
        self.args = (self._MESSAGE,)

    def __repr__(self):
        return self.args[0]
def call(xc, p, localname, contextItem, args):
    """Dispatch an fn: function by local name through the fnFunctions table.

    Raises XPathContext.FunctionNotAvailable for unknown or unimplemented
    functions (the table is defined later in this module).
    """
    try:
        if localname not in fnFunctions: raise fnFunctionNotAvailable
        return fnFunctions[localname](xc, p, contextItem, args)
    except fnFunctionNotAvailable:
        raise XPathContext.FunctionNotAvailable("fn:{0}".format(localname))
def node_name(xc, p, contextItem, args):
    """fn:node-name(node): QName of the node, or () for an empty sequence."""
    node = nodeArg(xc, args, 0, "node()?", missingArgFallback=contextItem, emptyFallback=())
    return qname(node) if node != () else ()
def nilled(xc, p, contextItem, args):
    """fn:nilled(node): True when the node carries xsi:nil='true'."""
    node = nodeArg(xc, args, 0, "node()?", missingArgFallback=contextItem, emptyFallback=())
    if node == () or not isinstance(node, ModelObject):
        return ()
    return node.get("{http://www.w3.org/2001/XMLSchema-instance}nil") == "true"
def string(xc, p, contextItem, args):
    """fn:string(item): string value of the item (or context item)."""
    if len(args) > 1:
        raise XPathContext.FunctionNumArgs()
    item = anytypeArg(xc, args, 0, "item()?", missingArgFallback=contextItem)
    if item == ():
        return ''
    if isinstance(item, ModelObject) and getattr(item, "xValid", 0) == VALID_NO_CONTENT:
        # Element has no typed content: use the inner text of this element
        # and all of its subelements.
        value = item.stringValue
    else:
        value = xc.atomize(p, item)
    return FunctionXs.xsString(xc, p, value)
def data(xc, p, contextItem, args):
    """fn:data(arg): atomized value of the argument sequence."""
    if len(args) != 1:
        raise XPathContext.FunctionNumArgs()
    return xc.atomize(p, args[0])
def base_uri(xc, p, contextItem, args):
    """fn:base-uri(node): URI of the node's model document, else ''."""
    item = anytypeArg(xc, args, 0, "node()?", missingArgFallback=contextItem)
    if item == ():
        return ''
    if not isinstance(item, (ModelObject, ModelDocument)):
        return ''
    return UrlUtil.ensureUrl(item.modelDocument.uri)
def document_uri(xc, p, contextItem, args):
    """fn:document-uri: URI of the entry model document."""
    return xc.modelXbrl.modelDocument.uri
def error(xc, p, contextItem, args):
    """fn:error([code[, description]]): always raises an XPathException."""
    if len(args) > 2:
        raise XPathContext.FunctionNumArgs()
    errQname = qnameArg(xc, p, args, 0, 'QName?', emptyFallback=None)
    description = stringArg(xc, args, 1, "xs:string", emptyFallback='')
    # err:FOER0000 is the default error code when none is supplied.
    raise XPathContext.XPathException(p, (errQname or "err:FOER0000"), description)
def trace(xc, p, contextItem, args):
    """fn:trace is not implemented; always raises fnFunctionNotAvailable."""
    raise fnFunctionNotAvailable()
def fn_dateTime(xc, p, contextItem, args):
    """fn:dateTime(date, time): combine an xs:date and an xs:time."""
    if len(args) != 2: raise XPathContext.FunctionNumArgs()
    date = anytypeArg(xc, args, 0, "xs:date", missingArgFallback=())
    time = anytypeArg(xc, args, 1, "xs:time", missingArgFallback=())
    # NOTE(review): the missing-arg fallback above is (), not None, so this
    # guard may never fire for absent arguments — confirm anytypeArg's
    # contract (it may return None for atomized empty sequences).
    if date is None or time is None:
        return ()
    return dateTime(date) + dayTimeDuration(time)
def fn_abs(xc, p, contextItem, args):
    """fn:abs(arg): absolute value; +inf for infinities, NaN passes through."""
    if len(args) != 1:
        raise XPathContext.FunctionNumArgs()
    value = numericArg(xc, p, args)
    if math.isinf(value):
        return float('inf')
    if math.isnan(value):
        return value
    return abs(value)
def fn_ceiling(xc, p, contextItem, args):
    """fn:ceiling(arg): smallest integer value >= arg.

    Bug fix: the result of math.ceil was computed but never returned, so
    this function always returned None.
    """
    if len(args) != 1: raise XPathContext.FunctionNumArgs()
    return math.ceil(numericArg(xc, p, args))
def fn_floor(xc, p, contextItem, args):
    """fn:floor(arg): largest integer value <= arg.

    Bug fix: the result of math.floor was computed but never returned, so
    this function always returned None.
    """
    if len(args) != 1: raise XPathContext.FunctionNumArgs()
    return math.floor(numericArg(xc, p, args))
def fn_round(xc, p, contextItem, args):
    """fn:round(arg): nearest integer, halves rounded toward +inf.

    Per XPath F&O, fn:round(x) == floor(x + 0.5). The previous
    implementation truncated toward zero via _INT(x + .5), which returned
    e.g. -2 for fn:round(-2.7) instead of the required -3.
    """
    if len(args) != 1: raise XPathContext.FunctionNumArgs()
    x = numericArg(xc, p, args)
    if math.isinf(x) or math.isnan(x):
        return x
    # round towards +inf: floor(x + 0.5); keep Decimal arithmetic exact
    return _INT(math.floor(x + (DECIMAL_5 if isinstance(x, Decimal) else .5)))
def fn_round_half_to_even(xc, p, contextItem, args):
    """fn:round-half-to-even(arg[, precision]): banker's rounding."""
    if not 1 <= len(args) <= 2:
        raise XPathContext.FunctionNumArgs()
    value = numericArg(xc, p, args)
    if len(args) == 2:
        precisionSeq = args[1]
        # The precision argument must be a single integer.
        if len(precisionSeq) != 1 or not isinstance(precisionSeq[0], _INT_TYPES):
            raise XPathContext.FunctionArgType(2, "integer")
        return round(value, precisionSeq[0])
    return round(value)
def codepoints_to_string(xc, p, contextItem, args):
    """fn:codepoints-to-string(arg*): build a string from code points.

    Bug fix: the TypeError handler constructed the FunctionArgType error
    but never raised it, so bad input silently returned None.
    """
    if len(args) != 1: raise XPathContext.FunctionNumArgs()
    try:
        return ''.join(chr(c) for c in args[0])
    except TypeError:
        raise XPathContext.FunctionArgType(1, "xs:integer*")
def string_to_codepoints(xc, p, contextItem, args):
    """fn:string-to-codepoints(arg): tuple of Unicode code points.

    Also renames the local variable that shadowed the builtin ``str``.
    """
    if len(args) != 1: raise XPathContext.FunctionNumArgs()
    value = stringArg(xc, args, 0, "xs:string", emptyFallback=())
    if value == (): return ()
    return tuple(ord(c) for c in value)
def compare(xc, p, contextItem, args):
    """fn:compare(a, b) -> -1/0/1; the collation argument is unsupported."""
    if len(args) == 3: raise fnFunctionNotAvailable()  # collation unsupported
    if len(args) != 2: raise XPathContext.FunctionNumArgs()
    left = stringArg(xc, args, 0, "xs:string?", emptyFallback=())
    right = stringArg(xc, args, 1, "xs:string?", emptyFallback=())
    if left == () or right == ():
        return ()
    if left == right:
        return 0
    return -1 if left < right else 1
def codepoint_equal(xc, p, contextItem, args):
    """fn:codepoint-equal is not implemented."""
    raise fnFunctionNotAvailable()
def concat(xc, p, contextItem, args):
    """fn:concat(a, b, ...): concatenation of the string values of 2+ args."""
    if len(args) < 2:
        raise XPathContext.FunctionNumArgs()
    pieces = []
    for i in range(len(args)):
        item = anytypeArg(xc, args, i, "xs:anyAtomicType?")
        # Empty sequences contribute nothing to the result.
        if item != ():
            pieces.append(FunctionXs.xsString(xc, p, xc.atomize(p, item)))
    return ''.join(pieces)
def string_join(xc, p, contextItem, args):
    """fn:string-join(sequence, separator)."""
    if len(args) != 2: raise XPathContext.FunctionNumArgs()
    separator = stringArg(xc, args, 1, "xs:string")
    pieces = []
    for value in xc.atomize(p, args[0]):
        if not isinstance(value, _STR_BASE):
            raise XPathContext.FunctionArgType(0, "xs:string*")
        pieces.append(value)
    return separator.join(pieces)
def substring(xc, p, contextItem, args):
    """fn:substring(source, start[, length]); start is 1-based per XPath."""
    argCount = len(args)
    if argCount < 2 or argCount > 3:
        raise XPathContext.FunctionNumArgs()
    source = stringArg(xc, args, 0, "xs:string?")
    begin = _INT(round(numericArg(xc, p, args, 1))) - 1  # to 0-based index
    if argCount == 3:
        count = _INT(round(numericArg(xc, p, args, 2)))
        if begin < 0:
            # A negative start consumes part of the requested length.
            count += begin
            if count < 0:
                count = 0
            begin = 0
        return source[begin:begin + count]
    return source[max(begin, 0):]
def string_length(xc, p, contextItem, args):
    """fn:string-length(arg): character count; defaults to context item."""
    if len(args) > 1:
        raise XPathContext.FunctionNumArgs()
    value = stringArg(xc, args, 0, "xs:string", missingArgFallback=contextItem)
    return len(value)
nonSpacePattern = re.compile(r"\S+")
def normalize_space(xc, p, contextItem, args):
    """fn:normalize-space(arg): collapse runs of whitespace to single spaces
    and strip leading/trailing whitespace."""
    if len(args) > 1:
        raise XPathContext.FunctionNumArgs()
    text = stringArg(xc, args, 0, "xs:string", missingArgFallback=contextItem)
    return ' '.join(nonSpacePattern.findall(text))
def normalize_unicode(xc, p, contextItem, args):
    """fn:normalize-unicode is not implemented."""
    raise fnFunctionNotAvailable()
def upper_case(xc, p, contextItem, args):
    """fn:upper-case(arg)."""
    if len(args) != 1:
        raise XPathContext.FunctionNumArgs()
    value = stringArg(xc, args, 0, "xs:string")
    return value.upper()
def lower_case(xc, p, contextItem, args):
    """fn:lower-case(arg)."""
    if len(args) != 1:
        raise XPathContext.FunctionNumArgs()
    value = stringArg(xc, args, 0, "xs:string")
    return value.lower()
def translate(xc, p, contextItem, args):
    """fn:translate(arg, mapString, transString): per-character mapping.

    Characters found in mapString are replaced by the character at the same
    position in transString; characters in mapString past the end of
    transString are deleted. (First occurrence in mapString wins, so a
    str.maketrans table would NOT be equivalent for duplicated map chars.)
    """
    if len(args) != 3:
        raise XPathContext.FunctionNumArgs()
    source = stringArg(xc, args, 0, "xs:string?", emptyFallback=())
    fromChars = stringArg(xc, args, 1, "xs:string", emptyFallback=())
    toChars = stringArg(xc, args, 2, "xs:string", emptyFallback=())
    if source == ():
        return ()
    result = []
    for ch in source:
        pos = fromChars.find(ch)
        if pos < 0:
            result.append(ch)  # not in map: keep unchanged
        elif pos < len(toChars):
            result.append(toChars[pos])  # mapped
        # pos >= len(toChars): character is deleted
    return ''.join(result)
def encode_for_uri(xc, p, contextItem, args):
    """fn:encode-for-uri(arg): percent-encode reserved URI characters."""
    if len(args) != 1:
        raise XPathContext.FunctionNumArgs()
    from urllib.parse import quote
    return quote(stringArg(xc, args, 0, "xs:string"))
def iri_to_uri(xc, p, contextItem, args):
    """fn:iri-to-uri: approximated by delegating to encode_for_uri."""
    return encode_for_uri(xc, p, contextItem, args)
def escape_html_uri(xc, p, contextItem, args):
    """fn:escape-html-uri: approximated by delegating to encode_for_uri."""
    return encode_for_uri(xc, p, contextItem, args)
def contains(xc, p, contextItem, args):
    """fn:contains(string, portion); delegates to substring_functions."""
    return substring_functions(xc, args, contains=True)
def starts_with(xc, p, contextItem, args):
    """fn:starts-with(string, portion); delegates to substring_functions."""
    return substring_functions(xc, args, startEnd=True)
def ends_with(xc, p, contextItem, args):
    """fn:ends-with(string, portion); delegates to substring_functions."""
    return substring_functions(xc, args, startEnd=False)
def substring_before(xc, p, contextItem, args):
    """fn:substring-before(string, portion); delegates to substring_functions."""
    return substring_functions(xc, args, beforeAfter=True)
def substring_after(xc, p, contextItem, args):
    """fn:substring-after(string, portion); delegates to substring_functions."""
    return substring_functions(xc, args, beforeAfter=False)
def substring_functions(xc, args, contains=None, startEnd=None, beforeAfter=None):
    """Shared implementation of fn:contains, fn:starts-with, fn:ends-with,
    fn:substring-before and fn:substring-after.

    Bug fix: fn:substring-after must return the part of the string that
    follows the FIRST occurrence of the search string (XPath F&O); the old
    code used str.rpartition, which split at the LAST occurrence. The dead
    try/except ValueError is removed: str.partition only raises for an
    empty separator, which the preceding guard already handles.
    """
    if len(args) == 3: raise fnFunctionNotAvailable()  # collation unsupported
    if len(args) != 2: raise XPathContext.FunctionNumArgs()
    string = stringArg(xc, args, 0, "xs:string?")
    portion = stringArg(xc, args, 1, "xs:string")
    if contains == True:
        return portion in string
    elif startEnd == True:
        return string.startswith(portion)
    elif startEnd == False:
        return string.endswith(portion)
    elif beforeAfter is not None:
        if portion == '': return ''
        if beforeAfter:
            return string.partition(portion)[0]
        return string.partition(portion)[2]
    raise fnFunctionNotAvailable()  # wrong arguments?
def regexFlags(xc, p, args, n):
    """Translate XPath regex flag characters (s, m, i, x) to re module flags.

    Raises err:FORX0001 for any unrecognized flag character.
    """
    flagMap = {'s': re.S, 'm': re.M, 'i': re.I, 'x': re.X}
    flagsArg = stringArg(xc, args, n, "xs:string", missingArgFallback="", emptyFallback="")
    flags = 0
    for ch in flagsArg:
        if ch not in flagMap:
            raise XPathContext.XPathException(p, 'err:FORX0001', _('Regular expression interpretation flag unrecognized: {0}').format(flagsArg))
        flags |= flagMap[ch]
    return flags
def matches(xc, p, contextItem, args):
    """fn:matches(input, pattern[, flags]): regex search, not full match."""
    if not 2 <= len(args) <= 3: raise XPathContext.FunctionNumArgs()
    inputStr = stringArg(xc, args, 0, "xs:string?", emptyFallback="")
    pattern = stringArg(xc, args, 1, "xs:string", emptyFallback="")
    try:
        return bool(re.search(pattern, inputStr, flags=regexFlags(xc, p, args, 2)))
    except sre_constants.error as err:
        raise XPathContext.XPathException(p, 'err:FORX0002', _('fn:matches regular expression pattern error: {0}').format(err))
def replace(xc, p, contextItem, args):
    """fn:replace(input, pattern, replacement[, flags]).

    Rewrites XPath-style '$N' group references in the replacement to
    Python-style back-references before delegating to re.sub.
    """
    if not 3 <= len(args) <= 4: raise XPathContext.FunctionNumArgs()
    input = stringArg(xc, args, 0, "xs:string?", emptyFallback="")  # empty string is default
    pattern = stringArg(xc, args, 1, "xs:string", emptyFallback="")
    fnReplacement = stringArg(xc, args, 2, "xs:string", emptyFallback="")
    # Reject invalid '$' usage in the replacement string (err:FORX0004).
    if re.findall(r"(^|[^\\])[$]|[$][^0-9]", fnReplacement):
        raise XPathContext.XPathException(p, 'err:FORX0004', _('fn:replace pattern \'$\' error in: {0}').format(fnReplacement))
    # '$N' -> '\N' group references; '\$' -> literal '$'.
    reReplacement = re.sub(r"[\\][$]", "$",
                           re.sub(r"(^|[^\\])[$]([1-9])", r"\\\2", fnReplacement))
    try:
        return re.sub(pattern,reReplacement,input,flags=regexFlags(xc, p, args, 3))
    except sre_constants.error as err:
        raise XPathContext.XPathException(p, 'err:FORX0002', _('fn:replace regular expression pattern error: {0}').format(err))
def tokenize(xc, p, contextItem, args):
    """fn:tokenize is not implemented."""
    raise fnFunctionNotAvailable()
def resolve_uri(xc, p, contextItem, args):
    """fn:resolve-uri(relative, base) via the model manager's web cache."""
    if len(args) != 2:
        raise XPathContext.FunctionNumArgs()
    relative = stringArg(xc, args, 0, "xs:string?", emptyFallback=())
    base = stringArg(xc, args, 1, "xs:string", emptyFallback=())
    webCache = xc.modelXbrl.modelManager.cntlr.webCache
    return webCache.normalizeUrl(relative, base)
def true(xc, p, contextItem, args):
    """fn:true - always returns the xs:boolean value true."""
    return True
def false(xc, p, contextItem, args):
    """fn:false - always returns the xs:boolean value false."""
    return False
def _not(xc, p, contextItem, args):
    """fn:not - negated effective boolean value of the argument.

    Delegates to the local boolean() implementation; the leading underscore
    avoids clashing with Python's `not` keyword.
    """
    return not boolean(xc, p, contextItem, args)
def years_from_duration(xc, p, contextItem, args):
if len(args) != 1: raise XPathContext.FunctionNumArgs()
d = anytypeArg(xc, args, 0, 'duration', missingArgFallback=())
if d == (): return d
if isinstance(d, DayTimeDuration): return | |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from zzz_cognition_msgs/RoadObstacle.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import zzz_driver_msgs.msg
import zzz_perception_msgs.msg
import geometry_msgs.msg
class RoadObstacle(genpy.Message):
_md5sum = "39297845d32da18bcf3a07919856e7b2"
_type = "zzz_cognition_msgs/RoadObstacle"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# This message contains all the information that planning needs for a road object
# Unique indentity of the obstacle
uint64 uid
# The confidence of existence, can be used to determine whether this object is valid, or invalid
# A possible way of get the confidence is by using tracking age
float32 confidence
# The best guess of obstacle type
zzz_perception_msgs/ObjectClass cls
# Estimated kinematic properties
zzz_driver_msgs/RigidBodyState state
# Kinematic properties in Frenet Frame
zzz_driver_msgs/FrenetSerretState2D ffstate
# Relationship to lane, for prediction or locating
float32 lane_index # in which lane
float32 lane_anglediff # angle difference
float32 lane_dist_left_t # distance to lane
float32 lane_dist_right_t
float32 lane_dist_s # s position in the road
# A flag to mark whether the object is static
bool static
# XXX: Do we need history trajectories?
# ----- Physical Boundary (Optional) -----
uint8 shape_type
uint8 SHAPE_UNDEFINED = 0
uint8 SHAPE_POLYGON = 1
# Representation of the object if it's represented by polygon
geometry_msgs/Polygon shape
# bbox, refer to BoundingBox in zzz_perception_msgs
zzz_perception_msgs/DimensionWithCovariance dimension
#uint8 dimension
# Null uncertainty of Inf uncertainty means that the shape is not actually generated
float32[] shape_uncertainty
# ----- High level behavior estimation -----
uint8 behavior
uint8 BEHAVIOR_UNKNOWN = 0
uint8 BEHAVIOR_STOPPING = 1 # vehicle is going to stop completely or stay stopped
uint8 BEHAVIOR_FOLLOW = 2 # follow lane or straight line
uint8 BEHAVIOR_MOVING_LEFT = 3 # changing to its left lane or branch left
uint8 BEHAVIOR_MOVING_RIGHT = 4 # changing to its right lane or branch right
# This field is preserved to adapt to yield situation.
# Yield situation includes left turn, right turn, sequencial stop sign, emergency vehicle, etc.
# Yield means if your path conflict with the object's, you should wait for the object
# to go through the conflict point.
uint8 priority
uint8 PRIORITY_UNKNOWN = 0
uint8 PRIORITY_NORMAL = 1
uint8 PRIORITY_CAUTIOUS = 2 # You can move when this obstacle is far away
uint8 PRIORITY_STOP = 3 # Should let this vehicle to go and then you can move
================================================================================
MSG: zzz_perception_msgs/ObjectClass
# The size of (in meters) the bounding box surrounding the object's center pose.
# The unique numeric classification ID of object detected
uint32 classid
# The probability or confidence value of the detected object. By convention, this value should lie in the range 0~1.
float32 score
# Other information about the class (e.g. class name). Only for debug
string comments
##############################################################
### Here is a hierarchical table of all included types ###
##############################################################
# Hierarchy is encoded in a 32-bit integer. Each 8 bit stand for a level, and leftmost 8 bit is the top level
uint32 UNKNOWN = 0 # 0x0000
uint32 UNKNOWN_DYNAMIC = 16 # 0x0010
uint32 UNKNOWN_STATIC = 32 # 0x0020
uint32 VEHICLE = 1 # 0x0001
uint32 VEHICLE_PASSENGER = 17 # 0x0011, normal passenger_vehicles
uint32 VEHICEL_VAN = 33 # 0x0021
uint32 VEHICLE_TRUCK = 49 # 0x0031
uint32 VEHICLE_BUS = 65 # 0x0041
uint32 VEHICLE_SCHOOLBUS = 321 # 0x0141
uint32 VEHICLE_SCHOOLBUS_STOP = 4417 # 0x1141
uint32 VEHICLE_EMERGENCY = 81 # 0x0051, emergency vehicles, including
uint32 VEHICLE_EMERGENCY_POLICE = 337 # 0x0151
uint32 VEHICLE_EMERGENCY_POLICE_FLASH = 4433 # 0x1151
uint32 VEHICLE_EMERGENCY_FIRE = 593 # 0x0251
uint32 VEHICLE_EMERGENCY_FIRE_FLASH = 4689 # 0x1251
uint32 VEHICLE_EMERGENCY_CIVIL = 849 # 0x0351, including utility vehicle and tow trucks
uint32 VEHICLE_EMERGENCY_CIVIL_FLASH = 4945 # 0x1351
uint32 HUMAN = 2 # 0x0002
uint32 HUMAN_PEDESTRIAN = 18 # 0x0012
uint32 HUMAN_ROADWORKER = 34 # 0x0022
uint32 CYCLIST = 3 # 0x0003
uint32 CYCLIST_BICYCLE = 19 # 0x0013
uint32 CYCLIST_MOTORCYCLE = 35 # 0x0023
uint32 CYCLIST_TRICYCLE = 51 # 0x0033
uint32 ANIMAL = 4 # 0x0004
uint32 ANIMAL_DOGLIKE = 20 # 0x0014, includes dog, cat, wolf, etc.
uint32 ANIMAL_DEERLIKE = 36 # 0x0024, includes deer, etc.
uint32 ANIMAL_COWLIKE = 52 # 0x0034, includes cow, horse, pig, etc.
uint32 ROAD_OBJECT = 5 # 0x0005, objects in road area
uint32 ROAD_TRAFFIC_CONE = 21 # 0x0015, traffic cone
uint32 ROAD_TRAFFIC_BLOCKER = 37 # 0x0025, traffic blocker, e.g. "Road Closed" sign
uint32 ROADSIDE_OBJECT = 6 # 0x0006, objects in road side
uint32 ROADSIDE_TRAFFIC_LIGHT = 22 # 0x0016
uint32 ROADSIDE_TRAFFIC_SIGN = 38 # 0x0026
uint32 ROADSIDE_TREE = 54 # 0x0036, including all roadside vegetation
uint32 LEVEL_MASK_0 = 15 # 0x000f
uint32 LEVEL_MASK_1 = 255 # 0x00ff
uint32 LEVEL_MASK_2 = 4095 # 0x0fff
uint32 LEVEL_MASK_3 = 65535 # 0xffff
================================================================================
MSG: zzz_driver_msgs/RigidBodyState
# This message contains commonly used state variables of rigid body
# ID of frame fixed to the rigid body
string child_frame_id
# Location and orientatation of the object
geometry_msgs/PoseWithCovariance pose
# Linear and angular velocity of the object
geometry_msgs/TwistWithCovariance twist
# Linear and angular acceleration of the object
geometry_msgs/AccelWithCovariance accel
================================================================================
MSG: geometry_msgs/PoseWithCovariance
# This represents a pose in free space with uncertainty.
Pose pose
# Row-major representation of the 6x6 covariance matrix
# The orientation parameters use a fixed-axis representation.
# In order, the parameters are:
# (x, y, z, rotation about X axis, rotation about Y axis, rotation about Z axis)
float64[36] covariance
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: geometry_msgs/TwistWithCovariance
# This expresses velocity in free space with uncertainty.
Twist twist
# Row-major representation of the 6x6 covariance matrix
# The orientation parameters use a fixed-axis representation.
# In order, the parameters are:
# (x, y, z, rotation about X axis, rotation about Y axis, rotation about Z axis)
float64[36] covariance
================================================================================
MSG: geometry_msgs/Twist
# This expresses velocity in free space broken into its linear and angular parts.
Vector3 linear
Vector3 angular
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/AccelWithCovariance
# This expresses acceleration in free space with uncertainty.
Accel accel
# Row-major representation of the 6x6 covariance matrix
# The orientation parameters use a fixed-axis representation.
# In order, the parameters are:
# (x, y, z, rotation about X axis, rotation about Y axis, rotation about Z axis)
float64[36] covariance
================================================================================
MSG: geometry_msgs/Accel
# This expresses acceleration in free space broken into its linear and angular parts.
Vector3 linear
Vector3 angular
================================================================================
MSG: zzz_driver_msgs/FrenetSerretState2D
# This message describes a state in 2d Frenet-Serret Frame
# By 2d Frenet-Serret Frame we ignore the movement in z (in Cartesian) / b (in Frenet) direction
# For more information, refer to https://en.wikipedia.org/wiki/Frenet-Serret_formulas
# 2D states
float32 s # Offset in tanget direction
float32 d # Offset in normal direction
# we omit the offset in binormal direction
float32 psi # Heading angle in s-d plane
float32[9] pose_covariance
# First order derivatives
float32 vs
float32 vd
float32 omega
float32[9] twist_covariance
# Second order derivatives
float32 sa # prevent keyword conflict
float32 ad
float32 epsilon
float32[9] accel_covariance
================================================================================
MSG: geometry_msgs/Polygon
#A specification of a polygon where the first and last points are assumed to be connected
Point32[] points
================================================================================
MSG: geometry_msgs/Point32
# This contains the position of a point in free space(with 32 bits of precision).
# It is recommeded to use Point wherever possible instead of Point32.
#
# This recommendation is to promote interoperability.
#
# This message is designed to take up less space when sending
# lots of points at once, as in the case of a PointCloud.
float32 x
float32 y
float32 z
================================================================================
MSG: zzz_perception_msgs/DimensionWithCovariance
# Describing the size object in 3D space (in meters) with uncertainty
float64 length_x # width
float64 length_y # height
float64 length_z # length
# Row-major representation of the 3x3 covariance matrix
# In order, the parameters are: (length_x, length_y, length_z)
float64[9] covariance"""
# Pseudo-constants
SHAPE_UNDEFINED = 0
SHAPE_POLYGON = 1
BEHAVIOR_UNKNOWN = 0
BEHAVIOR_STOPPING = 1
BEHAVIOR_FOLLOW = 2
BEHAVIOR_MOVING_LEFT = 3
BEHAVIOR_MOVING_RIGHT = 4
PRIORITY_UNKNOWN = 0
PRIORITY_NORMAL = 1
PRIORITY_CAUTIOUS = 2
PRIORITY_STOP = 3
__slots__ = ['uid','confidence','cls','state','ffstate','lane_index','lane_anglediff','lane_dist_left_t','lane_dist_right_t','lane_dist_s','static','shape_type','shape','dimension','shape_uncertainty','behavior','priority']
_slot_types = ['uint64','float32','zzz_perception_msgs/ObjectClass','zzz_driver_msgs/RigidBodyState','zzz_driver_msgs/FrenetSerretState2D','float32','float32','float32','float32','float32','bool','uint8','geometry_msgs/Polygon','zzz_perception_msgs/DimensionWithCovariance','float32[]','uint8','uint8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
uid,confidence,cls,state,ffstate,lane_index,lane_anglediff,lane_dist_left_t,lane_dist_right_t,lane_dist_s,static,shape_type,shape,dimension,shape_uncertainty,behavior,priority
:param args: complete set of field values, | |
# https://github.com/kusaanko/Blender_XFileSupport_BVE
#
# Copyright (c) 2021 kusaanko
# This is licensed under the Apache License 2.0
# see https://github.com/kusaanko/Blender_XFileSupport_BVE/blob/main/LICENSE
import os
import re
import bpy
from bpy.props import StringProperty, BoolProperty, FloatProperty, EnumProperty
from bpy_extras.io_utils import ImportHelper, ExportHelper
import urllib.request
import urllib.parse
import json
import webbrowser
import struct
# Add-on registration metadata read by Blender's add-on manager.
bl_info = {
    "name": "Import/Export DirectX X File (.x) for Bve",
    "author": "kusaanko",
    "version": (2, 0, 0),
    "blender": (2, 83, 0),
    "location": "File > Import / Export > DirectX XFile(.x)",
    "description": "Import/Export files in the DirectX X file (.x)",
    "warning": "This plug-in is for Bve. So some features are not supported.",
    "wiki_url": "https://github.com/kusaanko/Blender_XFileSupport_BVE/wiki",
    "tracker_url": "",
    "category": "Import-Export"
}
# Version string; presumably used by the add-on's update check — keep in
# sync with bl_info["version"] (TODO confirm).
__version__ = "2.0.0"
# locale
# (target_context, key): translated_str
# Japanese UI translations, looked up via bpy.app.translations.pgettext.
# The Japanese values are runtime strings and must stay as-is.
translations_dict = {
    "ja_JP": {
        ("*", "Remove All Objects and Materials"): "全てのオブジェクトとマテリアルを削除する",
        ("*", "The update of XFileSupport is available!"): "XFileSupportの更新が利用可能です!",
        ("*", "Your version:"): "現在のバージョン:",
        ("*", "New version:"): "新しいバージョン:",
        ("*", "Please download from this link."): "このリンクからダウンロードしてください。",
        ("*", "This file is not X file!"): "このファイルはXファイルではありません!",
        ("*", "Output mode"): "出力モード",
        ("*", "Binary"): "バイナリ",
        ("*", "Text mode"): "テキストモード",
        ("*", "Binary mode"): "バイナリモード",
    }
}
# Token identifiers of the binary .x token stream (legacy DirectX X File
# format).  Record-bearing tokens carry a payload after the token id:
TOKEN_NAME = 1
TOKEN_STRING = 2
TOKEN_INTEGER = 3
TOKEN_GUID = 5
TOKEN_INTEGER_LIST = 6
TOKEN_FLOAT_LIST = 7
# Stand-alone punctuation tokens:
TOKEN_OBRACE = 0x0A
TOKEN_CBRACE = 0x0B
TOKEN_OPAREN = 0x0C
TOKEN_CPAREN = 0x0D
TOKEN_OBRACKET = 0x0E
TOKEN_CBRACKET = 0x0F
TOKEN_OANGLE = 0x10
TOKEN_CANGLE = 0x11
TOKEN_DOT = 0x12
TOKEN_COMMA = 0x13
TOKEN_SEMICOLON = 0x14
# Keyword tokens (used in template definitions, which this importer skips):
TOKEN_TEMPLATE = 0x1F
TOKEN_WORD = 0x28
TOKEN_DWORD = 0x29
TOKEN_FLOAT = 0x2A
TOKEN_DOUBLE = 0x2B
TOKEN_CHAR = 0x2C
TOKEN_UCHAR = 0x2D
TOKEN_SWORD = 0x2E
TOKEN_SDWORD = 0x2F
TOKEN_VOID = 0x30
TOKEN_LPSTR = 0x31
TOKEN_UNICODE = 0x32
TOKEN_CSTRING = 0x33
TOKEN_ARRAY = 0x34
class ImportDirectXXFile(bpy.types.Operator, ImportHelper):
    """Import operator: reads a DirectX .x file (text or binary format) and
    creates Blender meshes and materials from it.
    """
    bl_idname = "import_model.directx_x"
    bl_description = 'Import from X file (.x)'
    bl_label = "Import DirectX X File"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_options = {'UNDO'}
    # Path of the .x file chosen in the file browser.
    filepath: StringProperty(
        name="input file",
        subtype='FILE_PATH'
    )
    filename_ext = ".x"
    filter_glob: StringProperty(
        default="*.x",
        options={'HIDDEN'},
    )
    # When set, delete every existing mesh object and material before importing.
    remove_all: BoolProperty(
        name="Remove All Objects and Materials",
        default=True,
    )
    # Uniform scale factor applied to all imported vertex coordinates.
    scale: FloatProperty(
        name="Scale",
        default=1.0
    )
    def __init__(self):
        """Initialize per-import parsing state."""
        # Deduplicated vertex coordinates (already in Blender axis order).
        self.mesh_vertexes = []
        # Faces as indices into mesh_vertexes (duplicates redirected).
        self.mesh_faces = []
        # Maps original X-file vertex index -> index into mesh_vertexes.
        self.mesh_vertexes_redirect = {}
        self.vertexes = []
        # Faces as the exact vertex indices written in the X file (for UVs).
        self.mesh_faces_exact = []
        # Per-vertex UV coordinates (V flipped to Blender's convention).
        self.mesh_tex_coord = []
        # For each face, the index of the material it uses.
        self.material_face_indexes = []
        self.material_count = 0
        self.materials = []
        # Binary-format state: format flag, float width, and the token
        # parser's scratch return values.
        self.is_binary = False
        self.float_size = 32
        self.ret_string = ""
        self.ret_integer = 0
        self.ret_float = 0
        self.ret_integer_list = []
        self.ret_float_list = []
        self.ret_uuid = ""
        self.byte_buffer = ByteBuffer(bytes())
    def parse_mesh(self, element):
        """Parse a text-format Mesh element: vertex positions, then faces.

        Populates self.mesh_vertexes (deduplicated, Blender axis order),
        self.mesh_vertexes_redirect, self.mesh_faces and self.mesh_faces_exact.
        """
        data = element.data
        # The vertex count precedes the first ';'.
        size = int(data[0:data.find(";")].replace(" ", ""))
        num_matcher = NumMatcher(True, True)
        data = data[data.find(";") + 1:]
        num_matcher.set_target(data)
        vertex = [0.0, 0.0, 0.0]
        i = 0
        self.mesh_vertexes = []
        self.mesh_vertexes_redirect = {}
        vertex_index = 0
        # Numbers arrive in x,y,z triples; i counts within the current triple.
        while num_matcher.find():
            vertex[i] = float(num_matcher.group())
            i += 1
            if i == 3:
                i = 0
                # DirectX X Y Z
                # Blender X Z Y
                vector = (vertex[0] * self.scale, vertex[2] * self.scale, vertex[1] * self.scale)
                # Merge duplicated coordinates into one entry and record the
                # redirect target for the original index.
                if vector in self.mesh_vertexes:
                    self.mesh_vertexes_redirect[vertex_index] = self.mesh_vertexes.index(vector)
                else:
                    self.mesh_vertexes_redirect[vertex_index] = len(self.mesh_vertexes)
                    self.mesh_vertexes.append(vector)
                vertex_index += 1
                if vertex_index == size:
                    break
        data = data[num_matcher.get_end() + 1:]
        indexes_size = 0
        size = 0
        positive_num_matcher = NumMatcher(False, True)
        positive_num_matcher.set_target(data)
        indexes = []
        # i == -2: next number is the face count; i == -1: next is the vertex
        # count of the current face; i >= 0: vertex indices of that face.
        i = -2
        self.mesh_faces = []
        self.vertexes = []
        self.mesh_faces_exact = []
        while positive_num_matcher.find():
            if i == -2:
                indexes_size = int(positive_num_matcher.group())
            elif i == -1:
                size = int(positive_num_matcher.group())
                indexes = [0] * size
            else:
                indexes[i] = int(positive_num_matcher.group())
            i += 1
            if i == size:
                i = -1
                # Vertex indices used when storing the face into Blender
                # (reversed — presumably to flip winding order; TODO confirm).
                indexes.reverse()
                vertexes = []
                for l in range(len(indexes)):
                    if indexes[l] in self.mesh_vertexes_redirect:
                        vertexes.append(self.mesh_vertexes_redirect[indexes[l]])
                    else:
                        vertexes.append(indexes[l])
                self.mesh_faces.append(vertexes)
                # Exact vertex indices as written in the X file (used for UVs).
                self.mesh_faces_exact.append(indexes)
                if len(self.mesh_faces) == indexes_size:
                    break
    def parse_texture_coords(self, element):
        """Parse a text-format MeshTextureCoords element into self.mesh_tex_coord.

        UV pairs are appended in file order; V is flipped (v -> 1 - v) to
        match Blender's UV convention.
        """
        data = element.data
        num_matcher = NumMatcher(True, True)
        num_matcher.set_target(data)
        # First number is the UV-pair count.
        num_matcher.find()
        size = int(num_matcher.group())
        vertex = [0.0, 0.0]
        i = 0
        while num_matcher.find():
            vertex[i] = float(num_matcher.group())
            i += 1
            if i == 2:
                i = 0
                vertex[1] = -vertex[1] + 1
                self.mesh_tex_coord.append(vertex)
                # Fresh list so the appended pair is not mutated next round.
                vertex = [0.0, 0.0]
                if len(self.mesh_tex_coord) == size:
                    break
    def parse_mesh_material_list(self, element):
        """Parse a text-format MeshMaterialList header.

        Reads the material count and the per-face material indices into
        self.material_count / self.material_face_indexes.
        """
        data = element.data.replace(" ", "")
        num_matcher = NumMatcher(False, True)
        num_matcher.set_target(data)
        num_matcher.find()
        self.material_count = int(num_matcher.group())
        num_matcher.find()
        # Declared face-index count. NOTE(review): not enforced by the loop
        # below, which consumes every remaining number in the element.
        size = int(num_matcher.group())
        while num_matcher.find():
            self.material_face_indexes.append(int(num_matcher.group()))
    def parse_material(self, element):
        """Parse a text-format Material element into an XMaterial.

        Field layout (as consumed below): diffuse "r;g;b;a;;", power ";",
        specular "r;g;b;;", emissive "r;g;b;;", optionally followed by a
        TextureFilename child element.
        """
        color = element.data[0:element.data.find(";;")].replace(" ", "").split(";")
        d = element.data[element.data.find(";;") + 2:]
        power = float(d[0:d.find(";")])
        d = d[d.find(";") + 1:]
        specular_color = d[0:d.find(";;")].split(";")
        d = d[d.find(";;") + 2:]
        emission_color = d[0:d.find(";;")].split(";")
        # Diffuse RGBA; components missing from the file default to 1.0.
        face_color = [1.0, 1.0, 1.0, 1.0]
        for i in range(len(color)):
            face_color[i] = float(color[i])
        material = XMaterial()
        material.face_color = face_color
        material.power = power
        material.specular_color = (
            float(specular_color[0]),
            float(specular_color[1]),
            float(specular_color[2])
        )
        material.emission_color = (
            float(emission_color[0]),
            float(emission_color[1]),
            float(emission_color[2]),
            1.0
        )
        # Optional TextureFilename child holds the texture path in quotes.
        for tex in element.children:
            if tex.element_type == "TextureFilename":
                material.texture_path = tex.data[tex.data.find("\"") + 1:tex.data.rfind("\"")]
        self.materials.append(material)
    def parse_token(self):
        """Read one token from the binary stream and return its token id.

        Any payload is left in the ret_* scratch fields (ret_string,
        ret_integer, ret_integer_list, ret_float_list).
        """
        token = self.byte_buffer.get_short()
        if token == TOKEN_NAME:
            length = self.byte_buffer.get_int()
            self.ret_string = self.byte_buffer.get_length(length).decode()
        elif token == TOKEN_INTEGER:
            self.ret_integer = self.byte_buffer.get_int()
        elif token == TOKEN_STRING:
            length = self.byte_buffer.get_int()
            self.ret_string = self.byte_buffer.get_length(length).decode()
            # Reads one more token — presumably the string's terminator
            # (semicolon/comma) that follows a STRING record; TODO confirm.
            self.parse_token()
        elif token == TOKEN_GUID:
            # GUIDs are not used, so skip over the 16-byte value.
            self.byte_buffer.get_int()
            self.byte_buffer.get_short()
            self.byte_buffer.get_short()
            self.byte_buffer.get_length(8)
        elif token == TOKEN_INTEGER_LIST:
            length = self.byte_buffer.get_int()
            self.ret_integer_list = [0] * length
            for i in range(length):
                self.ret_integer_list[i] = self.byte_buffer.get_int()
        elif token == TOKEN_FLOAT_LIST:
            length = self.byte_buffer.get_int()
            self.ret_float_list = [0.0] * length
            # float_size 64 selects doubles; defaults to 32-bit floats
            # (presumably set from the file header — TODO confirm).
            if self.float_size == 64:
                for i in range(length):
                    self.ret_float_list[i] = self.byte_buffer.get_double()
            else:
                for i in range(length):
                    self.ret_float_list[i] = self.byte_buffer.get_float()
        elif token == TOKEN_TEMPLATE:
            # Template definitions are not needed; skip to the closing brace.
            self.parse_token_loop(TOKEN_CBRACE)
        return token
    def parse_token_loop(self, token):
        """Consume tokens until one of the given token id is read
        (e.g. skip everything up to TOKEN_CBRACE)."""
        while self.parse_token() != token:
            pass
    def parse_bin(self):
        """Walk the binary token stream and dispatch on recognized data names."""
        self.materials = []
        while self.byte_buffer.has_remaining():
            token = self.parse_token()
            if token == TOKEN_NAME:
                if self.ret_string == "Mesh":
                    self.parse_mesh_bin()
                elif self.ret_string == "MeshTextureCoords":
                    self.parse_mesh_texture_coords_bin()
                elif self.ret_string == "MeshMaterialList":
                    self.parse_mesh_material_list_bin()
    def parse_mesh_bin(self):
        """Parse a binary Mesh record: vertex float list, then face index list."""
        self.parse_token_loop(TOKEN_INTEGER_LIST)
        self.parse_token_loop(TOKEN_FLOAT_LIST)
        self.mesh_vertexes = []
        i = 0
        vertex_index = 0
        # ret_integer_list[0] holds the vertex count; floats come in x,y,z triples.
        while vertex_index < self.ret_integer_list[0]:
            # DirectX X Y Z
            # Blender X Z Y
            vector = (
                self.ret_float_list[i] * self.scale,
                self.ret_float_list[i + 2] * self.scale,
                self.ret_float_list[i + 1] * self.scale
            )
            # Merge duplicated coordinates into one entry and record the
            # redirect target for the original index.
            if vector in self.mesh_vertexes:
                self.mesh_vertexes_redirect[vertex_index] = self.mesh_vertexes.index(vector)
            else:
                self.mesh_vertexes_redirect[vertex_index] = len(self.mesh_vertexes)
                self.mesh_vertexes.append(vector)
            vertex_index += 1
            i += 3
        self.parse_token_loop(TOKEN_INTEGER_LIST)
        self.mesh_faces = []
        # Face list layout: [count?, n, i1..in, n, i1..in, ...]; entry 0 is
        # skipped (presumably the face count — TODO confirm).
        i = 1
        while i < len(self.ret_integer_list):
            length = self.ret_integer_list[i]
            indexes = self.ret_integer_list[i + 1:i + 1 + length]
            # Vertex indices used when storing the face into Blender
            # (reversed — presumably to flip winding order; TODO confirm).
            indexes.reverse()
            vertexes = []
            for l in range(len(indexes)):
                if indexes[l] in self.mesh_vertexes_redirect:
                    vertexes.append(self.mesh_vertexes_redirect[indexes[l]])
                else:
                    vertexes.append(indexes[l])
            self.mesh_faces.append(vertexes)
            # Exact vertex indices as written in the X file (used for UVs).
            self.mesh_faces_exact.append(indexes)
            i += length + 1
def parse_mesh_texture_coords_bin(self):
self.parse_token_loop(TOKEN_INTEGER_LIST)
self.parse_token_loop(TOKEN_FLOAT_LIST)
self.mesh_tex_coord = []
i = 0
while i < len(self.ret_float_list):
vertex = [self.ret_float_list[i], self.ret_float_list[i + 1]]
vertex[1] = -vertex[1] + 1
self.mesh_tex_coord.append(vertex)
i += 2
    def parse_mesh_material_list_bin(self):
        """Parse a binary MeshMaterialList: material count, per-face material
        indices, then any number of inline Material records.
        """
        self.parse_token_loop(TOKEN_INTEGER_LIST)
        self.material_count = self.ret_integer_list[0]
        i = 2  # NOTE(review): unused
        # ret_integer_list[1] is the face-index count; the indices follow it.
        self.material_face_indexes = self.ret_integer_list[2:self.ret_integer_list[1] + 2]
        pos = self.byte_buffer.pos
        # Consume consecutive Material records; on any other token rewind to
        # the saved position so the caller sees that token again.
        while True:
            token = self.parse_token()
            if token == TOKEN_NAME and self.ret_string == "Material":
                self.parse_material_bin()
            else:
                self.byte_buffer.pos = pos
                break
            pos = self.byte_buffer.pos
    def parse_material_bin(self):
        """Parse a binary Material record, plus an optional TextureFilename
        child, and append the result to self.materials.
        """
        self.parse_token_loop(TOKEN_FLOAT_LIST)
        material = XMaterial()
        # Float layout: diffuse RGBA [0:4], power [4], specular RGB [5:8],
        # emissive RGB [8:11] (alpha forced to 1.0).
        material.face_color = self.ret_float_list[0:4]
        material.power = self.ret_float_list[4]
        material.specular_color = self.ret_float_list[5:8]
        material.emission_color = (self.ret_float_list[8], self.ret_float_list[9], self.ret_float_list[10], 1.0)
        token = self.parse_token()
        if token == TOKEN_NAME and self.ret_string == "TextureFilename":
            self.parse_token_loop(TOKEN_STRING)
            material.texture_path = self.ret_string
            # Consume the TextureFilename element's closing brace.
            self.parse_token_loop(TOKEN_CBRACE)
        # Consume the Material's own closing brace if not already read.
        if token != TOKEN_CBRACE:
            self.parse_token_loop(TOKEN_CBRACE)
        self.materials.append(material)
def execute(self, context):
for obj in bpy.context.scene.objects:
obj.select_set(False)
if self.remove_all:
for obj in bpy.context.scene.objects:
if obj.type == 'MESH':
obj.select_set(True)
else:
obj.select_set(False)
bpy.ops.object.delete()
for material in bpy.data.materials:
material.user_clear()
bpy.data.materials.remove(material)
# xファイルを読み込み
with open(self.filepath, "rb") as f:
header = f.read(16)
if header[0:4] == b'xof ':
# フォーマットのチェック
if header[8:12] == b'txt ':
self.is_binary = False
elif header[8:12] == b'bin ':
self.is_binary = True
else:
raise Exception(bpy.app.translations.pgettext("This file is not X file!"))
if self.is_binary:
# バイナリ
with open(self.filepath, "rb") as f:
f.read(16)
data = f.read()
self.byte_buffer = ByteBuffer(data)
self.parse_bin()
else:
# テキスト
with open(self.filepath) as f:
x_model_file_string = f.read().split("\n")
x_elements = []
x_element = XElement()
# テキストデータからXElementにパース
for line in range(len(x_model_file_string)):
if line <= x_element.end_line_num:
continue
x_element = to_XElement(x_model_file_string, line)
x_elements.append(x_element)
# XElementからデータを分析
for element in x_elements:
if element.element_type == "Mesh":
self.parse_mesh(element)
for ele in element.children:
# テクスチャの座標(UV)
if ele.element_type == "MeshTextureCoords":
self.parse_texture_coords(ele)
# マテリアルのリスト マテリアル数;\n面の数;\nその面が使用するマテリアルのインデックス,...
if ele.element_type == "MeshMaterialList":
self.parse_mesh_material_list(ele)
for ch in ele.children:
if ch.element_type == "Material":
self.parse_material(ch)
else:
if element.element_type == "Material":
self.parse_material(element)
elif element.element_type == "MeshTextureCoords":
self.parse_texture_coords(element)
material_faces = []
for i in range(self.material_count):
material_faces.append([])
# マテリアル別に面を整理
if self.material_count > 0:
for i in range(len(self.mesh_faces)):
if len(self.material_face_indexes) <= i:
self.material_face_indexes.append(0)
material_id = self.material_face_indexes[i]
material_faces[material_id].append(i)
# モデル名を決定
model_name = os.path.splitext(os.path.basename(self.filepath))[0]
# マテリアルごとにオブジェクトを作成
for j in range(len(material_faces)):
faces_data = []
vertexes_data = []
faces = material_faces[j]
if len(faces) == 0:
continue
# マテリアルの有無
available_material = len(self.materials) > self.material_face_indexes[faces[0]]
x_material = self.materials[self.material_face_indexes[faces[0]]]
# マテリアルを作成
material = bpy.data.materials.new(model_name + "Material")
# ブレンドモードの設定
| |
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains macros that create repository rules for standard and custom sets of execution properties.
It also contains macros to create and manipulate dictionaries of properties to be used as execution
properties for RBE.
"""
def _add(
        dict,
        var_name,
        key,
        value,
        verifier_fcn = None):
    """Insert key -> str(value) into dict, skipping None values.

    Args:
      dict: The string->string dict to update in place.
      var_name: Name used in verifier error messages.
      key: The key to set in dict.
      value: The raw value; stored via str(). A None value is a no-op.
      verifier_fcn: Optional validator; it is responsible for fail()ing
        on an invalid value.
    """
    if value != None:
        if verifier_fcn != None:
            verifier_fcn(var_name, value)  # fail()s if value is invalid
        dict[key] = str(value)
def _add_labels(dict, var_name, labels):
    """Expand a labels dict into "label:<key>" entries of dict.

    A None labels argument is a no-op. Otherwise labels must be a valid
    string->string dict; _verify_labels fail()s when it is not.

    Args:
      dict: The string->string dict to update in place.
      var_name: Name used in error messages.
      labels: A string->string dictionary of labels, or None.
    """
    if labels == None:
        return
    _verify_labels(var_name, labels)
    for label_key, label_value in labels.items():
        dict["label:" + label_key] = label_value
def _verify_string(var_name, value):
    # Starlark's type() returns a type-name string, e.g. "string".
    if type(value) != "string":
        fail("%s must be a string" % var_name)
def _verify_bool(var_name, value):
    # Starlark's type() returns a type-name string, e.g. "bool".
    if type(value) != "bool":
        fail("%s must be a bool" % var_name)
def _verify_labels(var_name, labels):
    # labels must be a string->string dict.
    if type(labels) != "dict":
        fail("%s must be a dict" % var_name)
    # Every key/value pair must individually satisfy the label constraints.
    for key, value in labels.items():
        _verify_label(var_name, key, value)
def _verify_label(var_name, key, value):
    """Validate a single label key/value pair, fail()ing on any violation."""
    # This is based on the Requirements for labels outlined in
    # https://cloud.google.com/resource-manager/docs/creating-managing-labels.
    # Keys have a minimum length of 1 character and a maximum length of 63
    # characters, and cannot be empty. Values can be empty, and have a maximum
    # length of 63 characters.
    _verify_string("%s.%s" % (var_name, key), key)
    if len(key) == 0:
        fail("%s cannot contain an empty key" % var_name)
    if len(key) > 63:
        fail("%s.%s exceeds the 63 character length limit for a label name" % (var_name, key))
    _verify_string("value of %s.%s" % (var_name, key), value)
    if len(value) > 63:
        fail("value of %s.%s exceeds the 63 character length limit for a label value" % (var_name, key))
    # The actual requirement is:
    # Keys and values can contain only lowercase letters, numeric characters,
    # underscores, and dashes. All characters must use UTF-8 encoding, and
    # international characters are allowed.
    # Keys must start with a lowercase letter or international character.
    #
    # But since I don't know of a way to verify international characters in
    # starlark, we will enforce slightly less strict requirements. Namely:
    # Keys and labels must not contain upper case letters.
    # Keys must not start with a number, underscore or dash.
    if key != key.lower():
        fail("%s.%s must not contain capital letters" % (var_name, key))
    if value != value.lower():
        fail("value of %s.%s must not contain capital letters" % (var_name, key))
    if "0123456789-_".find(key[0]) != -1:
        fail("%s.%s must start with a lowercase letter or international character" % (var_name, key))
def _verify_one_of(var_name, value, valid_values):
    # value must be a string and a member of valid_values.
    _verify_string(var_name, value)
    if value not in valid_values:
        fail("%s must be one of %s" % (var_name, valid_values))
def _verify_os(var_name, value):
    # Only these two OS families are supported as exec property values.
    _verify_one_of(var_name, value, ["Linux", "Windows"])
def _verify_docker_network(var_name, value):
    # dockerNetwork accepts only "standard" or "off".
    _verify_one_of(var_name, value, ["standard", "off"])
def _verify_docker_shm_size(var_name, value):
    """Validate a Docker shm-size string of the form "[0-9]*[bkmg]?".

    The numeric part must be a positive integer; the optional suffix is a
    unit: b (bytes), k (kilobytes), m (megabytes) or g (gigabytes).  The
    entire string may also be empty.
    """
    _verify_string(var_name, value)
    if value == "":
        return  # Both the number and the unit can be unspecified.
    # Split off the optional one-letter unit suffix; the remainder must be
    # a decimal number.
    unit = value[-1]
    number = value[:-1] if unit in "bkmg" else value
    if not number.isdigit():
        fail("%s = \"%s\" must be of the format \"[0-9]*[bkmg]?\"" % (var_name, value))
    if number == "0":
        fail("%s = \"%s\" must have a numeric value greater than 0." % (var_name, value))
# Maps each create_rbe_exec_properties_dict() keyword name to the RBE
# platform property key it sets and the validator applied to its value.
PARAMS = {
    "container_image": struct(
        key = "container-image",
        verifier_fcn = _verify_string,
    ),
    "docker_add_capabilities": struct(
        key = "dockerAddCapabilities",
        verifier_fcn = _verify_string,
    ),
    "docker_drop_capabilities": struct(
        key = "dockerDropCapabilities",
        verifier_fcn = _verify_string,
    ),
    "docker_network": struct(
        key = "dockerNetwork",
        verifier_fcn = _verify_docker_network,
    ),
    "docker_privileged": struct(
        key = "dockerPrivileged",
        verifier_fcn = _verify_bool,
    ),
    "docker_run_as_root": struct(
        key = "dockerRunAsRoot",
        verifier_fcn = _verify_bool,
    ),
    "docker_runtime": struct(
        key = "dockerRuntime",
        verifier_fcn = _verify_string,
    ),
    "docker_shm_size": struct(
        key = "dockerShmSize",
        verifier_fcn = _verify_docker_shm_size,
    ),
    "docker_sibling_containers": struct(
        key = "dockerSiblingContainers",
        verifier_fcn = _verify_bool,
    ),
    "docker_ulimits": struct(
        key = "dockerUlimits",
        verifier_fcn = _verify_string,
    ),
    "docker_use_urandom": struct(
        key = "dockerUseURandom",
        verifier_fcn = _verify_bool,
    ),
    "gce_machine_type": struct(
        key = "gceMachineType",
        verifier_fcn = _verify_string,
    ),
    "os_family": struct(
        key = "OSFamily",
        verifier_fcn = _verify_os,
    ),
    "pool": struct(
        key = "Pool",
        verifier_fcn = _verify_string,
    ),
}
def create_exec_properties_dict(**kwargs):
    # Deprecated stub: always fails with a pointer to the replacement API.
    fail("create_exec_properties_dict is deprecated. Please use create_rbe_exec_properties_dict instead.")
def create_rbe_exec_properties_dict(**kwargs):
    """Return a dict with exec_properties that are supported by RBE.

    Args:
      **kwargs: Arguments specifying what keys are populated in the returned dict.
          Note that the name of the key in kwargs is not the same as the name of
          the key in the returned dict (the mapping is defined in PARAMS).
          For more information about what each parameter is see
          https://cloud.google.com/remote-build-execution/docs/remote-execution-properties.
          If this link is broken for you, you may not be whitelisted for RBE.
          See https://groups.google.com/forum/#!forum/rbe-alpha-customers.

    Returns:
      A dict that can be used as, for example, the exec_properties parameter of platform.
    """

    # NOTE: the original implementation named this local "dict", shadowing the
    # builtin; renamed to keep the builtin usable and the code clearer.
    exec_properties = {}
    for var_name, value in kwargs.items():
        if var_name in PARAMS:
            p = PARAMS[var_name]
            _add(
                dict = exec_properties,
                var_name = var_name,
                key = p.key,
                value = value,
                verifier_fcn = p.verifier_fcn if hasattr(p, "verifier_fcn") else None,
            )
        elif var_name == "labels":
            # "labels" is a special parameter. Its value is a dict and it maps
            # to multiple properties.
            _add_labels(
                dict = exec_properties,
                var_name = var_name,
                labels = value,
            )
        else:
            fail("%s is not a valid var_name" % var_name)
    return exec_properties
def merge_dicts(*dict_args):
    """Deprecated stub: always fails. Use skylib's dicts.add() instead."""
    fail("merge_dicts is deprecated. Please use dicts.add() instead. See https://github.com/bazelbuild/bazel-skylib/blob/master/docs/dicts_doc.md")
def _exec_property_sets_repository_impl(repository_ctx):
    """Implementation of the _exec_property_sets_repository rule.

    Materializes two files in the generated repository:
      * BUILD: exposes constants.bzl as a publicly visible bzl_library
        named "constants", so other packages can load it.
      * constants.bzl: a verbatim copy of the rule's constants_bzl_content
        attribute.
    """
    repository_ctx.file(
        "BUILD",
        content = """
load("@bazel_skylib//:bzl_library.bzl", "bzl_library")
package(default_visibility = ["//visibility:public"])
bzl_library(
    name = "constants",
    srcs = [
        "constants.bzl",
    ],
)
""",
        executable = False,
    )
    repository_ctx.file(
        "constants.bzl",
        content = repository_ctx.attr.constants_bzl_content,
        executable = False,
    )
# _exec_property_sets_repository is a repository rule that creates a repo with
# the specified exec_property_sets. The generated repo contains a constants.bzl
# file (whose text is supplied via constants_bzl_content) plus a BUILD file
# that exposes it as a bzl_library.
_exec_property_sets_repository = repository_rule(
    implementation = _exec_property_sets_repository_impl,
    local = True,
    attrs = {
        "constants_bzl_content": attr.string(
            mandatory = True,
            doc = "The content of the constants.bzl file within the repository rule.",
        ),
    },
)
def _verify_dict_of_dicts(name, dicts):
""" Verify that dict is of type {string->{string->string}}.
Args:
name: Name of the repo rule. Used for error messages.
dicts: a dict whose key is a string and whose value is a dict from string to string.
"""
# Verify that dict is of type {string->{string->string}}.
for key, value in dicts.items():
if type(key) != "string":
fail("In repo rule %s, execution property set name %s must be a string" % (name, key))
if type(value) != "dict":
fail("In repo rule %s, execution property set of %s must be a dict" % (name, key))
for k, v in value.items():
if type(k) != "string":
fail("In repo rule %s, execution property set %s, | |
this
variable from initial population generation) is the new Tree to be insertd into 'tree', replacing 'branch'.
The end result is a Tree with a mutated branch. Pretty cool, huh?
Called by: fx_evolve_grow_mutate, fx_evolve_grow_crossover
Arguments required: tree, branch
'''
# *_branch_top_copy merged with *_body_copy 2018 04/12
### 1) insert branch_top from 'gp.tree' into 'tree' ###
branch_top = int(branch[0])
tree[5][branch_top] = 'func' # update type ('func' to 'term' or 'term' to 'term'); this modifies gp.tree[5][1] from 'root' to 'func'
tree[6][branch_top] = self.tree[6][1] # copy node_label from new tree
tree[8][branch_top] = self.tree[8][1] # copy node_arity from new tree
tree = np.delete(tree, branch[1:], axis = 1) # delete all nodes beneath point of mutation ('branch_top')
c_buffer = self.fx_evolve_c_buffer(tree, branch_top) # generate c_buffer for point of mutation ('branch_top')
tree = self.fx_evolve_child_insert(tree, branch_top, c_buffer) # insert a single new node ('branch_top')
tree = self.fx_evolve_node_renum(tree) # renumber all 'NODE_ID's
if self.display == 'db':
print '\n\t ... inserted node 1 of', len(self.tree[3])-1
print '\n\033[36m This is the Tree after a new node is inserted:\033[0;0m\n', tree; self.fx_karoo_pause(0)
### 2) insert branch_body from 'gp.tree' into 'tree' ###
node_count = 2 # set node count for 'gp.tree' to 2 as the new root has already replaced 'branch_top' (above)
while node_count < len(self.tree[3]): # increment through all nodes in the new Tree ('gp.tree'), starting with node 2
for j in range(1, len(tree[3])): # increment through all nodes in tourn_winner ('tree')
if self.display == 'db': print '\tScanning tourn_winner node_id:', j
if tree[5][j] == '':
tree[5][j] = self.tree[5][node_count] # copy 'node_type' from branch to tree
tree[6][j] = self.tree[6][node_count] # copy 'node_label' from branch to tree
tree[8][j] = self.tree[8][node_count] # copy 'node_arity' from branch to tree
if tree[5][j] == 'term':
tree = self.fx_evolve_child_link_fix(tree) # fix all child links
tree = self.fx_evolve_node_renum(tree) # renumber all 'NODE_ID's
if tree[5][j] == 'func':
c_buffer = self.fx_evolve_c_buffer(tree, j) # generate 'c_buffer' for point of mutation ('branch_top')
tree = self.fx_evolve_child_insert(tree, j, c_buffer) # insert new nodes
tree = self.fx_evolve_child_link_fix(tree) # fix all child links
tree = self.fx_evolve_node_renum(tree) # renumber all 'NODE_ID's
if self.display == 'db':
print '\n\t ... inserted node', node_count, 'of', len(self.tree[3])-1
print '\n\033[36m This is the Tree after a new node is inserted:\033[0;0m\n', tree; self.fx_karoo_pause(0)
node_count = node_count + 1 # exit loop when 'node_count' reaches the number of columns in the array 'gp.tree'
return tree
def fx_evolve_branch_copy(self, tree, branch):
    '''
    Build a stand-alone Tree that is a copy of the given branch.

    Each node listed in 'branch' becomes one column of the new array; node
    depths are re-based so the branch top sits at depth 0. Parent and child
    links are left blank here and rebuilt by the link-fix helpers below.

    Called by: fx_evolve_crossover
    Arguments required: tree, branch
    '''
    new_tree = np.array([ ['TREE_ID'],['tree_type'],['tree_depth_base'],['NODE_ID'],['node_depth'],['node_type'],['node_label'],['node_parent'],['node_arity'],['node_c1'],['node_c2'],['node_c3'],['fitness'] ])

    branch_top = int(branch[0])  # invariant: the first entry is the branch point
    # depth of the copied branch = depth of its deepest node minus depth of its top
    depth_base = int(tree[4][branch[-1]]) - int(tree[4][branch_top])

    for node in branch:
        column = [
            ['copy'],                                          # TREE_ID
            [tree[1][1]],                                      # tree_type
            [depth_base],                                      # tree_depth_base
            [tree[3][node]],                                   # NODE_ID
            [int(tree[4][node]) - int(tree[4][branch_top])],   # node_depth, re-based to branch top
            [tree[5][node]],                                   # node_type
            [tree[6][node]],                                   # node_label
            [''],                                              # node_parent; set by fx_evolve_parent_link_fix
            [tree[8][node]],                                   # node_arity
            [''],                                              # node_c1; set by fx_evolve_child_link_fix
            [''],                                              # node_c2
            [''],                                              # node_c3
            [''],                                              # fitness
        ]
        new_tree = np.append(new_tree, column, 1)

    new_tree = self.fx_evolve_node_renum(new_tree)
    new_tree = self.fx_evolve_child_link_fix(new_tree)
    new_tree = self.fx_evolve_parent_link_fix(new_tree)
    new_tree = self.fx_data_tree_clean(new_tree)
    return new_tree
def fx_evolve_c_buffer(self, tree, node):
    '''
    Determine the parent-to-child link position for a given node.

    The single, simple formula
        node + parent_arity_sum + prior_sibling_arity - prior_siblings
    yields the correct position of the child node, already in place or to
    be inserted, no matter the depth nor complexity of the tree.

    Called by: fx_evolve_child_link_fix, fx_evolve_branch_insert
    Arguments required: tree, node
    '''
    arity_above = 0       # summed arity of every node one level above 'node'
    arity_before = 0      # summed arity of earlier siblings at the same level
    siblings_before = 0   # count of earlier siblings at the same level
    depth = int(tree[4][node])

    for n in range(1, len(tree[3])):  # all nodes except the header column
        n_depth = int(tree[4][n])
        if n_depth == depth - 1:
            # a parent-level node; accumulate its arity if it has one
            if tree[8][n] != '':
                arity_above += int(tree[8][n])
        elif n_depth == depth and int(tree[3][n]) < int(tree[3][node]):
            # an earlier sibling at the same depth
            if tree[8][n] != '':
                arity_before += int(tree[8][n])
            siblings_before += 1

    return node + (arity_above + arity_before - siblings_before)  # One algo to rule the world!
def fx_evolve_child_link(self, tree, node, c_buffer):
    '''
    Link each parent node to its children.

    Rows 9, 10, and 11 of 'tree' hold the child pointers 'node_c1',
    'node_c2', and 'node_c3'. Given 'c_buffer' (index of the first child,
    as computed by fx_evolve_c_buffer), this writes as many consecutive
    child pointers as the node's arity (row 8) requires and blanks the
    rest. Nodes with an empty arity field are left untouched.

    Called by: fx_evolve_child_link_fix
    Arguments required: tree, node, c_buffer
    '''
    if int(tree[3][node]) == 1: c_buffer = c_buffer + 1 # if root (node 1) is passed through this method
    if tree[8][node] != '':
        if int(tree[8][node]) == 0: # if arity = 0: terminal, no children
            tree[9][node] = ''
            tree[10][node] = ''
            tree[11][node] = ''
        elif int(tree[8][node]) == 1: # if arity = 1
            tree[9][node] = c_buffer
            tree[10][node] = ''
            tree[11][node] = ''
        elif int(tree[8][node]) == 2: # if arity = 2
            tree[9][node] = c_buffer
            tree[10][node] = c_buffer + 1
            tree[11][node] = ''
        elif int(tree[8][node]) == 3: # if arity = 3
            tree[9][node] = c_buffer
            tree[10][node] = c_buffer + 1
            tree[11][node] = c_buffer + 2
        # arity > 3 is unsupported; warn and pause rather than corrupt the tree
        else: print '\n\t\033[31m ERROR! In fx_evolve_child_link: node', node, 'has arity', tree[8][node]; self.fx_karoo_pause(0)
    return tree
def fx_evolve_child_link_fix(self, tree):
    '''
    Recompute 'node_c1', 'node_c2', 'node_c3' for every node in a Tree.

    Required whenever the size of the array has changed (e.g. after Grow
    or Full mutation): each node's child offset is re-derived with
    fx_evolve_c_buffer and written back with fx_evolve_child_link.

    Called by: fx_evolve_grow_mutate, fx_evolve_crossover,
               fx_evolve_branch_body_copy, fx_evolve_branch_copy
    Arguments required: tree
    '''
    for node_id in range(1, len(tree[3])):          # skip the header column
        offset = self.fx_evolve_c_buffer(tree, node_id)
        tree = self.fx_evolve_child_link(tree, node_id, offset)
    return tree
def fx_evolve_child_insert(self, tree, node, c_buffer):
    '''
    Insert child node(s) into the copy of a parent Tree.

    For each child required by the arity of 'node' (row 8), a new empty
    column is inserted at 'c_buffer' (the position computed by
    fx_evolve_c_buffer). Only the node ID (row 3), node depth (row 4),
    and parent ID (row 7) are filled in here; the remaining fields are
    populated later by the caller. Arity 0 and arity > 3 are invalid
    here and trigger a warning plus pause.

    Called by: fx_evolve_branch_insert
    Arguments required: tree, node, c_buffer
    '''
    if int(tree[8][node]) == 0: # if arity = 0: nothing to insert; this is a caller error
        print '\n\t\033[31m ERROR! In fx_evolve_child_insert: node', node, 'has arity 0\033[0;0m'; self.fx_karoo_pause(0)
    elif int(tree[8][node]) == 1: # if arity = 1
        tree = np.insert(tree, c_buffer, '', axis=1) # insert node for 'node_c1'
        tree[3][c_buffer] = c_buffer # node ID
        tree[4][c_buffer] = int(tree[4][node]) + 1 # node_depth: one level below the parent
        tree[7][c_buffer] = int(tree[3][node]) # parent ID
    elif int(tree[8][node]) == 2: # if arity = 2
        tree = np.insert(tree, c_buffer, '', axis=1) # insert node for 'node_c1'
        tree[3][c_buffer] = c_buffer # node ID
        tree[4][c_buffer] = int(tree[4][node]) + 1 # node_depth
        tree[7][c_buffer] = int(tree[3][node]) # parent ID
        tree = np.insert(tree, c_buffer + 1, '', axis=1) # insert node for 'node_c2'
        tree[3][c_buffer + 1] = c_buffer + 1 # node ID
        tree[4][c_buffer + 1] = int(tree[4][node]) + 1 # node_depth
        tree[7][c_buffer + 1] = int(tree[3][node]) # parent ID
    elif int(tree[8][node]) == 3: # if arity = 3
        tree = np.insert(tree, c_buffer, '', axis=1) # insert node for 'node_c1'
        tree[3][c_buffer] = c_buffer # node ID
        tree[4][c_buffer] = int(tree[4][node]) + 1 # node_depth
        tree[7][c_buffer] = int(tree[3][node]) # parent ID
        tree = np.insert(tree, c_buffer + 1, '', axis=1) # insert node for 'node_c2'
        tree[3][c_buffer + 1] = c_buffer + 1 # node ID
        tree[4][c_buffer + 1] = int(tree[4][node]) + 1 # node_depth
        tree[7][c_buffer + 1] = int(tree[3][node]) # parent ID
        tree = np.insert(tree, c_buffer + 2, '', axis=1) # insert node for 'node_c3'
        tree[3][c_buffer + 2] = c_buffer + 2 # node ID
        tree[4][c_buffer + 2] = int(tree[4][node]) + 1 # node_depth
        tree[7][c_buffer + 2] = int(tree[3][node]) # parent ID
    else: print '\n\t\033[31m ERROR! In fx_evolve_child_insert: node', node, 'arity > 3\033[0;0m'; self.fx_karoo_pause(0)
    return tree
def fx_evolve_parent_link_fix(self, tree):
    '''
    In a given Tree, rewrite 'parent_id' (row 7) for all nodes.

    Each node's child pointers (rows 9-11, 'node_c1'..'node_c3') are
    followed and the child's parent field is set back to this node. This
    is needed after Crossover, where branches are copied to their own
    trees before being re-inserted into copies of the parents.

    Technically speaking, the 'node_parent' value is not used by any
    methods; it is maintained solely so the tree reads sensibly in both
    directions for the user.

    Called by: fx_evolve_branch_copy
    Arguments required: tree
    '''
    ### THIS METHOD MAY NOT BE REQUIRED AS SORTING 'branch' SEEMS TO HAVE FIXED 'parent_id' ###
    for parent in range(1, len(tree[3])):        # skip the header column
        for child_row in (9, 10, 11):            # node_c1, node_c2, node_c3
            child_id = tree[child_row][parent]
            if child_id != '':
                tree[7][int(child_id)] = parent  # point the child back at this node
    return tree
def fx_evolve_node_arity_fix(self, tree):
'''
In a given Tree, fix 'node_arity' for all nodes labeled 'term' but with arity 2.
This is required after a function has been replaced by a terminal, as may occur with both Grow mutation and
Crossover.
Called by: fx_evolve_grow_mutate, fx_evolve_tree_prune
Arguments required: tree
'''
# tested 2015 05/31
for n in range(1, len(tree[3])): # increment through all nodes (exclude 0) in array 'tree'
if tree[5][n] == 'term': | |
grid components X and Y must either have the
same shape as Z or fulfill the requirement len(X)==n and len(Y)==m,
where m,n=shape(Z). The color is determined by the array C which must
have the same shape as Z. If the color array C is not given, Z is used
as the color array (i.e., C=Z).
Calling::
mesh(Z[, C])
is the same as calling mesh(range(n), range(m), Z[, C]),
where m,n = shape(Z).
Calling::
mesh(ax, ...)
plots into the Axis object ax instead of the current axis.
@return: A Surface object.
Examples:
>>> x = y = linspace(-2, 2, 21)
>>> xx, yy = meshgrid(x, y)
>>> zz = exp(-xx**2)*exp(-yy**2)
>>> mesh(xx, yy, zz)
"""
if not 'description' in kwargs:
kwargs['description'] = 'mesh: 3D mesh'
ax, args, nargs = self._check_args(*args)
h = Surface(*args, **kwargs)
ax.add(h)
if not ax.getp('hold'):
if not 'grid' in kwargs:
kwargs['grid'] = True
if not 'view' in kwargs:
kwargs['view'] = 3
ax.setp(**kwargs)
self.gcf().setp(**kwargs)
self.setp(**kwargs)
if self.getp('interactive') and self.getp('show'):
self._replot()
return h
def meshc(self, *args, **kwargs):
    """Draw a mesh with a contour plot beneath.

    Calling::

        meshc(...)

    is the same as calling mesh(...) only that a contour plot is drawn
    beneath the mesh.

    Examples:

    Draw a mesh with contour lines:
    >>> x = linspace(-2, 2, 21)
    >>> xx, yy = meshgrid(x)
    >>> zz = peaks(xx, yy)
    >>> meshc(xx, yy, zz)

    Draw a mesh with 20 contour lines:
    >>> meshc(xx, yy, zz, clevels=20)

    Draw a mesh with contour lines at height -0.2, -0.5, 0.2, 0.5:
    >>> meshc(xx, yy, zz, cvector=[-0.2,-0.5,0.2,0.5])

    Draw a mesh with contours and label the contours:
    >>> meshc(xx, yy, zz, clabels='on')
    """
    # The description always overrides any caller-supplied value; it is the
    # marker the backend uses to add the contour plot at the base.
    kwargs.update(description='meshc: 3D mesh with contours at base')
    return self.mesh(*args, **kwargs)
def surf(self, *args, **kwargs):
    """Draw a 3D solid surface.

    Calling::

        surf(...)

    is the same as calling mesh(...), except that a solid colored surface
    is drawn instead of a wireframe mesh.

    Examples:
    >>> x = linspace(-2, 2, 21)
    >>> xx, yy = meshgrid(x)
    >>> zz = xx**2 + yy**2
    >>> surf(xx, yy, zz)
    """
    # Idiom fix: replace "if not 'description' in kwargs: ..." with
    # setdefault -- same behavior (only set when the caller did not
    # supply one, e.g. when surfc delegates here), clearer intent.
    kwargs.setdefault('description', 'surf: 3D surface')
    return self.mesh(*args, **kwargs)
def surfc(self, *args, **kwargs):
    """Draw a solid surface with contours beneath.

    Calling::

        surfc(...)

    is the same as calling surf(...) only that a contour plot is drawn
    beneath the surface.
    """
    # Unconditional: this wrapper's description must win over any default.
    kwargs.update(description='surfc: 3D surface with contours at base')
    return self.surf(*args, **kwargs)
def surfl(self, *args, **kwargs):
    """3D shaded surface with lighting (not implemented).

    Raises:
        NotImplementedError: always; no backend implements this yet.
    """
    # BUG FIX: the original raised the misspelled name 'NotImplemetedError',
    # which does not exist, so callers got a NameError instead of the
    # intended NotImplementedError.
    raise NotImplementedError("'surfl' is not implemented")
def quiver3(self, *args, **kwargs):
    """Draw velocity vectors in 3D space.

    Calling::

        quiver3(X, Y, Z, U, V, W)

    plots arrows from the 3D vector field with components U,V,W at the
    grid defined by X,Y,Z. The shape of the three vector components is
    assumed to be the same, while the grid components must either have
    the same shape as U or fulfill the requirements len(X)==n, len(Y)==m,
    and len(Z)==p, where m,n,p=shape(U).

    Calling::

        quiver3(Z,U,V,W)

    gives the same result as above, but it is assumed that
    X,Y = meshgrid(range(n),range(m)), where m,n=shape(Z).

    Calling::

        quiver3(..., s)

    scales the arrows by the scale factor given in s. The default is s=1,
    while a value of s=0 turns off automatic scaling.

    Calling::

        quiver3(..., 'filled')

    fills the arrows.

    Calling::

        quiver3(..., fmt)

    sets the specification on the arrows as given in the format string
    fmt (see the plot command for further information on format strings).

    Calling::

        quiver3(ax, ...)

    plots the vectors in the Axis object ax instead of the current axis.

    @return: A VelocityVectors object.

    Examples:

    Draw the "radius vector field" v = (x,y,z):
    >>> x = y = linspace(-3,3,4)
    >>> xv, yv, zv = meshgrid(x, y, sparse=False)
    >>> uv, vv, wv = xv, yv, zv
    >>> quiver3(xv, yv, zv, uv, vv, wv, 'filled', 'r',
    ...         axis=[-7,7,-7,7,-7,7])

    Draw the path of a projectile as a function of time:
    >>> vz = 10  # Velocity
    >>> a = -32  # Acceleration
    >>> t = linspace(0,1,11)
    >>> z = vz*t + 1./2*a*t**2
    >>> vx = 2
    >>> x = vx*t
    >>> vy = 3
    >>> y = vy*t
    >>> u = gradient(x)
    >>> v = gradient(y)
    >>> w = gradient(z)
    >>> quiver3(x,y,z,u,v,w,0,view=[70,18],grid='on',
    ...         axis=[0,3.5,0,3,-10,2])
    """
    # Delegate to quiver; only the description marks this as a 3D field.
    kwargs.update(description="quiver3: 3D vector field")
    return self.quiver(*args, **kwargs)
def contour3(self, *args, **kwargs):
    """Draw 3D contour plot.

    Calling::

        contour3(...)

    is the same as calling contour(...), except that the contours are
    drawn at their coresponding height level.

    Examples:
    >>> contour3(peaks())
    """
    # Delegate to contour; the description tells the backend to lift the
    # contour lines to their height levels.
    kwargs.update(description="contour3: 3D contours at surface")
    return self.contour(*args, **kwargs)
# Volume plotting
def slice_(self, *args, **kwargs):
    """Draw volumetric slice plot.

    Calling::

        slice_(X,Y,Z,V,Sx,Sy,Sz)

    draws orthogonal slice planes through the volumetric data set V
    defined on the grid with components X, Y, and Z. The grid components
    must either have the same shape as V or fulfill the requirement
    len(X)==n, len(Y)==m, and len(Z)==p, where m,n,p=shape(V). The Sx,
    Sy, and Sz arrays defines the slice planes in the x, y, and z
    direction, respectively.

    Calling::

        slice_(V,Sx,Sy,Sz)

    is the same as calling slice_(range(n),range(m),range(p),V,Sx,Sy,Sz),
    where m,n,p = shape(V).

    Calling::

        slice_(X,Y,Z,V,XI,YI,ZI)

    draws slices through the volumetric data set V along the surface
    defined by the arrays XI,YI,ZI.

    Calling::

        slice_(V,XI,YI,ZI)

    is the same as calling slice_(range(n),range(m),range(p),V,XI,YI,ZI),
    where m,n,p = shape(V).

    Calling::

        slice_(..., method)

    sets which interpolation method to be used, where method can be either
    'linear' (default), 'cubic', or 'nearest'.

    Calling::

        slice_(ax, ...)

    plots into the Axis object ax instead of the current axis.

    @return: A Volume object.

    Examples:

    Visualize the function x*exp(-x**2-y**2-z**2) over the range
    -2 < x,y,z < 2:
    >>> xx, yy, zz = meshgrid(linspace(-2,2,21), linspace(-2,2,17),
    ...                       linspace(-2,2,25))
    >>> vv = xx*exp(-xx**2-yy**2-zz**2)
    >>> slice_(xx, yy, zz, vv, [-1.2,.8,2], 2, [-2,-.2])
    """
    # Idiom fix: "if not 'description' in kwargs" replaced by setdefault
    # (same behavior: the default is only used when a wrapper such as
    # contourslice has not already supplied its own description).
    kwargs.setdefault('description', 'slice_: volumetric slices')
    ax, args, nargs = self._check_args(*args)
    h = Volume(*args, **kwargs)
    ax.add(h)
    if not ax.getp('hold'):
        # Grid/view defaults are applied only for plain slice_ plots; wrapper
        # commands carry a different description and skip them.
        if 'slice_' in kwargs['description']:
            kwargs.setdefault('grid', True)
            kwargs.setdefault('view', 3)
        ax.setp(**kwargs)
        self.gcf().setp(**kwargs)
        self.setp(**kwargs)
    if self.getp('interactive') and self.getp('show'):
        self._replot()
    return h
def contourslice(self, *args, **kwargs):
    """Draw contour lines in slice planes.

    Calling::

        contourslice(X,Y,Z,V,Sx,Sy,Sz)

    will draw contour lines in planes aligned with the coordinate axes
    at the points in the arrays Sx, Sy, and Sz. The arrays X, Y, and Z
    defines the grid coordinates for the volume V and they must either
    have the same shape as V or fulfill the requirement len(X)==n,
    len(Y)==m, and len(Z)==p, where m,n,p = shape(V).

    Calling::

        contourslice(V,Sx,Sy,Sz)

    is the same as above, but it is assumed that
    X,Y,Z = meshgrid(range(n),range(m),range(p)), where m,n,p = shape(V).

    Calling::

        contourslice(X,Y,Z,V,XI,YI,ZI)

    will draw contour lines through the volume V along the surface given
    in the arrays XI, YI, and ZI.

    Calling::

        contourslice(V,XI,YI,ZI)

    is the same as above, but it is assumed that
    X,Y,Z = meshgrid(range(n),range(m),range(p)), where m,n,p = shape(V).

    Calling::

        contourslice(..., n)

    will draw n contour lines per plane instead of the default of five
    contour lines.

    Calling::

        contourslice(..., v)

    will draw contour lines at the levels given in the array v.

    Calling::

        contourslice(ax, ...)

    uses the Axis object ax instead of the current axis.

    @return: A Volume object.

    Example:
    >>> xx, yy, zz = meshgrid(linspace(-2,2,21), linspace(-2,2,17),
    ...                       linspace(-2,2,25))
    >>> vv = xx*exp(-xx**2-yy**2-zz**2)
    >>> contourslice(xx, yy, zz, vv, [-.7,.7], [], [0], view=3)
    """
    # Delegate to slice_; the description both labels the plot and keeps
    # slice_ from applying its own grid/view defaults.
    kwargs.update(description='contourslice: contours in slice planes')
    return self.slice_(*args, **kwargs)
def coneplot(self, *args, **kwargs):
"""Draw a 3D cone plot.
Calling::
coneplot(X,Y,Z,U,V,W,Cx,Cy,Cz)
draws velocity vectors as cones from the 3D vector field defined by
U, V, and W at the points given in the arrays Cx, Cy, and Cz. The
arrays X, Y, and Z defines the grid coordinates for vector field. The
shape of U, V, and W is assumed to be the same, while the grid
components must either have the same shape as U or fulfill the
requirement len(X)==n, len(Y)==m, and len(Z)==p, where m,n,p=shape(U).
Calling::
coneplot(U,V,W,Cx,Cy,Cz)
is the same as above, but it is assumed that
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.