input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
'''
GOAL: Take .fastq files, corresponding to forward and reverse reads, and
sort each read into separate files depending on which barcodes it contains.
'''
from Bio import SeqIO
import sys
import os
import shutil
from itertools import izip
import multiprocessing, threading
from functools import partial
import time
from cStringIO import StringIO
import argparse
import stat_collector as sc
# The format used for SeqIO to process the files.
FORMAT = "fastq-sanger"
# The key in the SeqRecord objects used to obtain the sequence read quality.
SEQUENCE_QUALITY_KEY = "phred_quality"
# The prefix used to create output files for each barcode in the output directory.
BARCODE_FILE_PREFIX = "barcode_"
# The maximum number of processes to use (for the multithread version only).
NUM_PROCESSES = 15
'''
List of expression strings which will be evaluated and written out to the
params.txt file.
'''
PARAMETER_LIST = ["args.forward", "args.reverse", "args.out", "args.mode", "args.numlines", "index_codes", "barcodes", "BARCODE_FILE_PREFIX"]
### Barcode sorting logic
# The possible index codes, found from the reverse complement of the first six bases
# of the reverse read. A read pair's final bin number is
# index * len(barcodes) + barcode (see get_barcode_number).
index_codes = ["ATCACG","CGATGT","TTAGGC","TGACCA","ACAGTG","GCCAAT",
"CAGATC","ACTTGA","GATCAG"]
# The possible barcodes, found from the first five bases of the forward read.
barcodes = ["ACTCG","ACTGT", "AATGC", "AGTCA", "ATACG", "ATAGC",
"CGATC", "CTAAG", "CTCGA", "CGAAT", "CTGGT", "CGGTT",
"GACTT", "GTTCA", "GATAC", "GAGCA", "GATGA", "GTCTG",
"TCGGA", "TGACC", "TACTG", "TCCAG", "TCGAC", "TAGCT"]
def get_barcode_number(forward, reverse):
    '''
    Determines the barcode number for the given forward and reverse sequences.
    Returns index * len(barcodes) + barcode on success, or -1 when the read
    pair should be discarded (low quality or no matching code).
    Modify this function and/or the above string constants to accommodate
    different barcoding schemes.
    '''
    quality_scores = forward.letter_annotations[SEQUENCE_QUALITY_KEY]
    # Discard the read if any base of the 5-base forward barcode was called
    # with a quality score below 20.
    if any(score < 20 for score in quality_scores[:5]):
        return -1
    # The index code is the reverse complement of the first six bases of the
    # reverse read; a 6/6 threshold demands an exact match.
    index_number = get_closest_barcode(reverse.seq[:6].reverse_complement(),
                                       index_codes, 6)
    if index_number < 0:
        return -1
    # The barcode is the first five bases of the forward read; exact match.
    barcode_number = get_closest_barcode(str(forward.seq)[:5], barcodes, 5)
    if barcode_number < 0:
        return -1
    # Flatten the (index, barcode) pair into a single output-file number.
    return index_number * len(barcodes) + barcode_number
def accuracy(seq, other_seq, threshold):
    '''
    Returns the number of identical nucleotides between seq and other_seq. If the
    accuracy is less than threshold, returns -1.
    '''
    # Position-wise comparison driven by seq's length, so other_seq must be
    # at least as long as seq.
    matches = sum(1 for position in range(len(seq))
                  if seq[position] == other_seq[position])
    return matches if matches >= threshold else -1
def get_closest_barcode(sequence, barcode_list, match_threshold=None):
    '''
    Returns the index of the element of barcode_list that most closely matches
    sequence. The number of identical nucleotides must be at least match_threshold.
    If match_threshold is not passed, then any number of identical nucleotides
    will be admitted.
    '''
    threshold = 0 if match_threshold is None else match_threshold
    # Score every candidate; ties are broken by the first (lowest) index,
    # because max() keeps the first maximal element it sees.
    scored = ((position, accuracy(sequence, candidate, threshold))
              for position, candidate in enumerate(barcode_list))
    best_index, best_score = max(scored, key=lambda pair: pair[1])
    # accuracy() returns -1 for every candidate below the threshold.
    return best_index if best_score >= 0 else -1
### Single thread implementation
def sort_sequences_single_thread(forward_path, reverse_path, out_dir, append=False, chunk_size=1):
    '''
    Sorts the paired reads at forward_path/reverse_path into one output file
    per (index code, barcode) combination inside out_dir.

    append: open output files in append mode (used by the multithreaded
        workers, which process several chunks into the same files).
    chunk_size: when > 1, use the seek-minimizing chunked strategy in
        sort_sequence_chunks instead of one read pair at a time.
    Returns 1 on completion.
    '''
    records_forward = SeqIO.parse(open(forward_path, "rU"), FORMAT)
    records_reverse = SeqIO.parse(open(reverse_path, "rU"), FORMAT)
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # One output stream per possible barcode number.
    output_streams = open_output_streams(out_dir, BARCODE_FILE_PREFIX, len(index_codes) * len(barcodes), append)
    if chunk_size > 1:
        sort_sequence_chunks(records_forward, records_reverse, chunk_size, output_streams)
    else:
        # Walk both files in lockstep, one read pair at a time.
        for record_item in enumerate(izip(records_forward, records_reverse)):
            sort_sequence(output_streams, record_item)
    close_output_streams(output_streams)
    return 1
def sort_sequence(output_streams, record_item, locks=None):
    '''
    Writes a single read pair to the output stream matching its barcode
    number, or silently drops the pair when no barcode can be assigned.

    record_item: an (index, (forward, reverse)) tuple as produced by
        enumerate(izip(...)); the enumerate index itself is unused.
    locks: optional list of per-barcode locks; required when multiple
        threads share output_streams.
    '''
    # Tuple parameters in the signature are Python-2-only syntax (removed by
    # PEP 3113), so unpack inside the body instead.
    _index, (forward, reverse) = record_item
    barcode_number = get_barcode_number(forward, reverse)
    if barcode_number == -1:
        return
    if locks is not None:
        locks[barcode_number].acquire()
    try:
        SeqIO.write(forward, output_streams[barcode_number], FORMAT)
        SeqIO.write(reverse, output_streams[barcode_number], FORMAT)
    finally:
        # Release in a finally block so a failed write cannot leak the lock.
        if locks is not None:
            locks[barcode_number].release()
def sort_sequence_chunks(records_forward, records_reverse, chunk_size, output_streams):
    '''
    Sorts the reads in such a way as to minimize disk seeks. The algorithm works
    as follows:
    1. Read through the forward file and load `chunk_size` records into memory.
    2. Read through the same amount of the reverse file, and for each record,
    determine its barcode number and place the corresponding forward and
    reverse records into the appropriate bin of a dictionary.
    3. Iterate over the dictionary mapping barcode numbers to records, and write
    out the records found for each barcode.
    4. Repeat until the forward file is over.
    For each chunk of the data, we seek once into the reverse file, then once for
    each barcode bin that was occupied in that chunk. Thus, the larger the chunks,
    the greater the RAM usage and the fewer disk seeks are required to read and
    write out the data.
    '''
    collected_forward = []
    # Closure: reads whatever list `collected_forward` is bound to at call
    # time, so rebinding it below starts a fresh chunk.
    def collate_records():
        # Collect reverse records now
        barcode_bins = {}
        for record_index, forward in enumerate(collected_forward):
            # Advance the reverse iterator in lockstep; stop if it runs dry.
            reverse = next(records_reverse, None)
            if reverse is None: break
            barcode_number = get_barcode_number(forward, reverse)
            # -1 means low quality or no matching code: drop the pair.
            if barcode_number == -1: continue
            if barcode_number in barcode_bins:
                barcode_bins[barcode_number].append((forward, reverse))
            else:
                barcode_bins[barcode_number] = [(forward, reverse)]
        # Write the newly completed entries to disk, one bin (and therefore
        # one output-file seek) at a time.
        for barcode, items in barcode_bins.items():
            for forward_item, reverse_item in items:
                SeqIO.write(forward_item, output_streams[barcode], FORMAT)
                SeqIO.write(reverse_item, output_streams[barcode], FORMAT)
    chunk_index = 0
    for forward in records_forward:
        collected_forward.append(forward)
        if len(collected_forward) == chunk_size:
            print("Writing chunk {} to file...".format(chunk_index))
            collate_records()
            collected_forward = []
            chunk_index += 1
    # Flush the final, possibly partial chunk.
    collate_records()
def open_output_streams(out_dir, prefix, num_files, append=False):
    '''
    Opens num_files output files in out_dir, named prefix + "0" through
    prefix + str(num_files - 1), and returns the list of file objects in
    that order. The caller must close them (see close_output_streams).
    '''
    # "a" preserves existing contents so the multithreaded workers can
    # accumulate several chunks into the same per-barcode files.
    mode = "a" if append else "w"
    # range instead of the Python-2-only xrange: the comprehension
    # materializes the full list anyway, and this keeps the helper runnable
    # under Python 3 as well.
    return [open(os.path.join(out_dir, prefix + str(i)), mode) for i in range(num_files)]
def close_output_streams(streams):
    '''Closes every file object in streams.'''
    for open_file in streams:
        open_file.close()
### Multithread implementation
# Shared queue of (forward_path, reverse_path) chunk-file pairs that the
# supplier thread has written but no worker has claimed yet. All access is
# guarded by the lock created in sort_sequences_multithread.
available_chunk_files = []
def sort_sequences_multithread(forward_path, reverse_path, out_dir, num_lines=6e7, num_threads=4):
    '''
    The idea behind the streaming version is to transfer chunks of size num_lines
    into temporary files, then hand them to worker threads to sort. Each worker
    thread handles its own set of output files, which will be joined together at
    the end of this function. The "master" (this method) is responsible for making
    sure that every worker always has a chunk to work on, and that no chunk is
    being handled by multiple workers.
    '''
    global available_chunk_files
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # Temporary directories holding the forward (split_1) and reverse
    # (split_2) chunk files.
    splits = (os.path.join(out_dir, "split_1"), os.path.join(out_dir, "split_2"))
    # Guards available_chunk_files, which is shared with the supplier thread.
    lock = threading.Lock()
    streams = (open(forward_path, "rU"), open(reverse_path, "rU"))
    workers = [StreamingSortWorker(i, out_dir) for i in xrange(num_threads)]
    # The supplier keeps at most num_threads * 3 chunks queued at once.
    supply_thread = threading.Thread(target=streaming_work_supplier, args=(lock, streams, splits, num_lines, num_threads * 3))
    supply_thread.start()
    # Dispatch loop: hand each queued chunk to the first idle worker, polling
    # once per second, until the supplier is done and the queue is drained.
    while supply_thread.is_alive() or len(available_chunk_files) > 0:
        for worker in workers:
            if not worker.is_working:
                with lock:
                    if len(available_chunk_files) == 0: continue
                    forward, reverse = available_chunk_files.pop(0)
                    # Spawn a worker thread
                    print("Starting chunk {}...".format(os.path.basename(forward)))
                    worker_thread = threading.Thread(target=worker.sort, args=(forward, reverse))
                    worker_thread.start()
        time.sleep(1)
    for worker in workers:
        if worker.thread is not None:
            worker.thread.join()
    # NOTE: `file` is the Python 2 built-in type; this line (together with
    # izip/xrange/cStringIO elsewhere) ties the script to Python 2.
    map(file.close, streams)
    print("Joining files...")
    # Merge each worker's per-barcode files into a single file per barcode,
    # one barcode per pool process.
    pool = multiprocessing.Pool(processes=NUM_PROCESSES)
    processor = partial(join_files_processor, out_dir, len(workers))
    pool.map(processor, range(len(index_codes) * len(barcodes)))
    print("Cleaning up...")
    # Remove the chunk directories and the per-worker output directories.
    paths_to_delete = list(splits) + [os.path.join(out_dir, str(i)) for i in xrange(len(workers))]
    for path in paths_to_delete:
        if os.path.exists(path):
            print("Deleting {}".format(path))
            shutil.rmtree(path)
    print("Done.")
class StreamingSortWorker(object):
    '''
    An object that lives in its own thread and sorts the reads from the fastq
    files it is given into barcodes in its own out directory.
    '''
    def __init__(self, id, out_base_dir):
        # Numeric identifier; also names this worker's output subdirectory.
        self.id = id
        self.out_dir = os.path.join(out_base_dir, str(id))
        # True while a sort() call is in flight. NOTE(review): read and
        # written from different threads without a lock; appears to rely on
        # the master's coarse 1-second polling -- confirm if stricter
        # guarantees are ever needed.
        self.is_working = False
        self.chunk_number = -1
        # The thread currently running sort(), or None when idle.
        self.thread = None
    def sort(self, forward_input, reverse_input, delete_on_complete=True):
        '''
        Sorts one chunk (a forward/reverse fastq file pair) into this
        worker's out_dir, appending to any per-barcode files written by
        earlier chunks. When delete_on_complete is True, the consumed chunk
        files are removed afterwards.
        '''
        self.thread = threading.current_thread()
        self.is_working = True
        if not os.path.exists(self.out_dir):
            os.mkdir(self.out_dir)
        # append=True so successive chunks accumulate in the same files.
        sort_sequences_single_thread(forward_input, reverse_input, self.out_dir, append=True)
        if delete_on_complete:
            os.remove(forward_input)
            os.remove(reverse_input)
        self.is_working = False
        self.thread = None
def streaming_work_supplier(lock, streams, split_dirs, num_lines, num_chunk_files):
    '''
    Runs on a dedicated thread: repeatedly copies num_lines-sized chunks of
    the forward and reverse input streams into numbered files under
    split_dirs[0]/split_dirs[1], publishing each (forward_path, reverse_path)
    pair on available_chunk_files for the workers. At most num_chunk_files
    chunks are kept queued at once. Returns once either stream is exhausted.
    '''
    global available_chunk_files
    # Write the chunks
    forward_stream, reverse_stream = streams
    chunk_number = 0
    while True:
        with lock:
            available_files = len(available_chunk_files)
        if available_files < num_chunk_files:
            print("Transferring chunk {}...".format(chunk_number))
            forward_path = os.path.join(split_dirs[0], str(chunk_number))
            reverse_path = os.path.join(split_dirs[1], str(chunk_number))
            finished_1 = write_file_chunk(forward_stream, forward_path, num_lines)
            finished_2 = write_file_chunk(reverse_stream, reverse_path, num_lines)
            with lock:
                available_chunk_files.append((forward_path, reverse_path))
            chunk_number += 1
            if finished_1 or finished_2:
                print("Finished reading chunks.")
                return
        else:
            # The queue is full: sleep briefly instead of spinning at 100%
            # CPU re-acquiring the lock. The master polls once per second, so
            # a half-second nap cannot starve the workers.
            time.sleep(0.5)
def write_file_chunk(file, output, num_lines):
    '''
    Copies up to num_lines lines from the stream `file` into a new file at
    `output`, creating the containing directory if needed.
    Returns whether or not the file stream ended with this chunk.
    '''
    parent_dir = os.path.dirname(output)
    if not os.path.exists(parent_dir):
        os.mkdir(parent_dir)
    with open(output, "w") as out_file:
        written = 0
        while written < num_lines:
            line = file.readline()
            # readline() returns the empty string only at end of stream.
            if not line:
                return True
            out_file.write(line)
            written += 1
    return False
def join_files(file_paths, out_file):
    '''
    Concatenates the files at the given paths into the file at out_file.
    Paths that do not exist are silently skipped (a worker may never have
    encountered a read for a given barcode).
    '''
    with open(out_file, "w") as dest:
        for path in file_paths:
            if not os.path.exists(path):
                continue
            # Mode "r" rather than the legacy "rU": universal-newline
            # translation is the default for text mode in Python 3, and the
            # "U" flag was removed entirely in Python 3.11.
            with open(path, "r") as chunk:
                for line in chunk:
                    dest.write(line)
def join_files_processor(out_dir, num_chunks, barcode):
    '''
    Multiprocessing worker: merges the per-worker output files for a single
    barcode (out_dir/<worker>/barcode_<n> for each of num_chunks worker
    directories) into out_dir/barcode_<n>, then deletes the per-worker files.
    '''
    # Get the paths for the appropriate reads for each chunk. range rather
    # than the Python-2-only xrange: the list is small and fully consumed.
    barcode_paths = [os.path.join(out_dir, str(chunk_number), BARCODE_FILE_PREFIX + str(barcode))
                     for chunk_number in range(num_chunks)]
    join_files(barcode_paths, os.path.join(out_dir, BARCODE_FILE_PREFIX + str(barcode)))
    # join_files tolerates missing inputs; mirror that here when cleaning up.
    for path in barcode_paths:
        if not os.path.exists(path): continue
        os.remove(path)
if __name__ == '__main__':
a = time.time() # Time the script started
parser = argparse.ArgumentParser(description='Sort a pair of forward and reverse reads in FASTQ format into separate files for each barcode.')
parser.add_argument('forward', metavar='F', type=str,
help='The path to the forward reads')
parser.add_argument('reverse', metavar='R', type=str,
help='The path to the reverse | |
<gh_stars>0
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A basic serializer used to serialize/deserialize Cirq circuits for tfq."""
# TODO(pmassey / anyone): determine if this should be kept as globals.
import copy
import numbers
import sympy
import numpy as np
import cirq
import cirq.google.api.v2 as v2
from tensorflow_quantum.core.proto import pauli_sum_pb2
# Needed to allow autograph to crawl AST without erroring.
_CONSTANT_TRUE = lambda x: True
def _round(x):
return np.round(x, 6) if isinstance(x, float) else x
def _parse_mul(expr):
"""Returns the lhs and rhs of a sympy.Mul. This is written
to prevent autograph from going into sympy library code and having
conflicts with the @cacheit decorator."""
if len(expr.args) == 1:
return sympy.S.One, expr.args[0]
if len(expr.args) == 2:
return expr.args[0], expr.args[1]
raise ValueError("Arithmetic expression outside of simple "
"scalar multiplication is currently not "
"supported. See serializer.py for more "
"information.")
def _scalar_extractor(x):
    """This is a workaround to support symbol scalar multiplication.
    In the future we should likely get rid of this in favor of proper
    expression parsing once cirq supports it. See cirq.op_serializer
    and cirq's program protobuf for details. This is needed for things
    like cirq.rx('alpha').

    Returns the numeric factor of x: 1.0 for plain numbers and bare symbols,
    or the number side of a symbol * number product. Raises TypeError for
    non-number/non-sympy input and ValueError for any other expression.
    """
    if not isinstance(x, (numbers.Real, sympy.Expr)):
        raise TypeError("Invalid input argument for exponent.")
    # Plain numbers and bare symbols carry an implicit scalar of 1.0.
    if isinstance(x, (numbers.Real, sympy.Symbol)):
        return 1.0
    expr = x.evalf()
    if isinstance(expr, sympy.mul.Mul):
        lhs_eval, rhs_eval = _parse_mul(expr)
        if isinstance(lhs_eval, sympy.Symbol) and isinstance(
                rhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)):
            # lhs contains symbol rhs contains number.
            return _round(float(rhs_eval))
        if isinstance(rhs_eval, sympy.Symbol) and isinstance(
                lhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)):
            # lhs contains number.
            return _round(float(lhs_eval))
    raise ValueError("Arithmetic expression outside of simple "
                     "scalar multiplication is currently not "
                     "supported. See serializer.py for more "
                     "information.")
def _symbol_extractor(x):
    """This is the second extractor for above.

    Returns the symbolic factor of x: the rounded float itself for plain
    numbers, the symbol itself for bare symbols, or the symbol side of a
    symbol * number product. Raises TypeError/ValueError exactly as
    _scalar_extractor does.
    """
    if not isinstance(x, (numbers.Real, sympy.Expr)):
        raise TypeError("Invalid input argument for exponent.")
    if isinstance(x, numbers.Real):
        return _round(float(x))
    if isinstance(x, sympy.Symbol):
        return x
    expr = x.evalf()
    if isinstance(expr, sympy.mul.Mul):
        lhs_eval, rhs_eval = _parse_mul(expr)
        if isinstance(lhs_eval, sympy.Symbol) and isinstance(
                rhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)):
            # lhs contains symbol rhs contains number.
            return lhs_eval
        if isinstance(rhs_eval, sympy.Symbol) and isinstance(
                lhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)):
            # lhs contains number.
            return rhs_eval
    raise ValueError("Arithmetic expression outside of simple "
                     "scalar multiplication is currently not "
                     "supported. See serializer.py for more "
                     "information.")
def _serialize_controls(gate):
    """Serialize the control qubits of `gate` as a comma-joined string of
    proto ids, or '' when the gate carries no `_tfq_control_qubits`."""
    if not hasattr(gate, '_tfq_control_qubits'):
        return ''
    proto_ids = [v2.qubit_to_proto_id(q) for q in gate._tfq_control_qubits]
    return ','.join(proto_ids)
def _serialize_control_vals(gate):
"""Helper to serialize control values if applicable.."""
if hasattr(gate, '_tfq_control_values'):
return ','.join(str(v[0]) for v in gate._tfq_control_values)
return ''
class DelayedAssignmentGate(cirq.Gate):
    """Class to do control qubit assignment before sub_gate qubit assignment.

    Wraps a gate-producing callable together with the control qubits/values
    decoded from a proto, so that on() can first build the sub-gate on its
    target qubits and only then apply controlled_by.
    """
    def __init__(self, gate_callable, control_qubits, control_values):
        # Callable that yields the underlying gate when given target qubits.
        self._gate_callable = gate_callable
        self._control_qubits = control_qubits
        self._control_values = control_values
    def _qid_shape_(self):
        # This workaround class is never meant to be inspected as a real gate.
        raise ValueError("Called qid_shape on workaround class.")
    # pylint: disable=invalid-name
    def on(self, *qubits):
        """Returns gate_callable on qubits controlled by control_qubits."""
        return self._gate_callable(*qubits).controlled_by(
            *self._control_qubits, control_values=self._control_values)
    # pylint: enable=invalid-name
def _optional_control_promote(gate, qubits_message, values_message):
"""Optionally promote to controlled gate based on serialized control msg."""
if qubits_message == '' and values_message == '':
return gate
qbs = [v2.qubit_from_proto_id(qb) for qb in qubits_message.split(',')]
vals = [int(cv) for cv in values_message.split(',')]
return DelayedAssignmentGate(gate, qbs, vals)
def _eigen_gate_serializer(gate_type, serialized_id):
    """Make standard serializer for eigen gates.

    The exponent is split into a symbolic part ("exponent", via
    _symbol_extractor) and a numeric factor ("exponent_scalar", via
    _scalar_extractor) so expressions like 3 * alpha survive serialization.
    """
    args = [
        cirq.google.SerializingArg(
            serialized_name="exponent",
            serialized_type=float,
            op_getter=lambda x: _symbol_extractor(x.gate.exponent)),
        cirq.google.SerializingArg(
            serialized_name="exponent_scalar",
            serialized_type=float,
            op_getter=lambda x: _scalar_extractor(x.gate.exponent)),
        cirq.google.SerializingArg(
            serialized_name="global_shift",
            serialized_type=float,
            op_getter=lambda x: float(x.gate._global_shift)),
        # Controls attached by tfq's controlled_by workaround, if any.
        cirq.google.SerializingArg(serialized_name="control_qubits",
                                   serialized_type=str,
                                   op_getter=lambda x: _serialize_controls(x)),
        cirq.google.SerializingArg(
            serialized_name="control_values",
            serialized_type=str,
            op_getter=lambda x: _serialize_control_vals(x))
    ]
    return cirq.google.GateOpSerializer(gate_type=gate_type,
                                        serialized_gate_id=serialized_id,
                                        args=args,
                                        can_serialize_predicate=_CONSTANT_TRUE)
def _eigen_gate_deserializer(gate_type, serialized_id):
    """Make standard deserializer for eigen gates."""
    def _scalar_combiner(exponent, global_shift, exponent_scalar,
                         control_qubits, control_values):
        """This is a workaround to support symbol scalar multiplication.
        In the future we should likely get rid of this in favor of proper
        expression parsing once cirq supports it. See cirq.op_serializer
        and cirq's program protobuf for details. This is needed for things
        like cirq.rx('alpha').
        """
        # Scalar of exactly 1.0 means the exponent was a bare number/symbol;
        # skip the multiplication so the value round-trips unchanged.
        if exponent_scalar == 1.0:
            return _optional_control_promote(
                gate_type(exponent=_round(exponent),
                          global_shift=_round(global_shift)), control_qubits,
                control_values)
        return _optional_control_promote(
            gate_type(exponent=_round(exponent) * _round(exponent_scalar),
                      global_shift=_round(global_shift)), control_qubits,
            control_values)
    # Field names mirror the ones produced by _eigen_gate_serializer.
    args = [
        cirq.google.DeserializingArg(serialized_name="exponent",
                                     constructor_arg_name="exponent"),
        cirq.google.DeserializingArg(serialized_name="global_shift",
                                     constructor_arg_name="global_shift"),
        cirq.google.DeserializingArg(serialized_name="exponent_scalar",
                                     constructor_arg_name="exponent_scalar"),
        cirq.google.DeserializingArg(serialized_name="control_qubits",
                                     constructor_arg_name="control_qubits"),
        cirq.google.DeserializingArg(serialized_name="control_values",
                                     constructor_arg_name="control_values")
    ]
    return cirq.google.GateOpDeserializer(serialized_gate_id=serialized_id,
                                          gate_constructor=_scalar_combiner,
                                          args=args)
def _fsim_gate_serializer():
    """Make standard serializer for fsim gate.

    Both angles (theta, phi) get the same symbol/scalar split used by
    _eigen_gate_serializer.
    """
    args = [
        cirq.google.SerializingArg(
            serialized_name="theta",
            serialized_type=float,
            op_getter=lambda x: _symbol_extractor(x.gate.theta)),
        cirq.google.SerializingArg(
            serialized_name="phi",
            serialized_type=float,
            op_getter=lambda x: _symbol_extractor(x.gate.phi)),
        cirq.google.SerializingArg(
            serialized_name="theta_scalar",
            serialized_type=float,
            op_getter=lambda x: _scalar_extractor(x.gate.theta)),
        cirq.google.SerializingArg(
            serialized_name="phi_scalar",
            serialized_type=float,
            op_getter=lambda x: _scalar_extractor(x.gate.phi)),
        cirq.google.SerializingArg(serialized_name="control_qubits",
                                   serialized_type=str,
                                   op_getter=lambda x: _serialize_controls(x)),
        cirq.google.SerializingArg(
            serialized_name="control_values",
            serialized_type=str,
            op_getter=lambda x: _serialize_control_vals(x))
    ]
    return cirq.google.GateOpSerializer(gate_type=cirq.FSimGate,
                                        serialized_gate_id="FSIM",
                                        args=args,
                                        can_serialize_predicate=_CONSTANT_TRUE)
def _fsim_gate_deserializer():
    """Make standard deserializer for fsim gate."""
    def _scalar_combiner(theta, theta_scalar, phi, phi_scalar, control_qubits,
                         control_values):
        """This is a workaround to support symbol scalar multiplication.
        See `_eigen_gate_deserializer` for details.
        """
        # Recombine each angle from its (symbol, scalar) halves.
        return _optional_control_promote(
            cirq.FSimGate(theta=_round(theta) * _round(theta_scalar),
                          phi=_round(phi) * _round(phi_scalar)), control_qubits,
            control_values)
    # Field names mirror the ones produced by _fsim_gate_serializer.
    args = [
        cirq.google.DeserializingArg(serialized_name="theta",
                                     constructor_arg_name="theta"),
        cirq.google.DeserializingArg(serialized_name="phi",
                                     constructor_arg_name="phi"),
        cirq.google.DeserializingArg(serialized_name="theta_scalar",
                                     constructor_arg_name="theta_scalar"),
        cirq.google.DeserializingArg(serialized_name="phi_scalar",
                                     constructor_arg_name="phi_scalar"),
        cirq.google.DeserializingArg(serialized_name="control_qubits",
                                     constructor_arg_name="control_qubits"),
        cirq.google.DeserializingArg(serialized_name="control_values",
                                     constructor_arg_name="control_values")
    ]
    return cirq.google.GateOpDeserializer(serialized_gate_id="FSIM",
                                          gate_constructor=_scalar_combiner,
                                          args=args)
def _identity_gate_serializer():
    """Make a standard serializer for the single qubit identity."""
    def _identity_check(x):
        # Rejects multi-qubit identities at serialization time.
        if x.gate.num_qubits() != 1:
            raise ValueError("Multi-Qubit identity gate not supported."
                             "Given: {}. To work around this, use "
                             "cirq.I.on_each instead.".format(str(x)))
        return True
    # Here `args` is used for two reasons. 1. GateOpSerializer doesn't work well
    # with empty arg lists. 2. It is a nice way to check identity gate size.
    args = [
        cirq.google.SerializingArg(serialized_name="unused",
                                   serialized_type=bool,
                                   op_getter=_identity_check),
        cirq.google.SerializingArg(serialized_name="control_qubits",
                                   serialized_type=str,
                                   op_getter=lambda x: _serialize_controls(x)),
        cirq.google.SerializingArg(
            serialized_name="control_values",
            serialized_type=str,
            op_getter=lambda x: _serialize_control_vals(x))
    ]
    return cirq.google.GateOpSerializer(gate_type=cirq.IdentityGate,
                                        serialized_gate_id="I",
                                        args=args,
                                        can_serialize_predicate=_CONSTANT_TRUE)
def _identity_gate_deserializer():
    """Make a standard deserializer for the single qubit identity."""
    # Field names mirror the ones produced by _identity_gate_serializer;
    # "unused" exists only because the serializer needs a non-empty arg list.
    args = [
        cirq.google.DeserializingArg(serialized_name="unused",
                                     constructor_arg_name="unused"),
        cirq.google.DeserializingArg(serialized_name="control_qubits",
                                     constructor_arg_name="control_qubits"),
        cirq.google.DeserializingArg(serialized_name="control_values",
                                     constructor_arg_name="control_values")
    ]
    def _cirq_i_workaround(unused, control_qubits, control_values):
        # Always reconstructs the singleton cirq.I (optionally controlled).
        return _optional_control_promote(cirq.I, control_qubits, control_values)
    return cirq.google.GateOpDeserializer(serialized_gate_id="I",
                                          gate_constructor=_cirq_i_workaround,
                                          args=args)
def _phased_eigen_gate_serializer(gate_type, serialized_id):
    """Make a standard serializer for phased eigen gates.

    Like _eigen_gate_serializer, but both the exponent and the phase
    exponent get a symbol/scalar split.
    """
    args = [
        cirq.google.SerializingArg(
            serialized_name="phase_exponent",
            serialized_type=float,
            op_getter=lambda x: _symbol_extractor(x.gate.phase_exponent)),
        cirq.google.SerializingArg(
            serialized_name="phase_exponent_scalar",
            serialized_type=float,
            op_getter=lambda x: _scalar_extractor(x.gate.phase_exponent)),
        cirq.google.SerializingArg(
            serialized_name="exponent",
            serialized_type=float,
            op_getter=lambda x: _symbol_extractor(x.gate.exponent)),
        cirq.google.SerializingArg(
            serialized_name="exponent_scalar",
            serialized_type=float,
            op_getter=lambda x: _scalar_extractor(x.gate.exponent)),
        cirq.google.SerializingArg(
            serialized_name="global_shift",
            serialized_type=float,
            op_getter=lambda x: float(x.gate.global_shift)),
        cirq.google.SerializingArg(serialized_name="control_qubits",
                                   serialized_type=str,
                                   op_getter=lambda x: _serialize_controls(x)),
        cirq.google.SerializingArg(
            serialized_name="control_values",
            serialized_type=str,
            op_getter=lambda x: _serialize_control_vals(x))
    ]
    return cirq.google.GateOpSerializer(gate_type=gate_type,
                                        serialized_gate_id=serialized_id,
                                        args=args,
                                        can_serialize_predicate=_CONSTANT_TRUE)
def _phased_eigen_gate_deserializer(gate_type, serialized_id):
    """Make a standard deserializer for phased eigen gates."""
    def _scalar_combiner(exponent, global_shift, exponent_scalar,
                         phase_exponent, phase_exponent_scalar, control_qubits,
                         control_values):
        """This is a workaround to support symbol scalar multiplication.
        In the future we should likely get rid of this in favor of proper
        expression parsing once cirq supports it. See cirq.op_serializer
        and cirq's program protobuf for details. This is needed for things
        like cirq.rx('alpha').
        """
        exponent = _round(exponent)
        phase_exponent = _round(phase_exponent)
        # A scalar of exactly 1.0 marks a bare number/symbol; skip the
        # multiplication so the original value round-trips unchanged.
        exponent = exponent if exponent_scalar == 1.0 \
            else exponent * _round(exponent_scalar)
        phase_exponent = phase_exponent if phase_exponent_scalar == 1.0 \
            else phase_exponent * _round(phase_exponent_scalar)
        if global_shift != 0:
            # needed in case this specific phasedeigengate doesn't
            # have a global_phase in constructor.
            return _optional_control_promote(
                gate_type(exponent=exponent,
                          global_shift=_round(global_shift),
                          phase_exponent=phase_exponent), control_qubits,
                control_values)
        return _optional_control_promote(
            gate_type(exponent=exponent, phase_exponent=phase_exponent),
            control_qubits, control_values)
    # Field names mirror the ones produced by _phased_eigen_gate_serializer.
    args = [
        cirq.google.DeserializingArg(serialized_name="phase_exponent",
                                     constructor_arg_name="phase_exponent"),
        cirq.google.DeserializingArg(
            serialized_name="phase_exponent_scalar",
            constructor_arg_name="phase_exponent_scalar"),
        cirq.google.DeserializingArg(serialized_name="exponent",
                                     constructor_arg_name="exponent"),
        cirq.google.DeserializingArg(serialized_name="exponent_scalar",
                                     constructor_arg_name="exponent_scalar"),
        cirq.google.DeserializingArg(serialized_name="global_shift",
                                     constructor_arg_name="global_shift"),
        cirq.google.DeserializingArg(serialized_name="control_qubits",
                                     constructor_arg_name="control_qubits"),
        cirq.google.DeserializingArg(serialized_name="control_values",
                                     constructor_arg_name="control_values")
    ]
    return cirq.google.GateOpDeserializer(serialized_gate_id=serialized_id,
                                          gate_constructor=_scalar_combiner,
                                          args=args)
# Serialized proto ids for each supported eigen gate type.
EIGEN_GATES_DICT = {
    cirq.XPowGate: "XP",
    cirq.XXPowGate: "XXP",
    cirq.YPowGate: "YP",
    cirq.YYPowGate: "YYP",
    cirq.ZPowGate: "ZP",
    cirq.ZZPowGate: "ZZP",
    cirq.HPowGate: "HP",
    cirq.CZPowGate: "CZP",
    cirq.CNotPowGate: "CNP",
    cirq.SwapPowGate: "SP",
    cirq.ISwapPowGate: "ISP",
}
# Serialized proto ids for the phased eigen gate types.
PHASED_EIGEN_GATES_DICT = {
    cirq.PhasedXPowGate: "PXP",
    cirq.PhasedISwapPowGate: "PISP",
}
# One serializer per supported gate family (eigen, fsim, identity, phased).
SERIALIZERS = [
    _eigen_gate_serializer(g, g_name) for g, g_name in EIGEN_GATES_DICT.items()
] + [
    _fsim_gate_serializer(),
] + [
    _identity_gate_serializer(),
] + [
    _phased_eigen_gate_serializer(g, g_name)
    for g, g_name in PHASED_EIGEN_GATES_DICT.items()
]
# Matching deserializers, keyed by the same serialized ids.
DESERIALIZERS = [
    _eigen_gate_deserializer(g, g_name)
    for g, g_name in EIGEN_GATES_DICT.items()
] + [
    _fsim_gate_deserializer(),
] + [
    _identity_gate_deserializer(),
] + [
    _phased_eigen_gate_deserializer(g, g_name)
    for g, g_name in PHASED_EIGEN_GATES_DICT.items()
]
# The gate set TFQ uses to convert circuits to and from protos.
SERIALIZER = cirq.google.SerializableGateSet(gate_set_name="tfq_gate_set",
                                             serializers=SERIALIZERS,
                                             deserializers=DESERIALIZERS)
def serialize_circuit(circuit_inp):
"""Returns a `cirq.Program` proto representing the `cirq.Circuit`.
Note that the circuit must use gates valid in the tfq_gate_set.
Currently we only support scalar multiplication of symbols and
no other more complex arithmetic expressions. This means
we can support things like X**(3*alpha), and Rx(alpha). Because
we use the `cirq.Program` proto, we only support `cirq.GridQubit` instances
during serialization of circuits.
Note: once serialized terminal measurements are removed.
Args:
circuit_inp: A `cirq.Circuit`.
| |
'\u2267\u0338',
'ngeqslant;': '\u2a7e\u0338',
'nges;': '\u2a7e\u0338',
'nGg;': '\u22d9\u0338',
'ngsim;': '\u2275',
'nGt;': '\u226b\u20d2',
'ngt;': '\u226f',
'ngtr;': '\u226f',
'nGtv;': '\u226b\u0338',
'nhArr;': '\u21ce',
'nharr;': '\u21ae',
'nhpar;': '\u2af2',
'ni;': '\u220b',
'nis;': '\u22fc',
'nisd;': '\u22fa',
'niv;': '\u220b',
'NJcy;': '\u040a',
'njcy;': '\u045a',
'nlArr;': '\u21cd',
'nlarr;': '\u219a',
'nldr;': '\u2025',
'nlE;': '\u2266\u0338',
'nle;': '\u2270',
'nLeftarrow;': '\u21cd',
'nleftarrow;': '\u219a',
'nLeftrightarrow;': '\u21ce',
'nleftrightarrow;': '\u21ae',
'nleq;': '\u2270',
'nleqq;': '\u2266\u0338',
'nleqslant;': '\u2a7d\u0338',
'nles;': '\u2a7d\u0338',
'nless;': '\u226e',
'nLl;': '\u22d8\u0338',
'nlsim;': '\u2274',
'nLt;': '\u226a\u20d2',
'nlt;': '\u226e',
'nltri;': '\u22ea',
'nltrie;': '\u22ec',
'nLtv;': '\u226a\u0338',
'nmid;': '\u2224',
'NoBreak;': '\u2060',
'NonBreakingSpace;': '\xa0',
'Nopf;': '\u2115',
'nopf;': '\U0001d55f',
'not': '\xac',
'Not;': '\u2aec',
'not;': '\xac',
'NotCongruent;': '\u2262',
'NotCupCap;': '\u226d',
'NotDoubleVerticalBar;': '\u2226',
'NotElement;': '\u2209',
'NotEqual;': '\u2260',
'NotEqualTilde;': '\u2242\u0338',
'NotExists;': '\u2204',
'NotGreater;': '\u226f',
'NotGreaterEqual;': '\u2271',
'NotGreaterFullEqual;': '\u2267\u0338',
'NotGreaterGreater;': '\u226b\u0338',
'NotGreaterLess;': '\u2279',
'NotGreaterSlantEqual;': '\u2a7e\u0338',
'NotGreaterTilde;': '\u2275',
'NotHumpDownHump;': '\u224e\u0338',
'NotHumpEqual;': '\u224f\u0338',
'notin;': '\u2209',
'notindot;': '\u22f5\u0338',
'notinE;': '\u22f9\u0338',
'notinva;': '\u2209',
'notinvb;': '\u22f7',
'notinvc;': '\u22f6',
'NotLeftTriangle;': '\u22ea',
'NotLeftTriangleBar;': '\u29cf\u0338',
'NotLeftTriangleEqual;': '\u22ec',
'NotLess;': '\u226e',
'NotLessEqual;': '\u2270',
'NotLessGreater;': '\u2278',
'NotLessLess;': '\u226a\u0338',
'NotLessSlantEqual;': '\u2a7d\u0338',
'NotLessTilde;': '\u2274',
'NotNestedGreaterGreater;': '\u2aa2\u0338',
'NotNestedLessLess;': '\u2aa1\u0338',
'notni;': '\u220c',
'notniva;': '\u220c',
'notnivb;': '\u22fe',
'notnivc;': '\u22fd',
'NotPrecedes;': '\u2280',
'NotPrecedesEqual;': '\u2aaf\u0338',
'NotPrecedesSlantEqual;': '\u22e0',
'NotReverseElement;': '\u220c',
'NotRightTriangle;': '\u22eb',
'NotRightTriangleBar;': '\u29d0\u0338',
'NotRightTriangleEqual;': '\u22ed',
'NotSquareSubset;': '\u228f\u0338',
'NotSquareSubsetEqual;': '\u22e2',
'NotSquareSuperset;': '\u2290\u0338',
'NotSquareSupersetEqual;': '\u22e3',
'NotSubset;': '\u2282\u20d2',
'NotSubsetEqual;': '\u2288',
'NotSucceeds;': '\u2281',
'NotSucceedsEqual;': '\u2ab0\u0338',
'NotSucceedsSlantEqual;': '\u22e1',
'NotSucceedsTilde;': '\u227f\u0338',
'NotSuperset;': '\u2283\u20d2',
'NotSupersetEqual;': '\u2289',
'NotTilde;': '\u2241',
'NotTildeEqual;': '\u2244',
'NotTildeFullEqual;': '\u2247',
'NotTildeTilde;': '\u2249',
'NotVerticalBar;': '\u2224',
'npar;': '\u2226',
'nparallel;': '\u2226',
'nparsl;': '\u2afd\u20e5',
'npart;': '\u2202\u0338',
'npolint;': '\u2a14',
'npr;': '\u2280',
'nprcue;': '\u22e0',
'npre;': '\u2aaf\u0338',
'nprec;': '\u2280',
'npreceq;': '\u2aaf\u0338',
'nrArr;': '\u21cf',
'nrarr;': '\u219b',
'nrarrc;': '\u2933\u0338',
'nrarrw;': '\u219d\u0338',
'nRightarrow;': '\u21cf',
'nrightarrow;': '\u219b',
'nrtri;': '\u22eb',
'nrtrie;': '\u22ed',
'nsc;': '\u2281',
'nsccue;': '\u22e1',
'nsce;': '\u2ab0\u0338',
'Nscr;': '\U0001d4a9',
'nscr;': '\U0001d4c3',
'nshortmid;': '\u2224',
'nshortparallel;': '\u2226',
'nsim;': '\u2241',
'nsime;': '\u2244',
'nsimeq;': '\u2244',
'nsmid;': '\u2224',
'nspar;': '\u2226',
'nsqsube;': '\u22e2',
'nsqsupe;': '\u22e3',
'nsub;': '\u2284',
'nsubE;': '\u2ac5\u0338',
'nsube;': '\u2288',
'nsubset;': '\u2282\u20d2',
'nsubseteq;': '\u2288',
'nsubseteqq;': '\u2ac5\u0338',
'nsucc;': '\u2281',
'nsucceq;': '\u2ab0\u0338',
'nsup;': '\u2285',
'nsupE;': '\u2ac6\u0338',
'nsupe;': '\u2289',
'nsupset;': '\u2283\u20d2',
'nsupseteq;': '\u2289',
'nsupseteqq;': '\u2ac6\u0338',
'ntgl;': '\u2279',
'Ntilde': '\xd1',
'ntilde': '\xf1',
'Ntilde;': '\xd1',
'ntilde;': '\xf1',
'ntlg;': '\u2278',
'ntriangleleft;': '\u22ea',
'ntrianglelefteq;': '\u22ec',
'ntriangleright;': '\u22eb',
'ntrianglerighteq;': '\u22ed',
'Nu;': '\u039d',
'nu;': '\u03bd',
'num;': '#',
'numero;': '\u2116',
'numsp;': '\u2007',
'nvap;': '\u224d\u20d2',
'nVDash;': '\u22af',
'nVdash;': '\u22ae',
'nvDash;': '\u22ad',
'nvdash;': '\u22ac',
'nvge;': '\u2265\u20d2',
'nvgt;': '>\u20d2',
'nvHarr;': '\u2904',
'nvinfin;': '\u29de',
'nvlArr;': '\u2902',
'nvle;': '\u2264\u20d2',
'nvlt;': '<\u20d2',
'nvltrie;': '\u22b4\u20d2',
'nvrArr;': '\u2903',
'nvrtrie;': '\u22b5\u20d2',
'nvsim;': '\u223c\u20d2',
'nwarhk;': '\u2923',
'nwArr;': '\u21d6',
'nwarr;': '\u2196',
'nwarrow;': '\u2196',
'nwnear;': '\u2927',
'Oacute': '\xd3',
'oacute': '\xf3',
'Oacute;': '\xd3',
'oacute;': '\xf3',
'oast;': '\u229b',
'ocir;': '\u229a',
'Ocirc': '\xd4',
'ocirc': '\xf4',
'Ocirc;': '\xd4',
'ocirc;': '\xf4',
'Ocy;': '\u041e',
'ocy;': '\u043e',
'odash;': '\u229d',
'Odblac;': '\u0150',
'odblac;': '\u0151',
'odiv;': '\u2a38',
'odot;': '\u2299',
'odsold;': '\u29bc',
'OElig;': '\u0152',
'oelig;': '\u0153',
'ofcir;': '\u29bf',
'Ofr;': '\U0001d512',
'ofr;': '\U0001d52c',
'ogon;': '\u02db',
'Ograve': '\xd2',
'ograve': '\xf2',
'Ograve;': '\xd2',
'ograve;': '\xf2',
'ogt;': '\u29c1',
'ohbar;': '\u29b5',
'ohm;': '\u03a9',
'oint;': '\u222e',
'olarr;': '\u21ba',
'olcir;': '\u29be',
'olcross;': '\u29bb',
'oline;': '\u203e',
'olt;': '\u29c0',
'Omacr;': '\u014c',
'omacr;': '\u014d',
'Omega;': '\u03a9',
'omega;': '\u03c9',
'Omicron;': '\u039f',
'omicron;': '\u03bf',
'omid;': '\u29b6',
'ominus;': '\u2296',
'Oopf;': '\U0001d546',
'oopf;': '\U0001d560',
'opar;': '\u29b7',
'OpenCurlyDoubleQuote;': '\u201c',
'OpenCurlyQuote;': '\u2018',
'operp;': '\u29b9',
'oplus;': '\u2295',
'Or;': '\u2a54',
'or;': '\u2228',
'orarr;': '\u21bb',
'ord;': '\u2a5d',
'order;': '\u2134',
'orderof;': '\u2134',
'ordf': '\xaa',
'ordf;': '\xaa',
'ordm': '\xba',
'ordm;': '\xba',
'origof;': '\u22b6',
'oror;': '\u2a56',
'orslope;': '\u2a57',
'orv;': '\u2a5b',
'oS;': '\u24c8',
'Oscr;': '\U0001d4aa',
'oscr;': '\u2134',
'Oslash': '\xd8',
'oslash': '\xf8',
'Oslash;': '\xd8',
'oslash;': '\xf8',
'osol;': '\u2298',
'Otilde': '\xd5',
'otilde': '\xf5',
'Otilde;': '\xd5',
'otilde;': '\xf5',
'Otimes;': '\u2a37',
'otimes;': '\u2297',
'otimesas;': '\u2a36',
'Ouml': '\xd6',
'ouml': '\xf6',
'Ouml;': '\xd6',
'ouml;': '\xf6',
'ovbar;': '\u233d',
'OverBar;': '\u203e',
'OverBrace;': '\u23de',
'OverBracket;': '\u23b4',
'OverParenthesis;': '\u23dc',
'par;': '\u2225',
'para': '\xb6',
'para;': '\xb6',
'parallel;': '\u2225',
'parsim;': '\u2af3',
'parsl;': '\u2afd',
'part;': '\u2202',
'PartialD;': '\u2202',
'Pcy;': '\u041f',
'pcy;': '\u043f',
'percnt;': '%',
'period;': '.',
'permil;': '\u2030',
'perp;': '\u22a5',
'pertenk;': '\u2031',
'Pfr;': '\U0001d513',
'pfr;': '\U0001d52d',
'Phi;': '\u03a6',
'phi;': '\u03c6',
'phiv;': '\u03d5',
'phmmat;': '\u2133',
'phone;': '\u260e',
'Pi;': '\u03a0',
'pi;': '\u03c0',
'pitchfork;': '\u22d4',
'piv;': '\u03d6',
'planck;': '\u210f',
'planckh;': '\u210e',
'plankv;': '\u210f',
'plus;': '+',
'plusacir;': '\u2a23',
'plusb;': '\u229e',
'pluscir;': '\u2a22',
'plusdo;': '\u2214',
'plusdu;': '\u2a25',
'pluse;': '\u2a72',
'PlusMinus;': '\xb1',
'plusmn': '\xb1',
'plusmn;': '\xb1',
'plussim;': '\u2a26',
'plustwo;': '\u2a27',
'pm;': '\xb1',
'Poincareplane;': '\u210c',
'pointint;': '\u2a15',
'Popf;': '\u2119',
'popf;': '\U0001d561',
'pound': '\xa3',
'pound;': '\xa3',
'Pr;': '\u2abb',
'pr;': '\u227a',
'prap;': '\u2ab7',
'prcue;': '\u227c',
'prE;': '\u2ab3',
'pre;': '\u2aaf',
'prec;': '\u227a',
'precapprox;': '\u2ab7',
'preccurlyeq;': '\u227c',
'Precedes;': '\u227a',
'PrecedesEqual;': '\u2aaf',
'PrecedesSlantEqual;': '\u227c',
'PrecedesTilde;': '\u227e',
'preceq;': '\u2aaf',
'precnapprox;': '\u2ab9',
'precneqq;': '\u2ab5',
'precnsim;': '\u22e8',
'precsim;': '\u227e',
'Prime;': '\u2033',
'prime;': '\u2032',
'primes;': '\u2119',
'prnap;': '\u2ab9',
'prnE;': '\u2ab5',
'prnsim;': '\u22e8',
'prod;': '\u220f',
'Product;': '\u220f',
'profalar;': '\u232e',
'profline;': '\u2312',
'profsurf;': '\u2313',
'prop;': '\u221d',
'Proportion;': '\u2237',
'Proportional;': '\u221d',
'propto;': '\u221d',
'prsim;': '\u227e',
'prurel;': '\u22b0',
'Pscr;': '\U0001d4ab',
'pscr;': '\U0001d4c5',
'Psi;': '\u03a8',
'psi;': '\u03c8',
'puncsp;': '\u2008',
'Qfr;': '\U0001d514',
'qfr;': '\U0001d52e',
'qint;': '\u2a0c',
'Qopf;': '\u211a',
'qopf;': '\U0001d562',
'qprime;': '\u2057',
'Qscr;': '\U0001d4ac',
'qscr;': '\U0001d4c6',
'quaternions;': '\u210d',
'quatint;': '\u2a16',
'quest;': '?',
'questeq;': '\u225f',
'QUOT': '"',
'quot': '"',
'QUOT;': '"',
'quot;': '"',
'rAarr;': '\u21db',
'race;': '\u223d\u0331',
'Racute;': '\u0154',
'racute;': '\u0155',
'radic;': '\u221a',
'raemptyv;': '\u29b3',
'Rang;': '\u27eb',
'rang;': '\u27e9',
'rangd;': '\u2992',
'range;': '\u29a5',
'rangle;': '\u27e9',
'raquo': '\xbb',
'raquo;': '\xbb',
'Rarr;': '\u21a0',
'rArr;': '\u21d2',
'rarr;': '\u2192',
'rarrap;': '\u2975',
'rarrb;': '\u21e5',
'rarrbfs;': '\u2920',
'rarrc;': '\u2933',
'rarrfs;': '\u291e',
'rarrhk;': '\u21aa',
'rarrlp;': '\u21ac',
'rarrpl;': '\u2945',
'rarrsim;': '\u2974',
'Rarrtl;': '\u2916',
'rarrtl;': '\u21a3',
'rarrw;': '\u219d',
'rAtail;': '\u291c',
'ratail;': '\u291a',
'ratio;': '\u2236',
'rationals;': '\u211a',
'RBarr;': '\u2910',
'rBarr;': '\u290f',
'rbarr;': '\u290d',
'rbbrk;': '\u2773',
'rbrace;': '}',
'rbrack;': ']',
'rbrke;': '\u298c',
'rbrksld;': '\u298e',
'rbrkslu;': '\u2990',
'Rcaron;': '\u0158',
'rcaron;': '\u0159',
'Rcedil;': '\u0156',
'rcedil;': '\u0157',
'rceil;': '\u2309',
'rcub;': '}',
'Rcy;': '\u0420',
'rcy;': '\u0440',
'rdca;': '\u2937',
'rdldhar;': '\u2969',
'rdquo;': '\u201d',
'rdquor;': '\u201d',
'rdsh;': '\u21b3',
'Re;': '\u211c',
'real;': '\u211c',
'realine;': '\u211b',
'realpart;': '\u211c',
'reals;': '\u211d',
'rect;': '\u25ad',
'REG': '\xae',
'reg': '\xae',
'REG;': '\xae',
'reg;': '\xae',
'ReverseElement;': '\u220b',
'ReverseEquilibrium;': '\u21cb',
'ReverseUpEquilibrium;': '\u296f',
'rfisht;': '\u297d',
'rfloor;': '\u230b',
'Rfr;': '\u211c',
'rfr;': '\U0001d52f',
'rHar;': '\u2964',
'rhard;': '\u21c1',
'rharu;': '\u21c0',
'rharul;': '\u296c',
'Rho;': '\u03a1',
'rho;': '\u03c1',
'rhov;': '\u03f1',
'RightAngleBracket;': '\u27e9',
'RightArrow;': '\u2192',
'Rightarrow;': '\u21d2',
'rightarrow;': '\u2192',
'RightArrowBar;': '\u21e5',
'RightArrowLeftArrow;': '\u21c4',
'rightarrowtail;': '\u21a3',
'RightCeiling;': '\u2309',
'RightDoubleBracket;': '\u27e7',
'RightDownTeeVector;': '\u295d',
'RightDownVector;': '\u21c2',
'RightDownVectorBar;': '\u2955',
'RightFloor;': '\u230b',
'rightharpoondown;': '\u21c1',
'rightharpoonup;': '\u21c0',
'rightleftarrows;': '\u21c4',
'rightleftharpoons;': '\u21cc',
'rightrightarrows;': '\u21c9',
'rightsquigarrow;': '\u219d',
'RightTee;': '\u22a2',
'RightTeeArrow;': '\u21a6',
'RightTeeVector;': '\u295b',
'rightthreetimes;': '\u22cc',
'RightTriangle;': '\u22b3',
'RightTriangleBar;': '\u29d0',
'RightTriangleEqual;': '\u22b5',
'RightUpDownVector;': '\u294f',
'RightUpTeeVector;': '\u295c',
'RightUpVector;': '\u21be',
'RightUpVectorBar;': '\u2954',
'RightVector;': '\u21c0',
'RightVectorBar;': '\u2953',
'ring;': '\u02da',
'risingdotseq;': '\u2253',
'rlarr;': '\u21c4',
'rlhar;': '\u21cc',
'rlm;': '\u200f',
'rmoust;': '\u23b1',
'rmoustache;': '\u23b1',
'rnmid;': '\u2aee',
'roang;': '\u27ed',
'roarr;': '\u21fe',
'robrk;': '\u27e7',
'ropar;': '\u2986',
'Ropf;': '\u211d',
'ropf;': '\U0001d563',
'roplus;': '\u2a2e',
'rotimes;': '\u2a35',
'RoundImplies;': '\u2970',
'rpar;': ')',
'rpargt;': '\u2994',
'rppolint;': '\u2a12',
'rrarr;': '\u21c9',
'Rrightarrow;': '\u21db',
'rsaquo;': '\u203a',
'Rscr;': '\u211b',
'rscr;': '\U0001d4c7',
'Rsh;': '\u21b1',
'rsh;': '\u21b1',
'rsqb;': ']',
'rsquo;': '\u2019',
'rsquor;': '\u2019',
'rthree;': '\u22cc',
'rtimes;': '\u22ca',
'rtri;': '\u25b9',
'rtrie;': '\u22b5',
'rtrif;': '\u25b8',
'rtriltri;': '\u29ce',
'RuleDelayed;': '\u29f4',
'ruluhar;': '\u2968',
'rx;': '\u211e',
'Sacute;': '\u015a',
'sacute;': '\u015b',
'sbquo;': '\u201a',
'Sc;': '\u2abc',
'sc;': '\u227b',
'scap;': '\u2ab8',
'Scaron;': '\u0160',
'scaron;': '\u0161',
'sccue;': '\u227d',
'scE;': '\u2ab4',
'sce;': '\u2ab0',
'Scedil;': '\u015e',
'scedil;': '\u015f',
'Scirc;': '\u015c',
'scirc;': '\u015d',
'scnap;': '\u2aba',
'scnE;': '\u2ab6',
'scnsim;': '\u22e9',
'scpolint;': '\u2a13',
'scsim;': '\u227f',
'Scy;': '\u0421',
'scy;': '\u0441',
'sdot;': '\u22c5',
'sdotb;': '\u22a1',
'sdote;': '\u2a66',
'searhk;': '\u2925',
'seArr;': '\u21d8',
'searr;': '\u2198',
'searrow;': '\u2198',
'sect': '\xa7',
'sect;': '\xa7',
'semi;': ';',
'seswar;': '\u2929',
'setminus;': '\u2216',
'setmn;': '\u2216',
'sext;': '\u2736',
'Sfr;': '\U0001d516',
'sfr;': '\U0001d530',
'sfrown;': '\u2322',
'sharp;': '\u266f',
'SHCHcy;': '\u0429',
'shchcy;': '\u0449',
'SHcy;': '\u0428',
'shcy;': '\u0448',
'ShortDownArrow;': '\u2193',
'ShortLeftArrow;': '\u2190',
'shortmid;': '\u2223',
'shortparallel;': '\u2225',
'ShortRightArrow;': '\u2192',
'ShortUpArrow;': '\u2191',
'shy': '\xad',
'shy;': '\xad',
'Sigma;': '\u03a3',
'sigma;': '\u03c3',
'sigmaf;': '\u03c2',
'sigmav;': '\u03c2',
'sim;': '\u223c',
'simdot;': '\u2a6a',
'sime;': '\u2243',
'simeq;': '\u2243',
'simg;': '\u2a9e',
'simgE;': '\u2aa0',
'siml;': '\u2a9d',
'simlE;': '\u2a9f',
'simne;': '\u2246',
'simplus;': '\u2a24',
'simrarr;': '\u2972',
'slarr;': '\u2190',
'SmallCircle;': '\u2218',
'smallsetminus;': '\u2216',
'smashp;': '\u2a33',
'smeparsl;': '\u29e4',
'smid;': '\u2223',
'smile;': '\u2323',
'smt;': '\u2aaa',
'smte;': '\u2aac',
'smtes;': '\u2aac\ufe00',
'SOFTcy;': '\u042c',
'softcy;': '\u044c',
'sol;': '/',
'solb;': '\u29c4',
'solbar;': '\u233f',
'Sopf;': '\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': | |
# repo: icreator/shopForBTCMerchant
#!/usr/bin/env python
# coding: utf8
import datetime
from decimal import Decimal
from gluon import current
T = current.T
import common
import crypto_client
import shops_lib
import db_client
import wager_lib
#
# this is not really a server - it does not stay resident in memory and does not run in parallel
# when a new block arrives, curl is simply invoked with a link to the project page:
# start /MIN curl http://127.0.0.1:8000/ipay3/tools/block_proc/%1/%2 -s >nul
# see !notify-curl.cmd in C:\web2py\applications\ipay4\wallets
# see bitcoin.conf and !notify.cmd in ./bitcoin
def log(db, l2, mess='>'):
    """
    Print a message to stdout and record it in the ``logs`` table.

    ``db``   -- web2py DAL database connection
    ``l2``   -- secondary label identifying the calling context
    ``mess`` -- message text (coerced to a string before logging)
    """
    m = 's_block_proc'  # primary label shared by all entries from this module
    mess = '%s' % mess  # force string conversion so DB insert and print agree
    print m, '[%s]' % l2, mess
    # NOTE(review): field names look like placeholders - verify against the logs table schema
    db.logs.insert(label123456789 = m, label1234567890 = l2, mess = mess)
def log_commit(db, l2, mess='>'):
    """Log a message via ``log`` and immediately commit the DB transaction."""
    log(db, l2, mess)
    db.commit()
def return_trans(db, in_block, curr, xcurr, addr, acc, amo, txid, vout):
    """
    Queue an incoming payment on an unknown/unused address for refund.

    Inserts a record into ``pay_ins_return``; the actual send-back is
    performed elsewhere.  Returns ``True`` when a refund record was
    created, ``None`` when the remainder after the fee is too small to
    bother returning.
    """
    amo = amo - xcurr.txfee
    if amo < xcurr.txfee:
        # the remainder is negligible - do not send it back
        return
    print 'unknown [%s] address %s for account:"%s"' % (curr.abbrev, addr, acc)
    # not matched to any order, so remember it among the unknown payments;
    # queued for refund - the order may have already expired while the payment only just arrived
    db.pay_ins_return.insert(
        xcurr_id = xcurr.id,
        in_block = in_block,
        amount = amo, # refund amount, already net of the tx fee
        txid=txid, vout=vout,
        )
    return True
def b_p_db_update( db, conn, curr, xcurr, tab, curr_block, not_ret=None):
    """
    Record the unspent inputs gathered by ``b_p_proc_unspent`` into the DB.

    For every input in ``tab``: payments to unknown/unused addresses are
    refunded (unless ``not_ret`` marks this as a test call), confirmed
    duplicates get their block number corrected, and new payments are
    inserted into ``pay_ins`` and ``pay_ins_stack``.

    Returns ``True`` when something was written and the caller should
    commit the transaction, otherwise ``None``.
    """
    to_commit = None
    ret = not not_ret # do not send refunds when this is a test call
    # all of our incoming transfers arrive here - remember them
    #orders_update = {}
    #main_addrs = cn.getaddressesbyaccount('.main.')
    #conf_addrs = cn.getaddressesbyaccount('.confirm.')
    for rec in tab: #.iteritems():
        amo = rec['amo']
        acc = rec['acc']
        addr = rec['addr']
        txid = rec['txid']
        vout = rec['vout']
        confs = rec['confs'] # if 0 - do not use it for refunds, info-change confirmation etc.
        in_block = curr_block - confs + 1 # may be an old transaction with XX confirmations
        #if len(acc)==0:
        #    # skip the empty ones, otherwise they would all match
        #    continue
        #print xcurr.abbrev, 'acc:"%s"' % acc, ' unspent:',amo
        #print datetime.datetime.fromtimestamp(rec['time'])
        #print rec, '\n'
        # if this currency is not in use, every input will be rejected here
        shop_order_addr = curr.used and db((db.shop_order_addrs.addr==addr)
            & (db.shop_order_addrs.xcurr_id==xcurr.id)).select().first()
        if not shop_order_addr:
            # we do not know this address, skip it...
            if confs>0:
                # only for confirmed inputs - so entries are not recorded twice
                if acc == '.main.':
                    # income on the main address - e.g. a top-up from an exchange -
                    # skip such an entry
                    continue
                elif acc == '.confirm.':
                    # an input confirming a change of client data
                    shops_lib.try_confirm_changes(db, xcurr, txid, vout)
                    to_commit = True
                # and send them right back
                # if the call returns None keep the previous to_commit
                to_commit = ret and return_trans(db, in_block, curr, xcurr, addr, acc, amo, txid, vout) or to_commit
            # take the next entry
            continue
        elif shop_order_addr.unused and confs>0:
            # if the call returns None keep the previous to_commit
            to_commit = ret and return_trans(db, in_block, curr, xcurr, addr, acc, amo, txid, vout) or to_commit
            continue
        # the unspent table now has no duplicates - the block is new each time
        trans = db((db.pay_ins.txid==txid) & (db.pay_ins.vout==vout)).select().first()
        if trans:
            #print 'b_p_db_update exist:', amo, txid, vout
            if confs>0:
                # such an entry already exists and it is confirmed
                to_commit = True
                # if it was inserted with 0 confirmations, fix its block number;
                # the status itself changes later when the orders are examined
                ################################# &&****&&&&&???????
                print 'pay_ins %s in_block %s updated' % (amo, in_block)
                trans.update_record(in_block = in_block)
                tr_stack = db(db.pay_ins_stack.ref_id==trans.id).select().first()
                if tr_stack:
                    # update the stack record as well
                    tr_stack.update_record(in_block = in_block)
            # take the next record
            continue
        to_commit = True
        created_on = datetime.datetime.fromtimestamp(rec['time'])
        #print 'block_proc - db.pay_ins.insert', created_on
        pay_ins = db(db.pay_ins.txid == txid ).select().first()
        if pay_ins:
            # somehow this record was already processed and is in the stack,
            # possibly because of a blockchain reorganisation
            continue
        pay_id = db.pay_ins.insert(
            shop_order_addr_id = shop_order_addr.id,
            amount = amo,
            in_block = in_block,
            txid=txid, vout=vout,
            created_on = created_on,
            )
        db.pay_ins_stack.insert(
            ref_id = pay_id,
            xcurr_id = xcurr.id,
            in_block = in_block,
            )
        '''
        #orders_update[shop_order_addr.shop_order_id]=1 # запомним заказы которые надо обновить
        # тепеь данные по этой крипте для этого заказ обновим
        shop_order_addr.update_record(amount = shop_order_addr.amount + amo,
        in_block = in_block,
        amo_out = shop_order_addr.amo_out + amo_out
        )
        '''
    return to_commit # tell the caller whether the database should be saved
# TODO
# outputs are reported twice here when they are generated (mined)
# check via
# http://127.0.0.1:8000/shop/tools/proc_unspent/BTC/3333/1MfMcg8J7rKKGUNmGRvEhHCZHiqPzdAzoB
#
##############
# find all incoming crypto transfers to the addresses of our orders
# the result is an array of inputs
# scans from 0 to conf deep, plus generated coins 120 blocks older
# i.e. only the freshest inputs are taken
def b_p_proc_unspent( db, conn, curr, xcurr, conf=None, addr_in=None ):
    """
    Collect the incoming unspent transfers for this currency via RPC.

    ``conn`` is a bitcoind-style JSON-RPC connection; ``conf`` limits the
    confirmation depth scanned (``None`` -> 1; falsy/0 -> unconfirmed
    only), and ``addr_in`` optionally restricts the scan to one address.

    Returns a list of dicts with keys ``acc``, ``amo``, ``confs``,
    ``txid``, ``vout``, ``addr`` and ``time`` - one per incoming input.
    """
    #print 'BALANCE:', conn.getbalance(), addr_in and (' for addr_in:',conn.getbalance(addr_in)) or 'addr_in=None'
    # check the unspent coins on the addresses
    # that were created for receiving crypto
    #
    # here we limit the scan of incoming unspent transactions
    # by confirmations, taking the number of the last processed block into account,
    # with the max confirmation bound chosen so already-processed ones are not taken;
    # we always start from the 1st confirmation and sort out the counts later
    tab = []
    conf = conf == None and 1 or conf # default to one confirmation when not given
    if conf:
        lUnsp = conn.listunspent( 0, conf) # e.g. from the 1st to the 1st
        #print '\n******************\n lUnsp(0, %s):' % conf
        #for r in lUnsp: print r
        if type(lUnsp) == type({}):
            # a dict here means an RPC error
            log(db, 'b_p_proc_unspent', 'listunspent %s' % lUnsp)
            return tab
        # now for freshly generated (mined) coins -
        # they only appear in unspent after 120 confirmations
        conf_gen = xcurr.conf_gen or 120
        if conf_gen < conf:
            # if we start deeper than the generation depth, shift it down
            conf_gen = conf + 1
        l_generate = conn.listunspent( conf_gen, conf_gen + conf - 1) # e.g. from the 120th to the 120th
        #print '\n l_generate:\n'
        #for r in l_generate: print r
        lUnsp += l_generate
    else:
        # block = -1 means we look for unconfirmed inputs only
        lUnsp = conn.listunspent( 0, 0)
        if type(lUnsp) == type({}):
            # a dict here means an RPC error
            log(db, 'b_p_proc_unspent', 'listunspent %s' % lUnsp)
            return tab
    #for r in lUnsp: print '\n',r
    #print len(lUnsp)
    for r in lUnsp:
        # listunspent yields incoming transactions that have not been spent;
        # take only the inputs - they have no 'send' outputs in the transaction,
        # otherwise it is change from an outgoing payment
        acc = r.get(u'account')
        if acc and acc == '.main.': continue # do not check our own transfers
        txid = r[u'txid']
        ti = conn.gettransaction(txid)
        # this is an array - a transaction may carry several details;
        # [u'category'] may be u'receive' OR u'send'
        trans_details = ti['details']
        #log(db, 'b_p_proc_unspent', 'trans_details %s' % trans_details)
        # one transaction may contain several inputs!
        # it turns out that even with one input there can be a 'send' detail - there may be no change
        its_outcome = False
        for detail in trans_details:
            if detail[u'category'] == u'send':
                its_outcome = True
                # this is the change output
                break
        if its_outcome:
            continue
        amo = r[u'amount']
        vout = r[u'vout']
        addr = r.get(u'address')
        if not addr:
            # if the address is missing, take it from the raw transaction
            rawtr = conn.getrawtransaction(txid, 1)
            vouts = rawtr[u'vout']
            trans = vouts[vout]
            #print trans
            addr = trans[u'scriptPubKey'][u'addresses'][0]
        if addr_in and addr_in != addr: continue
        if not acc:
            acc = conn.getaccount(addr)
        #print acc, addr
        #print amo, txid, vout
        # remember the amount of coins received
        tab.append({
            'acc': acc, 'amo': amo,
            'confs': r[u'confirmations'],
            # remember the data needed to find this input later
            'txid':txid, 'vout': vout,
            'addr': addr,
            'time':ti[u'time']})
    return tab
# take the refund/payout addresses from the table and
# build a send_many call
# the fee is not accounted for here - not subtracted! - should that be done when the refund records are created??
# this function is not used by other modules
def return_refused(db, curr, xcurr, conn, curr_block, table):
# возвраты - вышлем за раз все по даной крипте
addrs = {}
ids = []
vol = 0.0
for r in db(table.xcurr_id == xcurr.id).select():
if curr_block - xcurr.conf_hard - 2 < r.in_block: continue # только зрелые назад возвернем
# тут надо еще проверить - а не исчезла ли эта проводка - мож она доубле спенд былда или в блоке двойном
if r.txid and len(r.txid)>20 and not crypto_client.trans_exist(conn, r.txid):
#### тут отдельные выходы не проверяем на малость их - только сумму -- r.amount < xcurr.txfee*3:
# это не выплата для тестового_магазина - там в записи адрес уже стоит
# и такой транзакции в | |
# filename: rmgpy/pdep/network.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2018 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module contains the :class:`Network` class, a representation of a
pressure-dependent unimolecular reaction network
"""
import math
import numpy
import logging
import rmgpy.constants as constants
from rmgpy.reaction import Reaction
from rmgpy.exceptions import NetworkError, InvalidMicrocanonicalRateError
################################################################################
class Network:
"""
A representation of a unimolecular reaction network. The attributes are:
======================= ====================================================
Attribute Description
======================= ====================================================
`isomers` A list of the unimolecular isomers in the network
`reactants` A list of the bimolecular reactant channels (Configuration objects) in the network
`products` A list of the bimolecular product channels (Configuration objects) in the network
`pathReactions` A list of "path" reaction objects that connect adjacent isomers (the high-pressure-limit)
`bathGas` A dictionary of the bath gas species (keys) and their mole fractions (values)
`netReactions` A list of "net" reaction objects that connect any pair of isomers
----------------------- ----------------------------------------------------
`T` The current temperature in K
`P` The current pressure in bar
`Elist` The current array of energy grains in kJ/mol
`Jlist` The current array of total angular momentum quantum numbers
----------------------- ----------------------------------------------------
`Nisom` The number of unimolecular isomers in the network
`Nreac` The number of bimolecular reactant channels in the network
`Nprod` The number of bimolecular product channels in the network
`Ngrains` The number of energy grains
`NJ` The number of angular momentum grains
----------------------- ----------------------------------------------------
`grainSize` Maximum size of separation between energies
`grainCount` Minimum number of descrete energies separated
`E0` A list of ground state energies of isomers, reactants, and products
`activeKRotor` ``True`` if the K-rotor is treated as active, ``False`` if treated as adiabatic
`activeJRotor` ``True`` if the J-rotor is treated as active, ``False`` if treated as adiabatic
`rmgmode` ``True`` if in RMG mode, ``False`` otherwise
----------------------- ----------------------------------------------------
`eqRatios` An array containing concentration of each isomer and reactant channel present at equilibrium
`collFreq` An array of the frequency of collision between
`Mcoll` Matrix of first-order rate coefficients for collisional population transfer between grains for each isomer
`densStates` 3D np array of stable configurations, number of grains, and number of J
======================= ====================================================
"""
def __init__(self, label='', isomers=None, reactants=None, products=None,
pathReactions=None, bathGas=None, netReactions=None, T=0.0, P =0.0,
Elist = None, Jlist = None, Ngrains = 0, NJ = 0, activeKRotor = True,
activeJRotor = True, grainSize=0.0, grainCount = 0, E0 = None):
"""
To initialize a Network object for running a pressure dependent job,
only label, isomers, reactants, products pathReactions and bathGas are useful,
since the other attributes will be created during the run.
The other attributes are used to reinstantiate the created network object
for debugging and testing.
"""
self.label = label
self.isomers = isomers or []
self.reactants = reactants or []
self.products = products or []
self.pathReactions = pathReactions or []
self.bathGas = bathGas or {}
self.netReactions = netReactions or []
self.T = T
self.P = P
self.Elist = Elist
self.Jlist = Jlist
self.Nisom = len(self.isomers)
self.Nreac = len(self.reactants)
self.Nprod = len(self.products)
self.Ngrains = Ngrains
self.NJ = NJ
self.activeKRotor = activeKRotor
self.activeJRotor = activeJRotor
self.grainSize = grainSize
self.grainCount = grainCount
self.E0 = E0
self.valid = False
def __repr__(self):
string = 'Network('
if self.label != '': string += 'label="{0}", '.format(self.label)
if self.isomers: string += 'isomers="{0!r}", '.format(self.isomers)
if self.reactants: string += 'reactants="{0!r}", '.format(self.reactants)
if self.products: string += 'products="{0!r}", '.format(self.products)
if self.pathReactions: string += 'pathReactions="{0!r}", '.format(self.pathReactions)
if self.bathGas: string += 'bathGas="{0!r}", '.format(self.bathGas)
if self.netReactions: string += 'netReactions="{0!r}", '.format(self.netReactions)
if self.T != 0.0: string += 'T="{0}", '.format(self.T)
if self.P != 0.0: string += 'P="{0}", '.format(self.P)
if self.Elist is not None: string += 'Elist="{0}", '.format(self.Elist)
if self.Jlist is not None: string += 'Jlist="{0}", '.format(self.Jlist)
if self.Ngrains != 0: string += 'Ngrains="{0}", '.format(self.Ngrains)
if self.NJ != 0: string += 'NJ="{0}", '.format(self.NJ)
string += 'activeKRotor="{0}", '.format(self.activeKRotor)
string += 'activeJRotor="{0}", '.format(self.activeJRotor)
if self.grainSize != 0.0: string += 'grainSize="{0}", '.format(self.grainSize)
if self.grainCount != 0: string += 'grainCount="{0}", '.format(self.grainCount)
if self.E0 is not None: string += 'E0="{0}", '.format(self.E0)
string += ')'
return string
def __str__(self):
"""return Network like it would be seen in an Arkane input file"""
return "Network(label = '{0}', isomers = {1}, reactants = {2}, products = {3}, "\
"pathReactions = {4}, bathGas = {5}, "\
"netReactions = {6})".format(self.label, [i.species[0].label for i in self.isomers],
[[r.label for r in pair.species] for pair in self.reactants],
[[p.label for p in pair.species] for pair in self.products],
[r.label for r in self.pathReactions],
dict([(s.label, value) for s, value in self.bathGas.items()]),
[r.toLabeledStr() for r in self.netReactions])
def invalidate(self):
"""
Mark the network as in need of a new calculation to determine the
pressure-dependent rate coefficients
"""
self.valid = False
def getAllSpecies(self):
"""
Return a list of all unique species in the network, including all
isomers, reactant and product channels, and bath gas species.
"""
speciesList = []
for isomer in self.isomers:
for spec in isomer.species:
if spec not in speciesList: speciesList.append(spec)
for reactant in self.reactants:
for spec in reactant.species:
if spec not in speciesList: speciesList.append(spec)
for product in self.products:
for spec in product.species:
if spec not in speciesList: speciesList.append(spec)
for spec in self.bathGas:
if spec not in speciesList: speciesList.append(spec)
return speciesList
def initialize(self, Tmin, Tmax, Pmin, Pmax, maximumGrainSize=0.0, minimumGrainCount=0, activeJRotor=True, activeKRotor=True, rmgmode=False):
    """
    Prepare a pressure-dependence calculation by computing the
    condition-independent quantities.  `Tmin`/`Tmax` give the
    temperature range in K and `Pmin`/`Pmax` the pressure range in Pa.
    At least one of the maximum energy grain size `maximumGrainSize`
    (J/mol) or the minimum grain count `minimumGrainCount` must be
    specified, otherwise a NetworkError is raised.
    """
    logging.debug("initializing network")
    if maximumGrainSize == 0.0 and minimumGrainCount == 0:
        raise NetworkError('Must provide either grainSize or Ngrains parameter to Network.determineEnergyGrains().')
    self.Tmin, self.Tmax = Tmin, Tmax
    self.Pmin, self.Pmax = Pmin, Pmax
    self.grainSize = maximumGrainSize
    self.grainCount = minimumGrainCount
    self.Nisom = len(self.isomers)
    self.Nreac = len(self.reactants)
    self.Nprod = len(self.products)
    self.Ngrains = 0
    self.NJ = 0
    # Ground-state energies: isomers first, then reactant channels,
    # then product channels.
    self.E0 = numpy.zeros((self.Nisom + self.Nreac + self.Nprod), numpy.float64)
    for i, isomer in enumerate(self.isomers):
        self.E0[i] = isomer.E0
    for n, reactant in enumerate(self.reactants):
        self.E0[self.Nisom + n] = reactant.E0
    for n, product in enumerate(self.products):
        self.E0[self.Nisom + self.Nreac + n] = product.E0
    # Density-of-states computation depends on the rotor treatment and
    # on whether we run in RMG mode.
    self.activeJRotor = activeJRotor
    self.activeKRotor = activeKRotor
    self.rmgmode = rmgmode
    self.calculateDensitiesOfStates()
    logging.debug('Finished initialization for network {0}.'.format(self.label))
    logging.debug('The network now has values of {0}'.format(repr(self)))
def calculateRateCoefficients(self, Tlist, Plist, method, errorCheck=True):
Nisom = len(self.isomers)
Nreac = len(self.reactants)
Nprod = len(self.products)
for rxn in self.pathReactions:
if len(rxn.transitionState.conformer.modes) > 0:
logging.debug('Using RRKM theory to compute k(E) for path reaction {0}.'.format(rxn))
elif rxn.kinetics is not None:
logging.debug('Using ILT method to compute k(E) for path reaction {0}.'.format(rxn))
logging.debug('')
logging.info('Calculating phenomenological rate coefficients for {0}...'.format(rxn))
K = numpy.zeros((len(Tlist),len(Plist),Nisom+Nreac+Nprod,Nisom+Nreac+Nprod), numpy.float64)
for t, T in enumerate(Tlist):
for p, P in enumerate(Plist):
self.setConditions(T, P)
# Apply method
if method.lower() == 'modified strong | |
try:
shutil.rmtree(dir)
logger.debug("Remove directory: " + dir)
except EnvironmentError:
logger.error("Unable to remove directory: " + dir)
else:
logger.debug("Request to remove directory that does not exist: " + dir)
def break_up_fasta_file(fasta_file, max_seqs):
    """
    Break up a fasta file into smaller fasta files holding at most
    max_seqs sequences each.

    Returns the list of temp file paths created (at least one, even for
    an empty input).

    Fix: the original leaked both file handles when an exception was
    raised mid-loop; the input is now a context manager and the output
    handle is closed in a finally block.
    """
    # check file exists (project helper; handles unreadable input)
    file_exists_readable(fasta_file)
    new_file = unnamed_temp_file()
    fasta_files = [new_file]
    file_out = open(new_file, "w")
    current_seq = 0
    try:
        with open(fasta_file, "rt") as file_handle_read:
            for line in file_handle_read:
                if not re.search("^>", line):
                    # sequence data belongs to the current output file
                    file_out.write(line)
                    continue
                # header line: roll over to a new output file once the
                # current one already holds max_seqs sequences
                if current_seq == max_seqs:
                    file_out.close()
                    new_file = unnamed_temp_file()
                    file_out = open(new_file, "w")
                    fasta_files.append(new_file)
                    current_seq = 1
                else:
                    current_seq += 1
                file_out.write(line)
    finally:
        # always release the last output handle, even on error
        file_out.close()
    return fasta_files
def fastq_to_fasta(file, apply_pick_frames=None, length_annotation=None):
    """
    Convert a fastq file to fasta, returning the path of the new file.
    Optionally pick reading frames for each sequence and/or append a
    length annotation to each sequence id.

    Fastq format short example:
    @SEQ_ID
    GATCTGG
    +
    !''****

    Fasta format short example:
    >SEQ_INFO
    GATCTGG

    Fix: the original duplicated the record-writing logic in two places
    and leaked both file handles on error; the write logic is now a
    single helper and the handles are context-managed.
    """
    # check file exists (project helper)
    file_exists_readable(file)
    new_file = unnamed_temp_file()

    def _write_record(file_out, sequence_id, sequence):
        # Write one fasta record, optionally expanded into its reading frames.
        if apply_pick_frames:
            sequences = pick_frames.pick_frames(sequence)
        else:
            sequences = [sequence]
        if length_annotation:
            sequence_id = add_length_annotation(sequence_id, len(sequence))
        for seq in sequences:
            file_out.write(sequence_id + "\n")
            file_out.write(seq + "\n")

    with open(file, "rt") as file_handle_read, open(new_file, "w") as file_out:
        sequence = ""
        sequence_id = ""
        for line in file_handle_read:
            if re.search("^@", line):
                # new fastq header: flush the previous record first
                if sequence:
                    _write_record(file_out, sequence_id, sequence)
                sequence_id = line.replace("@", ">", 1).rstrip()
                sequence = ""
            elif re.search("^[A|a|T|t|G|g|C|c|N|n]+$", line):
                # NOTE(review): the '|' inside the character class is literal
                # (it also matches lines of '|'); kept for behavior parity
                sequence += line.rstrip()
        # flush the last record
        if sequence:
            _write_record(file_out, sequence_id, sequence)
    return new_file
def pick_frames_from_fasta(file, length_annotation=None):
    """
    Convert fasta file to picked frames

    Writes each picked frame for every input sequence to a new fasta
    file, optionally appending the original sequence length to the id
    (length_annotation). Returns the path to the new fasta file.
    """
    # check file exists
    file_exists_readable(file)
    file_handle_read = open(file, "rt")
    line = file_handle_read.readline()
    new_file=unnamed_temp_file()
    file_out=open(new_file,"w")
    sequence=""
    # initialize the id so data appearing before the first header line
    # (malformed fasta) does not raise a NameError
    sequence_id=""
    while line:
        if not re.search("^>",line):
            sequence+=line.rstrip()
        else:
            # if a sequence has been read in then pick frames and write
            # (headerless data found before the first ">" line is skipped)
            if sequence and sequence_id:
                sequences=pick_frames.pick_frames(sequence)
                if length_annotation:
                    sequence_id=add_length_annotation(sequence_id,len(sequence))
                for sequence in sequences:
                    file_out.write(sequence_id+"\n")
                    file_out.write(sequence+"\n")
            sequence=""
            sequence_id=line.rstrip()
        line=file_handle_read.readline()
    # if a sequence has been read in then pick frames and write
    if sequence and sequence_id:
        sequences=pick_frames.pick_frames(sequence)
        if length_annotation:
            sequence_id=add_length_annotation(sequence_id,len(sequence))
        for sequence in sequences:
            file_out.write(sequence_id+"\n")
            file_out.write(sequence+"\n")
    file_out.close()
    file_handle_read.close()
    return new_file
def length_annotate_fasta(file):
    """
    Add annotations of the lengths of the sequences to the fasta sequence ids

    Returns the path to the new, annotated fasta file.
    """
    # check file exists
    file_exists_readable(file)
    file_handle_read = open(file, "rt")
    line = file_handle_read.readline()
    new_file=unnamed_temp_file()
    file_out=open(new_file,"w")
    sequence=""
    # initialize the id so data appearing before the first header line
    # (malformed fasta) does not raise a NameError
    sequence_id=""
    while line:
        if not re.search("^>",line):
            sequence+=line.rstrip()
        else:
            # if a sequence has been read in then annotate and write
            # (headerless data found before the first ">" line is skipped)
            if sequence and sequence_id:
                sequence_id=add_length_annotation(sequence_id,len(sequence))
                file_out.write(sequence_id+"\n")
                file_out.write(sequence+"\n")
            sequence=""
            sequence_id=line.rstrip()
        line=file_handle_read.readline()
    # if a sequence has been read in then annotate and write
    if sequence and sequence_id:
        sequence_id=add_length_annotation(sequence_id,len(sequence))
        file_out.write(sequence_id+"\n")
        file_out.write(sequence+"\n")
    file_out.close()
    file_handle_read.close()
    return new_file
def tsv_to_biom(tsv_file, biom_file, table_type):
    """
    Convert from a tsv to biom file using the biom API
    """
    # biom, numpy, and h5py are only needed for biom output, so import
    # them lazily and report each missing module individually
    try:
        import biom
    except ImportError:
        sys.exit("Could not find the biom software."+
            " This software is required since the output file is a biom file.")
    try:
        import numpy
    except ImportError:
        sys.exit("Could not find the numpy software."+
            " This software is required since the output file is a biom file.")
    try:
        import h5py
    except ImportError:
        sys.exit("Could not find the h5py software."+
            " This software is required since the output file is a biom file.")
    # read the tsv file: the first line holds the sample names and the
    # first column of every following row holds the observation id
    with open(tsv_file) as file_handle:
        samples=file_handle.readline().rstrip().split("\t")[1:]
        rows=[line.rstrip().split("\t") for line in file_handle]
    ids=[row[0] for row in rows]
    data=[row[1:] for row in rows]
    # reformat the rows into a biom table
    table=biom.Table(numpy.array(data), ids, samples)
    # write a h5py biom table
    with h5py.File(biom_file, 'w') as file_handle:
        table.to_hdf5(file_handle, table_type)
def biom_to_tsv(biom_file):
    """
    Convert from a biom to tsv file

    Runs the external "biom convert" utility and returns the path to
    the resulting tsv file.
    """
    # create a unnamed temp file
    new_tsv_file=unnamed_temp_file()
    logger.info("Converting biom file to tsv ...")
    command_args=["convert","-i",biom_file,"-o",new_tsv_file,"--to-tsv"]
    execute_command("biom", command_args, [biom_file], [new_tsv_file])
    return new_tsv_file
def format_float_to_string(number):
    """
    Format a float to a string using the config max number of decimals
    """
    # build the format spec from the configured precision, then apply it
    format_spec = "{:." + str(config.output_max_decimals) + "f}"
    return format_spec.format(number)
def byte_to_gigabyte(byte):
    """
    Convert byte value to gigabyte
    """
    # 1 GB = 1024^3 bytes; float divisor forces true division
    bytes_per_gigabyte = 1024.0 * 1024.0 * 1024.0
    return byte / bytes_per_gigabyte
def byte_to_megabyte(byte):
    """
    Convert byte value to megabyte
    """
    # 1 MB = 1024^2 bytes; float divisor forces true division
    bytes_per_megabyte = 1024.0 * 1024.0
    return byte / bytes_per_megabyte
def byte_to_kilobyte(byte):
    """
    Convert byte value to kilobyte
    """
    # 1 KB = 1024 bytes; float divisor forces true division
    return float(byte) / 1024.0
def log_system_status():
    """
    Print the status of the system

    Writes system memory, cpu, disk, and current-process statistics to
    the log. psutil is an optional dependency: if it is not installed,
    or any psutil call fails, the function silently does nothing.
    """
    # probe for the optional psutil module instead of failing outright
    module_available=True
    try:
        import psutil
    except ImportError:
        module_available=False
    if module_available:
        try:
            # record the memory used
            memory = psutil.virtual_memory()
            logger.info("Total memory = " + str(byte_to_gigabyte(memory.total)) + " GB")
            logger.info("Available memory = " + str(byte_to_gigabyte(memory.available)) + " GB")
            logger.info("Free memory = " + str(byte_to_gigabyte(memory.free)) + " GB")
            logger.info("Percent memory used = " + str(memory.percent) + " %")
            # record the cpu info
            logger.info("CPU percent = " + str(psutil.cpu_percent()) + " %")
            logger.info("Total cores count = " + str(psutil.cpu_count()))
            # record the disk usage
            disk = psutil.disk_usage('/')
            logger.info("Total disk = " + str(byte_to_gigabyte(disk.total)) + " GB")
            logger.info("Used disk = "+ str(byte_to_gigabyte(disk.used)) + " GB")
            logger.info("Percent disk used = " + str(disk.percent) + " %")
            # record information about this current process
            process=psutil.Process()
            process_memory=process.memory_info()
            process_create_time=datetime.datetime.fromtimestamp(
                process.create_time()).strftime("%Y-%m-%d %H:%M:%S")
            process_cpu_times=process.cpu_times()
            # two calls required to cpu percent for non-blocking as per documentation
            process_cpu_percent=process.cpu_percent()
            process_cpu_percent=process.cpu_percent()
            logger.info("Process create time = " + process_create_time)
            logger.info("Process user time = " + str(process_cpu_times.user) + " seconds")
            logger.info("Process system time = " + str(process_cpu_times.system) + " seconds")
            logger.info("Process CPU percent = " + str(process_cpu_percent) + " %")
            logger.info("Process memory RSS = " + str(byte_to_gigabyte(process_memory.rss)) + " GB")
            logger.info("Process memory VMS = " + str(byte_to_gigabyte(process_memory.vms)) + " GB")
            logger.info("Process memory percent = " + str(process.memory_percent()) + " %")
        except (AttributeError, OSError, TypeError, psutil.Error):
            # status logging is best effort only; never let a psutil
            # failure interrupt the main workflow
            pass
def add_length_annotation(id, length):
    """
    Add the length to the query id
    """
    # translated search tools split ids on spaces, so keep only the first
    # token before appending the delimiter and the length
    base_id = id.split(" ")[0]
    return base_id + config.query_length_annotation_delimiter + str(length)
def remove_length_annotation(id):
    """
    Remove the length from the query id
    """
    # drop the final delimiter-separated field (the length) and rejoin
    delimiter = config.query_length_annotation_delimiter
    pieces = id.split(delimiter)
    return delimiter.join(pieces[:-1])
def get_length_annotation(id):
    """
    Try to get the length annotation from the query id

    Returns a tuple of (id without annotation, length). Falls back to
    the full id and a default length of 1 when the annotation is
    missing or malformed.
    """
    delimiter = config.query_length_annotation_delimiter
    # without the delimiter there is no annotation to extract
    if delimiter not in id:
        return id, 1
    info = id.split(delimiter)
    try:
        # the last item is the length; the rest form the id
        length = int(info.pop())
        new_id = delimiter.join(info)
    except (ValueError, IndexError):
        return id, 1
    return new_id, length
def get_filtered_translated_alignments(alignment_file_tsv, alignments, apply_filter=None,
log_filter=None, unaligned_reads_store=None):
"""
Read through the alignment file, yielding filtered alignments
Filter based on identity threshold, evalue, and coverage threshold
Remove from unaligned reads store if set
"""
# read through the alignment file to identify ids
# that correspond to aligned reads
# all translated alignment files will be of the tabulated blast format
file_handle=open(alignment_file_tsv,"rt")
line=file_handle.readline()
log_evalue=False
large_evalue_count=0
small_identity_count=0
small_query_coverage_count=0
percent_identity_convert_error=0
alignment_length_convert_error=0
evalue_convert_error=0
rapsearch_evalue_convert_error=0
while line:
if re.search("^#",line):
# Check for the rapsearch2 header to determine if these are log(e-value)
if re.search(config.blast_delimiter,line):
data=line.split(config.blast_delimiter)
if len(data)>config.blast_evalue_index:
if re.search("log",data[config.blast_evalue_index]):
log_evalue=True
else:
alignment_info=line.split(config.blast_delimiter)
# try to obtain the identity value to determine if threshold is met
identity=alignment_info[config.blast_identity_index]
try:
identity=float(identity)
except ValueError:
percent_identity_convert_error+=1
identity=0.0
queryid=alignment_info[config.blast_query_index]
# try converting the alignment length to a number
alignment_length=alignment_info[config.blast_aligned_length_index]
try:
alignment_length=float(alignment_length)
except ValueError:
alignment_length_convert_error+=1
alignment_length=0.0
# try converting evalue to float to check if it is a number
evalue=alignment_info[config.blast_evalue_index]
try:
evalue=float(evalue)
except ValueError:
evalue_convert_error+=1
evalue=1.0
# try to get the start and end positions for the query
try:
query_start_index = int(alignment_info[config.blast_query_start_index])
query_stop_index = int(alignment_info[config.blast_query_end_index])
except (ValueError, IndexError):
query_start_index=0
query_stop_index=0
# check for query length annotation
queryid, query_length = get_length_annotation(queryid)
# try to get the start and end positions for the subject
try:
subject_start_index = int(alignment_info[config.blast_subject_start_index])
subject_stop_index = int(alignment_info[config.blast_subject_end_index])
except (ValueError, IndexError):
subject_start_index=0
subject_stop_index=0
# convert rapsearch evalue to blastm8 format if logged
if log_evalue:
try:
evalue=math.pow(10.0, evalue)
except (ValueError, OverflowError):
rapsearch_evalue_convert_error+=1
evalue=1.0
# compute the number of matches
matches=identity/100.0*alignment_length
# get the protein alignment information
protein_name, gene_length, bug = alignments.process_reference_annotation(
alignment_info[config.blast_reference_index])
# check if percent identity is less then threshold
filter=False
if identity < config.identity_threshold:
filter=True
small_identity_count+=1
# filter alignments with evalues greater than threshold
if evalue > config.evalue_threshold:
filter=True
large_evalue_count+=1
# filter alignments based on query coverage
if query_length > 1:
query_coverage = ( ( abs(query_stop_index - query_start_index) + 1) / float(query_length) )* 100.0
else:
# | |
"art": "constructed",
"art-blk": "Bolak",
"art-bsp": "Black Speech",
"art-com": "Communicationssprache",
"art-dtk": "Dothraki",
"art-elo": "Eloi",
"art-gld": "Goa'uld",
"art-lap": "Lapine",
"art-man": "Mandalorian",
"art-mun": "Mundolinco",
"art-nav": "Na'vi",
"art-nox": "Noxilo",
"art-una": "Unas",
"aru": "Arua",
"arv": "Arbore",
"arw": "Arawak",
"arx": "Aruá",
"ary": "Moroccan Arabic",
"arz": "Egyptian Arabic",
"as": "Assamese",
"asa": "Pare",
"asb": "Assiniboine",
"asc": "Casuarina Coast Asmat",
"ase": "American Sign Language",
"asf": "Auslan",
"asg": "Cishingini",
"ash": "Abishira",
"asi": "Buruwai",
"asj": "Nsari",
"ask": "Ashkun",
"asl": "Asilulu",
"asn": "Xingú Asuriní",
"aso": "Dano",
"asp": "Algerian Sign Language",
"asq": "Austrian Sign Language",
"asr": "Asuri",
"ass": "Ipulo",
"ast": "Asturian",
"asu": "Tocantins Asurini",
"asv": "Asoa",
"asw": "Australian Aboriginal Sign Language",
"asx": "Muratayak",
"asy": "Yaosakor Asmat",
"asz": "As",
"ata": "Pele-Ata",
"atb": "Zaiwa",
"atc": "Atsahuaca",
"atd": "Ata Manobo",
"ate": "Atemble",
"atg": "Okpela",
"ath": "Athabaskan",
"ath-nic": "Nicola",
"ath-nor": "North Athabaskan",
"ath-pco": "Pacific Coast Athabaskan",
"ath-pro": "Proto-Athabaskan",
"ati": "Attié",
"atj": "Atikamekw",
"atk": "Ati",
"atl": "Mount Iraya Agta",
"atm": "Ata",
"atn": "Ashtiani",
"ato": "Atong (Cameroon)",
"atp": "Pudtol Atta",
"atq": "Aralle-Tabulahan",
"atr": "Waimiri-Atroari",
"ats": "Gros Ventre",
"att": "Pamplona Atta",
"atu": "Reel",
"atv": "Northern Altai",
"atw": "Atsugewi",
"atx": "Arutani",
"aty": "Aneityum",
"atz": "Arta",
"aua": "Asumboa",
"aub": "Alugu",
"auc": "Huaorani",
"aud": "Anuta",
"auf": "Arauan",
"auf-pro": "Proto-Arawa",
"aug": "Aguna",
"auh": "Aushi",
"aui": "Anuki",
"auj": "Awjila",
"auk": "Heyo",
"aul": "Aulua",
"aum": "Asu",
"aun": "Molmo One",
"auo": "Auyokawa",
"aup": "Makayam",
"auq": "Anus",
"aur": "Aruek",
"aus-alu": "Alungul",
"aus-and": "Andjingith",
"aus-ang": "Angkula",
"aus-arn": "Arnhem",
"aus-arn-pro": "Proto-Arnhem",
"aus-bra": "Barranbinya",
"aus-brm": "Barunggam",
"aus-bub": "Bunuban",
"aus-cww": "Central New South Wales",
"aus-cww-pro": "Proto-Central New South Wales",
"aus-dal": "Daly",
"aus-dal-pro": "Proto-Daly",
"aus-dyb": "Dyirbalic",
"aus-gar": "Garawan",
"aus-gun": "Gunwinyguan",
"aus-guw": "Guwar",
"aus-jar": "Jarrakan",
"aus-kar": "Karnic",
"aus-lsw": "Little Swanport",
"aus-mbi": "Mbiywom",
"aus-mir": "Mirndi",
"aus-nga": "Ngayarda",
"aus-ngk": "Ngkoth",
"aus-nyu": "Nyulnyulan",
"aus-nyu-pro": "Proto-Nyulnyulan",
"aus-pam": "Pama-Nyungan",
"aus-pam-pro": "Proto-Pama-Nyungan",
"aus-pmn": "Paman",
"aus-psw": "Southwest Pama-Nyungan",
"aus-rnd": "Arandic",
"aus-tnk": "Tangkic",
"aus-tul": "Tulua",
"aus-uwi": "Uwinymil",
"aus-wdj": "Iwaidjan",
"aus-wdj-pro": "Proto-Iwaidjan",
"aus-won": "Wong-gie",
"aus-wor": "Worrorran",
"aus-wul": "Wulguru",
"aus-yid": "Yidinyic",
"aus-yng": "Yangmanic",
"aus-ynk": "Yangkaal",
"aus-yol": "Yolngu",
"aus-yuk": "Yuin-Kuric",
"aut": "Austral",
"auu": "Auye",
"auw": "Awyi",
"aux": "Aurá",
"auy": "Auyana",
"auz": "Uzbeki Arabic",
"av": "Avar",
"avb": "Avau",
"avd": "Alviri-Vidari",
"avi": "Avikam",
"avk": "Kotava",
"avm": "Angkamuthi",
"avn": "Avatime",
"avo": "Agavotaguerra",
"avs": "Aushiri",
"avt": "Au",
"avu": "Avokaya",
"avv": "Avá-Canoeiro",
"awa": "Awadhi",
"awa-old": "Old Awadhi",
"awb": "Awa (New Guinea)",
"awc": "Cicipu",
"awd": "Arawakan",
"awd-ama": "Amarizana",
"awd-amc-pro": "Proto-Amuesha-Chamicuro",
"awd-ana": "Anauyá",
"awd-apo": "Apolista",
"awd-cav": "Cavere",
"awd-gnu": "Guinau",
"awd-kar": "Cariay",
"awd-kaw": "Kawishana",
"awd-kmp-pro": "Proto-Kampa",
"awd-kus": "Kustenau",
"awd-man": "Manao",
"awd-mar": "Marawan",
"awd-mpr": "Maypure",
"awd-mrt": "Mariaté",
"awd-nwk": "Nawiki",
"awd-nwk-pro": "Proto-Nawiki",
"awd-pai": "Paikoneka",
"awd-pas": "Passé",
"awd-pro": "Proto-Arawak",
"awd-prw-pro": "Proto-Paresi-Waura",
"awd-she": "Shebayo",
"awd-taa": "Ta-Arawakan",
"awd-taa-pro": "Proto-Ta-Arawak",
"awd-wai": "Wainumá",
"awd-yum": "Yumana",
"awe": "Awetí",
"awg": "Anguthimri",
"awh": "Awbono",
"awi": "Aekyom",
"awk": "Awabakal",
"awm": "Arawum",
"awn": "Awngi",
"awo": "Awak",
"awr": "Awera",
"aws": "South Awyu",
"awt": "Araweté",
"awu": "Central Awyu",
"awv": "Jair Awyu",
"aww": "Awun",
"awx": "Awara",
"awy": "Edera Awyu",
"axb": "Abipon",
"axe": "Ayerrerenge",
"axg": "<NAME>",
"axk": "Aka (Central Africa)",
"axl": "Lower Southern Aranda",
"axm": "Middle Armenian",
"axx": "Xaragure",
"ay": "Aymara",
"aya": "Awar",
"ayb": "Ayizo",
"ayd": "Ayabadhu",
"aye": "Ayere",
"ayg": "Nyanga (Togo)",
"ayi": "Leyigha",
"ayk": "Akuku",
"ayl": "Libyan Arabic",
"ayn": "Yemeni Arabic",
"ayo": "Ayoreo",
"ayp": "North Mesopotamian Arabic",
"ayq": "Ayi",
"ays": "Sorsogon Ayta",
"ayt": "Bataan Ayta",
"ayu": "Ayu",
"ayy": "Tayabas Ayta",
"ayz": "Maybrat",
"az": "Azerbaijani",
"az-cls": "Classical Azerbaijani",
"aza": "Azha",
"azc": "Uto-Aztecan",
"azc-caz": "Cazcan",
"azc-cup": "Cupan",
"azc-cup-pro": "Proto-Cupan",
"azc-ktn": "Kitanemuk",
"azc-nah": "Nahuan",
"azc-nah-pro": "Proto-Nahuan",
"azc-num": "Numic",
"azc-num-pro": "Proto-Numic",
"azc-pro": "Proto-Uto-Aztecan",
"azc-tak": "Takic",
"azc-tak-pro": "Proto-Takic",
"azc-tat": "Tataviam",
"azc-trc": "Taracahitic",
"azd": "Eastern Durango Nahuatl",
"azg": "San Pedro Amuzgos Amuzgo",
"azm": "Ipalapa Amuzgo",
"azn": "Western Durango Nahuatl",
"azo": "Awing",
"azt": "Faire Atta",
"azz": "Highland Puebla Nahuatl",
"ba": "Bashkir",
"baa": "Babatana",
"bab": "Bainouk-Gunyuño",
"bac": "Badui",
"bad": "Banda",
"bad-cnt": "Central Banda",
"bae": "Baré",
"baf": "Nubaca",
"bag": "Tuki",
"bah": "Bahamian Creole",
"bai": "Bamileke",
"baj": "Barakai",
"bal": "Baluchi",
"bal-eas": "Eastern Balochi",
"bal-sou": "Southern Balochi",
"bal-wes": "Western Balochi",
"ban": "Balinese",
"bao": "Waimaha",
"bap": "Bantawa",
"bar": "Bavarian",
"bas": "Basaa",
"bat": "Baltic",
"bat-pro": "Proto-Baltic",
"bau": "Badanchi",
"bav": "Babungo",
"baw": "Bambili-Bambui",
"bax": "Bamum",
"bay": "Batuley",
"bba": "Baatonum",
"bbb": "Barai",
"bbc": "Toba Batak",
"bbd": "Bau",
"bbe": "Bangba",
"bbf": "Baibai",
"bbg": "Barama",
"bbh": "Bugan",
"bbi": "Barombi",
"bbj": "Ghomala'",
"bbk": "Babanki",
"bbl": "Bats",
"bbm": "Babango",
"bbn": "Uneapa",
"bbo": "Konabéré",
"bbp": "West Central Banda",
"bbq": "Bamali",
"bbr": "Girawa",
"bbs": "Bakpinka",
"bbt": "Mburku",
"bbu": "Bakulung",
"bbv": "Karnai",
"bbw": "Baba",
"bbx": "Bubia",
"bby": "Befang",
"bca": "Central Bai",
"bcb": "Bainouk-Samik",
"bcc": "Southern Balochi",
"bcd": "North Babar",
"bce": "Bamenyam",
"bcf": "Bamu",
"bcg": "Baga Pokur",
"bch": "Bariai",
"bci": "Baoule",
"bcj": "Bardi",
"bck": "Bunaba",
"bcl": "Bikol Central",
"bcm": "Banoni",
"bcn": "Bibaali",
"bco": "Kaluli",
"bcp": "Bali",
"bcq": "Bench",
"bcr": "Babine-Witsuwit'en",
"bcs": "Kohumono",
"bct": "Bendi",
"bcu": "Biliau",
"bcv": "Shoo-Minda-Nye",
"bcw": "Bana",
"bcy": "Bacama",
"bcz": "Bainouk-Gunyaamolo",
"bda": "Bayot",
"bdb": "Basap",
"bdc": "Emberá-Baudó",
"bdd": "Bunama",
"bde": "Bade",
"bdf": "Biage",
"bdg": "Bonggi",
"bdh": "Tara Baka",
"bdi": "Burun",
"bdj": "Bai",
"bdk": "Budukh",
"bdl": "Indonesian Bajau",
"bdm": "Buduma",
"bdn": "Baldemu",
"bdo": "Morom",
"bdp": "Bende",
"bdq": "Bahnar",
"bdr": "West Coast Bajau",
"bds": "Burunge",
"bdt": "Bokoto",
"bdu": "Oroko",
"bdv": "Bodo Parja",
"bdw": "Baham",
"bdx": "Budong-Budong",
"bdy": "Bandjalang",
"bdz": "Badeshi",
"be": "Belarusian",
"bea": "Beaver",
"beb": "Bebele",
"bec": "Iceve-Maci",
"bed": "Bedoanas",
"bee": "Byangsi",
"bef": "Benabena",
"beg": "Belait",
"beh": "Biali",
"bei": "Bekati'",
"bej": "Beja",
"bek": "Bebeli",
"bem": "Bemba",
"beo": "Beami",
"bep": "Besoa",
"beq": "Beembe",
"ber": "Berber",
"ber-fog": "Fogaha",
"ber-pro": "Proto-Berber",
"ber-zuw": "Zuwara",
"bes": "Besme",
"bet": "Guiberoua Bété",
"beu": "Blagar",
"bev": "Daloa Bété",
"bew": "Betawi",
"bew-kot": "Betawi Kota",
"bew-ora": "Betawi Ora",
"bew-udi": "Betawi Udik",
"bex": "J<NAME>",
"bey": "Akuwagel",
"bez": "Kibena",
"bfa": "Bari",
"bfb": "<NAME>",
"bfc": "Northern Bai",
"bfd": "Bafut",
"bfe": "Betaf",
"bff": "Bofi",
"bfg": "Busang Kayan",
"bfh": "Blafe",
"bfi": "British Sign Language",
"bfj": "Bafanji",
"bfk": "Ban Khor Sign Language",
"bfl": "Banda-Ndélé",
"bfm": "Mmen",
"bfn": "Bunak",
"bfo": "Malba Birifor",
"bfp": "Beba",
"bfq": "Badaga",
"bfr": "Bazigar",
"bfs": "Southern Bai",
"bft": "Balti",
"bfu": "Gahri",
"bfw": "Bondo",
"bfx": "Bantayanon",
"bfy": "Bagheli",
"bfz": "Mahasu Pahari",
"bg": "Bulgarian",
"bga": "Gwamhi-Wuri",
"bgb": "Bobongko",
"bgc": "Haryanvi",
"bgd": "Rathwi Bareli",
"bge": "Bauria",
"bgf": "Bangandu",
"bgg": "Bugun",
"bgi": "Giangan",
"bgj": "Bangolan",
"bgk": "Bit",
"bgl": "Bo",
"bgn": "Western Balochi",
"bgo": "Baga Koga",
"bgp": "Eastern Balochi",
"bgq": "Bagri",
"bgr": "Bawm Chin",
"bgs": "Tagabawa",
"bgt": "Bughotu",
"bgu": "Mbongno",
"bgv": "Warkay-Bipim",
"bgw": "Bhatri",
"bgx": "Balkan Gagauz Turkish",
"bgy": "Benggoi",
"bgz": "Banggai",
"bh": "Bihari",
"bha": "Bharia",
"bhb": "Bhili",
"bhc": "Biga",
"bhd": "Bhadrawahi",
"bhe": "Bhaya",
"bhf": "Odiai",
"bhg": "Binandere",
"bhh": "Bukhari",
"bhi": "Bhilali",
"bhj": "Bahing",
"bhl": "Bimin",
"bhl-prk": "Bahliki",
"bhm": "Bathari",
"bhn": "Bohtan Neo-Aramaic",
"bho": "Bhojpuri",
"bhp": "Bima",
"bhq": "Tukang Besi South",
"bhs": "Buwal",
"bht": "Bhattiyali",
"bhu": "Bhunjia",
"bhv": "Bahau",
"bhw": "Biak",
"bhx": "Bhalay",
"bhy": "Bhele",
"bhz": "Bada",
"bi": "Bislama",
"bia": "Badimaya",
"bib": "Bissa",
"bic": "Bikaru",
"bid": "Bidiyo",
"bie": "Bepour",
"bif": "Biafada",
"big": "Biangai",
"bij": "Kwanka",
"bil": "Bile",
"bim": "Bimoba",
"bin": "Edo",
"bio": "Nai",
"bip": "Bila",
"biq": "Bipi",
"bir": "Bisorio",
"bit": "Berinomo",
"biu": "Biete",
"biv": "Southern Birifor",
"biw": "Kol (Cameroon)",
"bix": "Bijori",
"biy": "Birhor",
"biz": "Baloi",
"bja": "Budza",
"bjb": "Barngarla",
"bjc": "Bariji",
"bje": "Biao-J<NAME>",
"bjf": "Barzani Jewish Neo-Aramaic",
"bjg": "Bidyogo",
"bjh": "Bahinemo",
"bji": "Burji",
"bjj": "Kannauji",
"bjk": "Barok",
"bjl": "Bulu (New Guinea)",
"bjm": "Bajelani",
"bjn": "Banjarese",
"bjo": "Mid-Southern Banda",
"bjp": "Fanamaket",
"bjr": "Binumarien",
"bjs": "Bajan",
"bjt": "Balanta-Ganja",
"bju": "Busuu",
"bjv": "Bedjond",
"bjw": "Bakwé",
"bjx": "Banao Itneg",
"bjy": "Bayali",
"bjz": "Baruga",
"bka": "Kyak",
"bkc": "Baka",
"bkd": "Binukid",
"bkf": "Beeke",
"bkg": "Buraka",
"bkh": "Bakoko",
"bki": "Baki",
"bkj": "Pande",
"bkk": "Brokskat",
"bkl": "Berik",
"bkm": "Kom (Cameroon)",
"bkn": "Bukitan",
"bko": "Kwa'",
"bkp": "Iboko",
"bkq": "Bakairí",
"bkr": "Bakumpai",
"bks": "Mas<NAME>",
"bkt": "Boloki",
"bku": "Buhid",
"bkv": "Bekwarra",
"bkw": "Bekwel",
"bkx": "Baikeno",
"bky": "Bokyi",
"bkz": "Bungku",
"bla": "Blackfoot",
"blb": "Bilua",
"blc": "Bella Coola",
"bld": "Bolango",
"ble": "Balanta-Kentohe",
"blf": "Buol",
"blg": "Balau",
"blh": "Kuwaa",
"bli": "Bolia",
"blj": "Bolongan",
"blk": "<NAME>",
"bll": "Biloxi",
"blm": "Beli",
"bln": "Southern Catanduanes Bicolano",
"blo": "Anii",
"blp": "Blablanga",
"blq": "Baluan-Pam",
"blr": "Blang",
"bls": "Balaesang",
"blt": "Tai Dam",
"blv": "Kibala",
"blw": "Balangao",
"blx": "Mag-Indi Ayta",
"bly": "Notre",
"blz": "Balantak",
"bm": "Bambara",
"bma": "Lame",
"bmb": "Bembe",
"bmc": "Biem",
"bmd": "<NAME>",
"bme": "Limassa",
"bmf": "Bom",
"bmg": "Bamwe",
"bmh": "Kein",
"bmi": "Bagirmi",
"bmj": "Bote-Majhi",
"bmk": "Ghayavi",
"bml": "Bomboli",
"bmn": "Bina",
"bmo": "Bambalang",
"bmp": "Bulgebi",
"bmq": "Bomu",
"bmr": "Muinane",
"bmt": "Biao Mon",
"bmu": "Somba-Siawari",
"bmv": "Bum",
"bmw": "Bomwali",
| |
= list(itertools.chain(*[c for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
pausacolcheia = list(itertools.chain(*[pc for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
pausaseminima = list(itertools.chain(*[psm for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
pausasemicolcheia = list(itertools.chain(*[ps for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
pausaminima = list(itertools.chain(*[pm for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
pausasemibreve = list(itertools.chain(*[psb for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
barracompasso = list(itertools.chain(*[bc for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
sustenidos = list(itertools.chain(*[st for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
bemois = list(itertools.chain(*[bm for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
marcacaotempo = list(itertools.chain(*[mt for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
colcheias = list(itertools.chain(*[cc for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
semibreves = list(itertools.chain(*[sb for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
outros_simbolos = list(itertools.chain(*[os for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
semicolcheias = list(itertools.chain(*[smc for m, s, g, f, c, ps, pc, psm, pm, psb, bc, st, bm, mt, cc, sb, os, smc in figuras]))
img_seminimas = [self.extrair_imagem(tipo, s) for s in seminimas]
img_minimas = [self.extrair_imagem(tipo, m) for m in minimas]
img_clavesol = [self.extrair_imagem(tipo, g) for g in clavesol]
img_clavefa = [self.extrair_imagem(tipo, f) for f in clavefa]
img_clavedo = [self.extrair_imagem(tipo, c) for c in clavedo]
img_pausacolcheia = [self.extrair_imagem(tipo, pc) for pc in pausacolcheia]
img_pausaseminima = [self.extrair_imagem(tipo, ps) for ps in pausaseminima]
img_pausasemicolcheia = [self.extrair_imagem(tipo, psm) for psm in pausasemicolcheia]
img_pausaminima = [self.extrair_imagem(tipo, pm) for pm in pausaminima]
img_pausasemibreve = [self.extrair_imagem(tipo, psb) for psb in pausasemibreve]
img_barracompasso = [self.extrair_imagem(tipo, bc) for bc in barracompasso]
img_sustenidos = [self.extrair_imagem(tipo, bc) for bc in sustenidos]
img_bemois = [self.extrair_imagem(tipo, bm) for bm in bemois]
img_marcacao_tempo = [self.extrair_imagem(tipo, mt) for mt in marcacaotempo]
img_colcheias = [self.extrair_imagem(tipo, cc) for cc in colcheias]
img_semibreves = [self.extrair_imagem(tipo, cc) for cc in semibreves]
img_outros_simbolos = [self.extrair_imagem(tipo, cc) for cc in outros_simbolos]
img_semicolcheias = [self.extrair_imagem(tipo, cc) for cc in semicolcheias]
#if tipo == 'MANUSCRITO':
img_seminimas = [resize(sm, (self.ALTURA, self.LARGURA)) for sm in img_seminimas]
img_minimas = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_minimas]
img_clavesol = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_clavesol]
img_clavefa = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_clavefa]
img_clavedo = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_clavedo]
img_pausacolcheia = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_pausacolcheia]
img_pausaseminima = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_pausaseminima]
img_pausasemicolcheia = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_pausasemicolcheia]
img_pausaminima = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_pausaminima]
img_pausasemibreve = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_pausasemibreve]
img_barracompasso = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_barracompasso]
img_sustenidos = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_sustenidos]
img_bemois = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_bemois]
img_marcacao_tempo = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_marcacao_tempo]
img_colcheias = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_colcheias]
img_semibreves = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_semibreves]
img_outros_simbolos = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_outros_simbolos]
img_semicolcheias = [resize(mn, (self.ALTURA, self.LARGURA)) for mn in img_semicolcheias]
# And re-binarize, to compensate for interpolation effects
for im in img_seminimas:
im[im > 0] = 1
for im in img_minimas:
im[im > 0] = 1
for im in img_clavesol:
im[im > 0] = 1
for im in img_clavefa:
im[im > 0] = 1
for im in img_clavedo:
im[im > 0] = 1
for im in img_pausacolcheia:
im[im > 0] = 1
for im in img_pausaseminima:
im[im > 0] = 1
for im in img_pausasemicolcheia:
im[im > 0] = 1
for im in img_pausaminima:
im[im > 0] = 1
for im in img_pausasemibreve:
im[im > 0] = 1
for im in img_barracompasso:
im[im > 0] = 1
for im in img_sustenidos:
im[im > 0] = 1
for im in img_bemois:
im[im > 0] = 1
for im in img_marcacao_tempo:
im[im > 0] = 1
for im in img_colcheias:
im[im > 0] = 1
for im in img_semibreves:
im[im > 0] = 1
for im in img_outros_simbolos:
im[im > 0] = 1
for im in img_semicolcheias:
im[im > 0] = 1
rotulos_seminimas = [self.ROTULO_SEMINIMA for _ in img_seminimas]
rotulos_minimas = [self.ROTULO_MINIMA for _ in img_minimas]
rotulos_clavesol = [self.ROTULO_CLAVESOL for _ in img_clavesol]
rotulos_clavefa = [self.ROTULO_CLAVEFA for _ in img_clavefa]
rotulos_clavedo = [self.ROTULO_CLAVEDO for _ in img_clavedo]
rotulos_pausacolcheia = [self.ROTULO_PAUSACOLCHEIA for _ in img_pausacolcheia]
rotulos_pausaseminima = [self.ROTULO_PAUSASEMINIMA for _ in img_pausaseminima]
rotulos_pausasemicolcheia = [self.ROTULO_PAUSASEMICOLCHEIA for _ in img_pausasemicolcheia]
rotulos_pausaminima = [self.ROTULO_PAUSAMINIMA for _ in img_pausaminima]
rotulos_pausasemibreve = [self.ROTULO_PAUSASEMIBREVE for _ in img_pausasemibreve]
rotulos_barracompasso = [self.ROTULO_BARRACOMPASSO for _ in img_barracompasso]
rotulos_sustenido = [self.ROTULO_SUSTENIDOS for _ in img_sustenidos]
rotulos_bemois = [self.ROTULO_BEMOIS for _ in img_bemois]
rotulos_marcacao_tempo = [self.ROTULO_MARCACAO_TEMPO for _ in img_marcacao_tempo]
rotulos_colcheias = [self.ROTULO_COLCHEIA for _ in img_colcheias]
rotulos_semibreves = [self.ROTULO_SEMIBREVE for _ in img_semibreves]
rotulos_outros_simbolos = [self.ROTULO_OUTROS for _ in img_outros_simbolos]
rotulos_semicolcheias = [self.ROTULO_SEMICOLCHEIAS for _ in img_semicolcheias]
self.misturadas = img_seminimas +\
img_minimas + \
img_clavesol + \
img_clavefa + \
img_clavedo + \
img_pausaminima + \
img_pausacolcheia + \
img_pausaseminima + \
img_pausasemicolcheia + \
img_pausasemibreve + \
img_barracompasso+ \
img_sustenidos + \
img_bemois + \
img_marcacao_tempo + \
img_colcheias + \
img_semibreves + \
img_outros_simbolos + \
img_semicolcheias
# converte imagem em matrix unidimencional
self.figuras_array_linha = [n.flatten() for n in self.misturadas]
self.rotulos_classe = rotulos_seminimas + \
rotulos_minimas + \
rotulos_clavesol + \
rotulos_clavefa + \
rotulos_clavedo + \
rotulos_pausacolcheia + \
rotulos_pausaseminima + \
rotulos_pausasemicolcheia + \
rotulos_pausaminima + \
rotulos_pausasemibreve + \
rotulos_barracompasso+ \
rotulos_sustenido + \
rotulos_bemois + \
rotulos_marcacao_tempo + \
rotulos_colcheias + \
rotulos_semibreves + \
rotulos_outros_simbolos + \
rotulos_semicolcheias
self.X_conjunto_treino, self.X_conjunto_teste, self.Y_conjunto_treino, self.Y_conjunto_teste = train_test_split(
self.figuras_array_linha, self.rotulos_classe, test_size=0.40, random_state=self.RANDOMSTATE,
stratify=self.rotulos_classe)
self.KNN = KNeighborsClassifier(n_neighbors=self.K_VIZINHOS_PROXIMOS)
self.KNN.fit(self.X_conjunto_treino, self.Y_conjunto_treino)
def getDadosRotulo(self, dados, rotulos, rotulo, indice):
ret = []
size = len(dados) -1;
for idx in range(0, size):
try:
if (rotulos[idx] == rotulo):
ret.append(dados[idx][indice])
except:
print ('Out of range')
return ret
    def visualizaPontos(self, dados, rotulos, d1, d2):
        """
        Save a 2-D scatter plot of feature dimensions d1 x d2 for the
        labeled samples to '<DIR_TREINAMENTO>/distribuicao.png'.

        Only the SEMINIMA (black) and MINIMA (blue) classes are drawn;
        the scatter calls for the remaining classes are commented out
        below and can be re-enabled as needed.
        """
        fig, ax = plt.subplots()
        ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_SEMINIMA, d1), self.getDadosRotulo(dados, rotulos, self.ROTULO_SEMINIMA, d2), c='black', marker='.')
        ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_MINIMA, d1), self.getDadosRotulo(dados, rotulos, self.ROTULO_MINIMA, d2), c='blue', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_CLAVEDO, d1), self.getDadosRotulo(dados, rotulos, self.ROTULO_CLAVEDO, d2), c='purple', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_CLAVESOL, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_CLAVESOL, d2), c='yellow', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_CLAVEFA, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_CLAVEFA, d2), c='red', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_PAUSACOLCHEIA, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_PAUSACOLCHEIA, d2), c='lime', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_PAUSASEMINIMA, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_PAUSASEMINIMA, d2), c='cyan', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_PAUSASEMICOLCHEIA, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_PAUSASEMICOLCHEIA, d2), c='orange', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_PAUSASEMIBREVE, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_PAUSASEMIBREVE, d2), c='green', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_PAUSAMINIMA, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_PAUSAMINIMA, d2), c='aqua', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_BARRACOMPASSO, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_BARRACOMPASSO, d2), c='navy', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_SUSTENIDOS, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_SUSTENIDOS, d2), c='indigo', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_BEMOIS, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_BEMOIS, d2), c='teal', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_MARCACAO_TEMPO, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_MARCACAO_TEMPO, d2), c='gold', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_COLCHEIA, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_COLCHEIA, d2), c='crimson', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_SEMIBREVE, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_SEMIBREVE, d2), c='maroon', marker='.')
        #ax.scatter(self.getDadosRotulo(dados, rotulos, self.ROTULO_OUTROS, d1),self.getDadosRotulo(dados, rotulos, self.ROTULO_OUTROS, d2), c='orchid', marker='.')#
        # NOTE(review): 'fig' is unused and the figure is never closed;
        # repeated calls may accumulate open matplotlib figures -- confirm
        plt.savefig(self.DIR_TREINAMENTO + 'distribuicao.png')
def showDistribuicao(self, tipo):
plt.ioff()
if | |
#!/usr/bin/env python
# whisker_serial_order/models.py
"""
===============================================================================
Copyright © 2016-2018 <NAME> (<EMAIL>).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
SQLAlchemy models and other data storage classes for the serial order task.
"""
from argparse import ArgumentTypeError
import logging
from typing import Any, List, Iterable, Optional, Set, Tuple
import arrow
from cardinal_pythonlib.sqlalchemy.alembic_func import (
ALEMBIC_NAMING_CONVENTION,
)
from cardinal_pythonlib.sqlalchemy.arrow_types import ArrowMicrosecondType
from cardinal_pythonlib.sqlalchemy.orm_inspect import (
deepcopy_sqla_object,
SqlAlchemyAttrDictMixin,
)
from sqlalchemy import (
BigInteger,
Boolean,
Column,
Float,
ForeignKey,
Integer,
MetaData,
String, # variable length in PostgreSQL; specify length for MySQL
Text, # variable length
)
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, Session
from sqlalchemy.sql.type_api import TypeDecorator
from sqlalchemy_utils import ScalarListType
from whisker_serial_order.constants import (
DATETIME_FORMAT_PRETTY,
MAX_EVENT_LENGTH,
MAX_HOLE_NUMBER,
MIN_HOLE_NUMBER,
MIN_SERIAL_ORDER_POSITION,
MAX_SERIAL_ORDER_POSITION,
)
from whisker_serial_order.extra import latency_s
from whisker_serial_order.version import (
MAX_VERSION_LENGTH,
SERIAL_ORDER_VERSION,
)
# Module-level logger for this file.
log = logging.getLogger(__name__)
# =============================================================================
# Constants
# =============================================================================
MAX_GENERIC_STRING_LENGTH = 255  # VARCHAR length for generic string columns
MAX_HOLE_OR_SERIALPOS_PAIR_DEFINITION_STRING_LENGTH = 255  # more than enough!
N_HOLES_FOR_CHOICE = 2  # required size of each choice hole/position group
# =============================================================================
# SQLAlchemy base.
# =============================================================================
# Derived classes will share the specified metadata.
# The naming convention keeps constraint names stable for Alembic migrations.
MASTER_META = MetaData(naming_convention=ALEMBIC_NAMING_CONVENTION)
Base = declarative_base(metadata=MASTER_META)
# =============================================================================
# Helper functions/classes
# =============================================================================
def spatial_to_serial_order(hole_sequence: List[int],
                            holes: List[int]) -> List[int]:
    """
    Converts a temporal sequence of spatial holes into a list of serial
    order positions.

    Takes the list of spatial holes in use (``hole_sequence``) and the
    holes being enquired about (``holes``), and returns the corresponding
    1-based serial order positions.
    (The original docstring claimed this returned "spatial hole numbers",
    which was wrong; that is what :func:`serial_order_to_spatial` does.)

    Args:
        hole_sequence: ordered list of spatial hole numbers to be presented
            in the first phase of the task, e.g. [3, 1, 4].
        holes: spatial hole numbers to be enquired about: "what was the
            temporal order of these holes in the first phase?"; e.g. [4, 3].

    Returns:
        list of serial order positions (in this example: [3, 1]).

    Raises:
        ValueError: if any hole in ``holes`` is absent from
            ``hole_sequence`` (propagated from :meth:`list.index`).
    """
    # list.index() is 0-based; serial order positions are 1-based.
    return [hole_sequence.index(h) + 1 for h in holes]
def serial_order_to_spatial(hole_sequence: List[int],
                            seq_positions: List[int]) -> List[int]:
    """
    Maps serial order positions (at the choice phase) back onto the spatial
    holes of the first-phase sequence.

    Args:
        hole_sequence: ordered list of spatial hole numbers presented in
            the first phase of the task, e.g. [3, 1, 4].
        seq_positions: list of 1-based serial order positions, e.g. [1, 3]
            for the first and third in the sequence.

    Returns:
        the spatial hole numbers at those positions (e.g. [3, 4]).
    """
    spatial_holes = []  # holes, in the order the positions were requested
    for position in seq_positions:
        # Serial order positions are 1-based; list indexing is 0-based.
        spatial_holes.append(hole_sequence[position - 1])
    return spatial_holes
class ChoiceHoleRestriction(object):
    """
    Class to describe choice hole restrictions.

    :ivar permissible_combinations: variable of type ``Set[Tuple[int]]``, where
        the tuples are sorted sequences of hole numbers. If the set is not
        empty, then only such combinations are allowed.
    """
    DEFAULT_HOLE_SEPARATOR = ","
    DEFAULT_GROUP_SEPARATOR = ";"  # NB ";" trickier from Bash command line

    def __init__(
            self,
            # String-based init:
            description: str = "",
            hole_separator: str = DEFAULT_HOLE_SEPARATOR,
            group_separator: str = DEFAULT_GROUP_SEPARATOR,
            # Hole-based init:
            permissible_combinations: List[List[int]] = None) -> None:
        """
        Args:
            description: textual description like "1,3; 2,4" to restrict
                to the combinations of "hole 1 versus hole 3" and "hole 2
                versus hole 4".
            hole_separator: string used to separate holes in a group
                (usually ",").
            group_separator: string used to separate groups
                (usually ";").
            permissible_combinations: list of lists of spatial hole numbers,
                as an alternative to using ``description``. Use one or the
                other.
        Raises:
            argparse.ArgumentTypeError: if its arguments are invalid.
        """
        def assert_hole_ok(hole_: int) -> None:
            # Holes must lie within the apparatus's valid hole range.
            if not (MIN_HOLE_NUMBER <= hole_ <= MAX_HOLE_NUMBER):
                raise ArgumentTypeError(
                    "Bad hole number {} (must be in range {}-{})".format(
                        hole_, MIN_HOLE_NUMBER, MAX_HOLE_NUMBER))

        if description and permissible_combinations:
            raise ArgumentTypeError(
                "Specify description or permissible_combinations, "
                "but not both"
            )
        permissible_combinations = permissible_combinations or []  # type: List[List[int]]  # noqa
        self.permissible_combinations = set()  # type: Set[Tuple[int]]
        # NOTE: can't add lists to a set (TypeError: unhashable type: 'list')
        if description:
            # Initialize from string
            for group_string in description.split(group_separator):
                holes = []  # type: List[int]
                for hole_string in group_string.split(hole_separator):
                    try:
                        hole = int(hole_string.strip())
                    except (ValueError, TypeError):
                        raise ArgumentTypeError("Not an integer: {!r}".format(
                            hole_string))
                    assert_hole_ok(hole)
                    holes.append(hole)
                if len(holes) != N_HOLES_FOR_CHOICE:
                    raise ArgumentTypeError(
                        "In description {!r}, hole group {!r} must be of "
                        "length {}, but isn't".format(
                            description, group_string, N_HOLES_FOR_CHOICE)
                    )
                holes.sort()
                self.permissible_combinations.add(tuple(holes))
        elif permissible_combinations:
            # Initialize from list of lists of holes
            for group in permissible_combinations:
                for hole in group:
                    if not isinstance(hole, int):
                        raise ArgumentTypeError(
                            "Not an integer: {!r}".format(hole))
                    assert_hole_ok(hole)
                # CONSISTENCY FIX: the string-based path above enforces the
                # group size; previously the list-based path silently
                # accepted groups of any length. Enforce it here too.
                if len(group) != N_HOLES_FOR_CHOICE:
                    raise ArgumentTypeError(
                        "Hole group {!r} must be of length {}, but "
                        "isn't".format(group, N_HOLES_FOR_CHOICE))
                holes = sorted(group)
                self.permissible_combinations.add(tuple(holes))
        # Check values are sensible:
        for holes in self.permissible_combinations:
            if len(holes) != len(set(holes)):
                raise ArgumentTypeError("No duplicates permitted; problem was "
                                        "{!r}".format(holes))

    def description(self) -> str:
        """
        Returns the description string (e.g. "1,3; 2,4") that can be used
        to recreate this object.
        """
        groupsep = self.DEFAULT_GROUP_SEPARATOR + " "
        holesep = self.DEFAULT_HOLE_SEPARATOR
        if self.permissible_combinations:
            return groupsep.join(
                holesep.join(str(h) for h in holes)
                for holes in sorted(self.permissible_combinations)
            )
        return ""

    def __str__(self) -> str:
        return "ChoiceHoleRestriction({!r})".format(self.description())

    def permissible(self, choice_holes: Iterable[int]) -> bool:
        """
        Is the supplied list of choice holes compatible with the restrictions?

        Args:
            choice_holes: list of spatial holes.
        """
        if not self.permissible_combinations:
            # No restrictions; OK
            return True
        sorted_holes = tuple(sorted(choice_holes))
        return sorted_holes in self.permissible_combinations
class ChoiceHoleRestrictionType(TypeDecorator):
    """
    SQLAlchemy column type that persists a :class:`.ChoiceHoleRestriction`
    as its textual description.
    See http://docs.sqlalchemy.org/en/latest/core/custom_types.html.
    """
    impl = String(length=MAX_HOLE_OR_SERIALPOS_PAIR_DEFINITION_STRING_LENGTH)

    def process_bind_param(
            self, value: Any,
            dialect: DefaultDialect) -> Optional[str]:
        """
        Converts a bound Python parameter to the database value.

        Args:
            value: should be a :class:`.ChoiceHoleRestriction` or None
            dialect: SQLAlchemy database dialect.
        Returns:
            string (outbound to database)
        """
        if not value:
            # None (or another falsy placeholder): pass through unchanged.
            return value
        if isinstance(value, ChoiceHoleRestriction):
            return value.description()
        raise ValueError("Bad object arriving at "
                         "ChoiceHoleRestrictionType.process_bind_param: "
                         "{!r}".format(value))

    def process_result_value(
            self, value: Any,
            dialect: DefaultDialect) -> Optional[ChoiceHoleRestriction]:
        """
        Receive a result-row column value to be converted.

        Args:
            value: data fetched from the database (will be a string).
            dialect: SQLAlchemy database dialect.
        Returns:
            a :class:`.ChoiceHoleRestriction` object if the string is valid,
            otherwise None
        """
        if not value:
            return None
        try:
            restriction = ChoiceHoleRestriction(description=value)
        except ArgumentTypeError:
            # A malformed database value is logged and treated as absent
            # rather than crashing the row load.
            log.debug("Bad value received from database to "
                      "ChoiceHoleRestrictionType.process_result_value: "
                      "{!r}".format(value))
            restriction = None
        return restriction

    def process_literal_param(self, value: Any, dialect: DefaultDialect) -> str:
        """
        Receive a literal parameter value to be rendered inline within
        a statement.
        (An abstract method of ``TypeDecorator``, so we should implement it.)

        Args:
            value: a Python value
            dialect: SQLAlchemy database dialect.
        Returns:
            a string to be baked into some SQL
        """
        return str(value)

    @property
    def python_type(self) -> type:
        """
        The Python type handled by this column type:
        :class:`.ChoiceHoleRestriction`.
        """
        return ChoiceHoleRestriction
class SerialPosRestriction(object):
"""
Class to describe restrictions on the serial order positions offered at
the choice phase.
:ivar permissible_combinations: variable of type ``Set[Tuple[int]]``, where
the tuples are sorted sequences of serial order position numbers (1
being the first). If the set is not empty, then only such combinations
are allowed.
"""
DEFAULT_POS_SEPARATOR = ","
DEFAULT_GROUP_SEPARATOR = ";" # NB ";" trickier from Bash command line
def __init__(
self,
# String-based init:
description: str = "",
position_separator: str = DEFAULT_POS_SEPARATOR,
group_separator: str = DEFAULT_GROUP_SEPARATOR,
# Hole-based init:
permissible_combinations: List[List[int]] = None) -> None:
"""
Args:
description: textual description like "1,3; 2,3" to restrict
to the combinations of "serial position 1 versus 3" and "serial
position 2 versus 3".
position_separator: string used to separate positions
(usually ",").
group_separator: string used to separate groups
(usually ";").
permissible_combinations: list of lists of serial order positions,
as an alternative to using ``description``. Use one or the
other.
Raises:
argparse.ArgumentTypeError: if its arguments are invalid.
"""
def assert_position_ok(pos_: int) -> None:
if not (MIN_SERIAL_ORDER_POSITION <= pos_ <=
MAX_SERIAL_ORDER_POSITION):
raise ArgumentTypeError(
"Bad serial order position {} (must be in range "
"{}-{})".format(pos_, MIN_SERIAL_ORDER_POSITION,
MAX_SERIAL_ORDER_POSITION))
if description and permissible_combinations:
raise ArgumentTypeError(
"Specify description or permissible_combinations, "
"but not both"
)
permissible_combinations = permissible_combinations or [] # type: List[List[int]] # noqa
self.permissible_combinations = set() # type: Set[Tuple[int]]
# NOTE: can't add lists to a set (TypeError: unhashable type: | |
import datetime
import enum
import gc
import itertools
import pickle
import pickletools
import string
import sys
import uuid
from distutils.version import StrictVersion
import pytest
import quickle
# Collection size threshold used by later tests (they pickle BATCHSIZE + 10
# elements); presumably matches the pickler's internal batching — confirm.
BATCHSIZE = 1000
def test_picklebuffer_is_shared():
    # quickle exposes the stdlib PickleBuffer type itself, not a clone.
    assert pickle.PickleBuffer is quickle.PickleBuffer
def test_module_version():
    # Raises if quickle.__version__ is not a valid strict version string.
    StrictVersion(quickle.__version__)
def check(obj, sol=None):
    """Round-trip *obj* through quickle and pickle (protocol 5).

    Verifies that quickle->quickle, quickle->pickle and pickle->quickle
    all reproduce *sol* (defaults to *obj*) with the exact same type.
    """
    if sol is None:
        sol = obj

    def verify(result):
        # Equality alone is not enough (e.g. bytes == bytearray compares
        # equal); the concrete type must round-trip too.
        assert result == sol
        assert type(result) is type(sol)

    encoded = quickle.dumps(obj)
    verify(quickle.loads(encoded))
    verify(pickle.loads(encoded))
    verify(quickle.loads(pickle.dumps(obj, protocol=5)))
def test_pickle_none():
    check(None)
@pytest.mark.parametrize("value", [True, False])
def test_pickle_bool(value):
    check(value)
@pytest.mark.parametrize("nbytes", [1, 2, 4, 8, 254, 255, 256, 257])
@pytest.mark.parametrize("negative", [False, True])
def test_pickle_int(nbytes, negative):
    # Magnitude scales with nbytes so different-width int encodings are hit.
    value = 2 ** (nbytes * 8 - 6)
    if negative:
        value *= -1
    check(value)
@pytest.mark.parametrize(
    "value",
    [
        0.0,
        4.94e-324,  # ~ smallest positive subnormal double
        1e-310,
        7e-308,
        6.626e-34,
        0.1,
        0.5,
        3.14,
        263.44582062374053,
        6.022e23,
        1e30,
    ],
)
@pytest.mark.parametrize("negative", [False, True])
def test_pickle_float(value, negative):
    if negative:
        value *= -1
    check(value)
@pytest.mark.parametrize("nbytes", [0, 10, 512])
def test_pickle_bytes(nbytes):
    value = b"y" * nbytes
    check(value)
@pytest.mark.parametrize("nbytes", [0, 10, 512])
def test_pickle_bytearray(nbytes):
    value = bytearray(b"y" * nbytes)
    check(value)
@pytest.mark.parametrize("nbytes", [0, 10, 512])
def test_pickle_unicode(nbytes):
    value = "y" * nbytes
    check(value)
# Backslash/newline mixes, astral-plane characters, and a lone surrogate.
@pytest.mark.parametrize(
    "value",
    ["<\\u>", "<\\\u1234>", "<\n>", "<\\>", "\U00012345", "<\\\U00012345>", "<\udc80>"],
)
def test_pickle_unicode_edgecases(value):
    check(value)
@pytest.mark.parametrize("n", [0, 1, 5, 100, BATCHSIZE + 10])
def test_pickle_set(n):
    check(set(range(n)))
@pytest.mark.parametrize("n", [0, 1, 5, 100, BATCHSIZE + 10])
def test_pickle_frozenset(n):
    check(frozenset(range(n)))
@pytest.mark.parametrize("n", [0, 1, 2, 3, 100, BATCHSIZE + 10])
def test_pickle_tuple(n):
    check(tuple(range(n)))
def test_pickle_recursive_tuple():
    # A tuple that (indirectly, via an inner list) contains itself.
    obj = ([None],)
    obj[0][0] = obj
    quick_res = quickle.dumps(obj)
    for loads in [quickle.loads, pickle.loads]:
        obj2 = loads(quick_res)
        assert isinstance(obj2, tuple)
        assert obj2[0][0] is obj2
        # Fix the cycle so `==` works, then test
        obj2[0][0] = None
        assert obj2 == ([None],)
@pytest.mark.parametrize("n", [0, 1, 5, 100, BATCHSIZE + 10])
def test_pickle_list(n):
    check(list(range(n)))
def test_pickle_recursive_list():
    # self referential
    obj = []
    obj.append(obj)
    quick_res = quickle.dumps(obj)
    for loads in [quickle.loads, pickle.loads]:
        obj2 = loads(quick_res)
        assert isinstance(obj2, list)
        assert obj2[0] is obj2
        assert len(obj2) == 1
    # one level removed
    obj = [[None]]
    obj[0][0] = obj
    quick_res = quickle.dumps(obj)
    for loads in [quickle.loads, pickle.loads]:
        obj2 = loads(quick_res)
        assert isinstance(obj2, list)
        assert obj2[0][0] is obj2
        # Fix the cycle so `==` works, then test
        obj2[0][0] = None
        assert obj2 == [[None]]
@pytest.mark.parametrize("n", [0, 1, 5, 100, BATCHSIZE + 10])
def test_pickle_dict(n):
    # Keys are (letter, letter) tuples; n distinct entries.
    value = dict(
        zip(itertools.product(string.ascii_letters, string.ascii_letters), range(n))
    )
    check(value)
def test_pickle_recursive_dict():
    # self referential
    obj = {}
    obj[0] = obj
    quick_res = quickle.dumps(obj)
    for loads in [quickle.loads, pickle.loads]:
        obj2 = loads(quick_res)
        assert isinstance(obj2, dict)
        assert obj2[0] is obj2
        assert len(obj2) == 1
    # one level removed
    obj = {0: []}
    obj[0].append(obj)
    quick_res = quickle.dumps(obj)
    for loads in [quickle.loads, pickle.loads]:
        obj2 = loads(quick_res)
        assert isinstance(obj2, dict)
        assert obj2[0][0] is obj2
        # Fix the cycle so `==` works, then test
        obj2[0].pop()
        assert obj2 == {0: []}
def test_pickle_highly_nested_list():
    # Deeply nested list (66 levels).
    obj = []
    for _ in range(66):
        obj = [obj]
    check(obj)
def test_pickle_large_memo():
    # 2000 distinct sublists force the memo table to grow.
    obj = [[1, 2, 3] for _ in range(2000)]
    check(obj)
def test_pickle_a_little_bit_of_everything():
    # One container mixing every supported scalar and collection type.
    obj = [
        1,
        1.5,
        True,
        False,
        None,
        "hello",
        b"hello",
        bytearray(b"hello"),
        (1, 2, 3),
        [1, 2, 3],
        {"hello": "world"},
        {1, 2, 3},
        frozenset([1, 2, 3]),
    ]
    check(obj)
def opcode_in_pickle(code, pickle):
    """Return True if opcode *code* occurs in the pickle byte stream.

    Args:
        code: a one-byte bytes object, e.g. ``pickle.MEMOIZE``.
        pickle: the serialized pickle data to scan.

    NOTE: the second parameter shadows the ``pickle`` module inside this
    function; the name is kept for backward compatibility with callers.
    """
    # Decode once, outside the loop (the original decoded `code` on every
    # iteration); genops yields opcode descriptors whose .code is a 1-char str.
    wanted = code.decode("latin-1")
    return any(op.code == wanted for op, _, _ in pickletools.genops(pickle))
@pytest.mark.parametrize("memoize", [True, False])
def test_pickle_memoize_class_setting(memoize):
    # Two distinct sublists: MEMOIZE opcodes appear iff memoization is on
    # (verified by the opcode_in_pickle assertions below).
    obj = [[1], [2]]
    enc = quickle.Encoder(memoize=memoize)
    assert enc.memoize == memoize
    # immutable
    with pytest.raises(AttributeError):
        enc.memoize = not memoize
    assert enc.memoize == memoize
    # default taken from class
    res = enc.dumps(obj)
    assert opcode_in_pickle(pickle.MEMOIZE, res) == memoize
    assert enc.memoize == memoize
    # specify None, no change
    res = enc.dumps(obj, memoize=None)
    assert opcode_in_pickle(pickle.MEMOIZE, res) == memoize
    assert enc.memoize == memoize
    # specify same, no change
    res = enc.dumps(obj, memoize=memoize)
    assert opcode_in_pickle(pickle.MEMOIZE, res) == memoize
    assert enc.memoize == memoize
    # overridden by opposite value
    res = enc.dumps(obj, memoize=(not memoize))
    assert opcode_in_pickle(pickle.MEMOIZE, res) != memoize
    assert enc.memoize == memoize
@pytest.mark.parametrize("memoize", [True, False])
def test_pickle_memoize_function_settings(memoize):
    obj = [[1], [2]]
    res = quickle.dumps(obj, memoize=memoize)
    assert opcode_in_pickle(pickle.MEMOIZE, res) == memoize
    obj2 = quickle.loads(res)
    assert obj == obj2
    # [[]] * 2 shares one sublist; without memoization the shared identity
    # is lost on round-trip (asserted below).
    obj = [[]] * 2
    res = quickle.dumps(obj, memoize=memoize)
    assert opcode_in_pickle(pickle.MEMOIZE, res) == memoize
    obj2 = quickle.loads(res)
    assert obj == obj2
    assert (obj2[0] is not obj2[1]) == (not memoize)
def test_pickle_memoize_false_recursion_error():
    # A self-referential object cannot be serialized without a memo table.
    obj = []
    obj.append(obj)
    with pytest.raises(RecursionError):
        quickle.dumps(obj, memoize=False)
@pytest.mark.parametrize("cls", [bytes, bytearray])
def test_pickle_picklebuffer_no_callback(cls):
    # With no buffer collection, a PickleBuffer round-trips as its
    # underlying bytes/bytearray object.
    sol = cls(b"hello")
    obj = quickle.PickleBuffer(sol)
    check(obj, sol)
@pytest.mark.parametrize("cls", [bytes, bytearray])
def test_pickler_collect_buffers_true(cls):
    data = cls(b"hello")
    pbuf = quickle.PickleBuffer(data)
    enc = quickle.Encoder(collect_buffers=True)
    assert enc.collect_buffers
    # The setting is immutable on the instance.
    with pytest.raises(AttributeError):
        enc.collect_buffers = False
    # No buffers present returns None
    res, buffers = enc.dumps(data)
    assert buffers is None
    assert quickle.loads(res) == data
    # Buffers are collected and returned
    res, buffers = enc.dumps(pbuf)
    assert buffers == [pbuf]
    assert quickle.loads(res, buffers=buffers) is pbuf
    # Override None uses default
    res, buffers = enc.dumps(pbuf, collect_buffers=None)
    assert buffers == [pbuf]
    assert quickle.loads(res, buffers=buffers) is pbuf
    # Override True is same as default
    res, buffers = enc.dumps(pbuf, collect_buffers=True)
    assert buffers == [pbuf]
    assert quickle.loads(res, buffers=buffers) is pbuf
    # Override False disables buffer collecting
    res = enc.dumps(pbuf, collect_buffers=False)
    assert quickle.loads(res) == data
    # Override doesn't persist
    res, buffers = enc.dumps(pbuf)
    assert buffers == [pbuf]
    assert quickle.loads(res, buffers=buffers) is pbuf
@pytest.mark.parametrize("cls", [bytes, bytearray])
def test_pickler_collect_buffers_false(cls):
    data = cls(b"hello")
    pbuf = quickle.PickleBuffer(data)
    enc = quickle.Encoder(collect_buffers=False)
    assert not enc.collect_buffers
    with pytest.raises(AttributeError):
        enc.collect_buffers = True
    # By default buffers are serialized in-band
    res = enc.dumps(pbuf)
    assert quickle.loads(res) == data
    # Override None uses default
    res = enc.dumps(pbuf, collect_buffers=None)
    assert quickle.loads(res) == data
    # Override False is the same as default
    res = enc.dumps(pbuf, collect_buffers=False)
    assert quickle.loads(res) == data
    # Override True works
    res, buffers = enc.dumps(pbuf, collect_buffers=True)
    assert buffers == [pbuf]
    assert quickle.loads(res, buffers=buffers) is pbuf
    # If no buffers present, output is None
    res, buffers = enc.dumps(data, collect_buffers=True)
    assert buffers is None
    assert quickle.loads(res, buffers=buffers) == data
    # Override doesn't persist
    res = enc.dumps(pbuf)
    assert quickle.loads(res) == data
@pytest.mark.parametrize("cls", [bytes, bytearray])
def test_quickle_pickle_collect_buffers_true_compatibility(cls):
    # Out-of-band buffers interoperate with stdlib pickle protocol 5.
    data = cls(b"hello")
    pbuf = quickle.PickleBuffer(data)
    # quickle -> pickle
    quick_res, quick_buffers = quickle.dumps(pbuf, collect_buffers=True)
    obj = pickle.loads(quick_res, buffers=quick_buffers)
    assert obj is pbuf
    # pickle -> quickle
    pickle_buffers = []
    pickle_res = pickle.dumps(pbuf, buffer_callback=pickle_buffers.append, protocol=5)
    obj = quickle.loads(pickle_res, buffers=pickle_buffers)
    assert obj is pbuf
@pytest.mark.parametrize("cls", [bytes, bytearray])
def test_quickle_pickle_collect_buffers_false_compatibility(cls):
    data = cls(b"hello")
    pbuf = quickle.PickleBuffer(data)
    # quickle -> pickle
    quick_res = quickle.dumps(pbuf)
    obj = pickle.loads(quick_res)
    assert obj == data
    # pickle -> quickle
    pickle_res = pickle.dumps(pbuf, protocol=5)
    obj = quickle.loads(pickle_res)
    assert obj == data
def test_loads_buffers_errors():
    obj = quickle.PickleBuffer(b"hello")
    res, _ = quickle.dumps(obj, collect_buffers=True)
    # buffers must be an iterable...
    with pytest.raises(TypeError):
        quickle.loads(res, buffers=object())
    # ...containing one entry per out-of-band buffer in the stream.
    with pytest.raises(quickle.DecodingError):
        quickle.loads(res, buffers=[])
@pytest.mark.parametrize("value", [object(), object, sum, itertools.count])
def test_dumps_and_loads_unpickleable_types(value):
    # Arbitrary classes/functions are unsupported: dumps rejects them, and
    # loads rejects (stdlib-produced) pickles that reference them.
    with pytest.raises(TypeError):
        quickle.dumps(value)
    o = pickle.dumps(value, protocol=5)
    with pytest.raises(quickle.DecodingError):
        quickle.loads(o)
def test_loads_truncated_input():
    data = quickle.dumps([1, 2, 3])
    with pytest.raises(quickle.DecodingError):
        quickle.loads(data[:-2])
def test_loads_bad_pickle():
    with pytest.raises(quickle.DecodingError):
        quickle.loads(b"this isn't valid at all")
def test_getsizeof():
    # A larger write buffer must be reflected in the reported object size.
    a = sys.getsizeof(quickle.Encoder(write_buffer_size=64))
    b = sys.getsizeof(quickle.Encoder(write_buffer_size=128))
    assert b > a
    # Smoketest
    sys.getsizeof(quickle.Decoder())
# Pickles whose opcodes operate on a malformed stack or a missing MARK must
# fail with DecodingError rather than crash the interpreter.
@pytest.mark.parametrize(
    "enc",
    [
        # bad stacks
        b".",  # STOP
        b"0",  # POP
        b"1",  # POP_MARK
        b"a",  # APPEND
        b"Na",
        b"e",  # APPENDS
        b"(e",
        b"s",  # SETITEM
        b"Ns",
        b"NNs",
        b"t",  # TUPLE
        b"u",  # SETITEMS
        b"(u",
        b"}(Nu",
        b"\x85",  # TUPLE1
        b"\x86",  # TUPLE2
        b"N\x86",
        b"\x87",  # TUPLE3
        b"N\x87",
        b"NN\x87",
        b"\x90",  # ADDITEMS
        b"(\x90",
        b"\x91",  # FROZENSET
        b"\x94",  # MEMOIZE
        # bad marks
        b"N(.",  # STOP
        b"]N(a",  # APPEND
        b"}NN(s",  # SETITEM
        b"}N(Ns",
        b"}(NNs",
        b"}((u",  # SETITEMS
        b"N(\x85",  # TUPLE1
        b"NN(\x86",  # TUPLE2
        b"N(N\x86",
        b"NNN(\x87",  # TUPLE3
        b"NN(N\x87",
        b"N(NN\x87",
        b"]((\x90",  # ADDITEMS
        b"N(\x94",  # MEMOIZE
    ],
)
def test_bad_stack_or_mark(enc):
    with pytest.raises(quickle.DecodingError):
        quickle.loads(enc)
# Each entry is an opcode whose length prefix or payload is truncated; the
# decoder must raise DecodingError instead of reading out of bounds.
@pytest.mark.parametrize(
    "enc",
    [
        b"B",  # BINBYTES
        b"B\x03\x00\x00",
        b"B\x03\x00\x00\x00",
        b"B\x03\x00\x00\x00ab",
        b"C",  # SHORT_BINBYTES
        b"C\x03",
        b"C\x03ab",
        b"G",  # BINFLOAT
        b"G\x00\x00\x00\x00\x00\x00\x00",
        b"J",  # BININT
        b"J\x00\x00\x00",
        b"K",  # BININT1
        b"M",  # BININT2
        b"M\x00",
        b"T",  # BINSTRING
        b"T\x03\x00\x00",
        b"T\x03\x00\x00\x00",
        b"T\x03\x00\x00\x00ab",
        b"U",  # SHORT_BINSTRING
        b"U\x03",
        b"U\x03ab",
        b"X",  # BINUNICODE
        b"X\x03\x00\x00",
        b"X\x03\x00\x00\x00",
        b"X\x03\x00\x00\x00ab",
        b"Nh",  # BINGET
        b"Nj",  # LONG_BINGET
        b"Nj\x00\x00\x00",
        b"Nr\x00\x00\x00",
        b"\x80",  # PROTO
        b"\x8a",  # LONG1
        b"\x8b",  # LONG4
        b"\x8b\x00\x00\x00",
        b"\x8c",  # SHORT_BINUNICODE
        b"\x8c\x03",
        b"\x8c\x03ab",
        b"\x8d",  # BINUNICODE8
        b"\x8d\x03\x00\x00\x00\x00\x00\x00",
        b"\x8d\x03\x00\x00\x00\x00\x00\x00\x00",
        b"\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab",
        b"\x8e",  # BINBYTES8
        b"\x8e\x03\x00\x00\x00\x00\x00\x00",
        b"\x8e\x03\x00\x00\x00\x00\x00\x00\x00",
        b"\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab",
        b"\x96",  # BYTEARRAY8
        b"\x96\x03\x00\x00\x00\x00\x00\x00",
        b"\x96\x03\x00\x00\x00\x00\x00\x00\x00",
        b"\x96\x03\x00\x00\x00\x00\x00\x00\x00ab",
        b"\x95",  # FRAME
        b"\x95\x02\x00\x00\x00\x00\x00\x00",
        b"\x95\x02\x00\x00\x00\x00\x00\x00\x00",
        b"\x95\x02\x00\x00\x00\x00\x00\x00\x00N",
    ],
)
def test_truncated_data(enc):
    with pytest.raises(quickle.DecodingError):
        quickle.loads(enc)
class MyStruct(quickle.Struct):
    """Two-field Struct fixture with no defaults."""
    x: object
    y: object
class MyStruct2(quickle.Struct):
    """Struct fixture mixing required and defaulted fields.

    NOTE(review): the mutable default ``[]`` for ``z`` would be shared
    across instances unless quickle.Struct copies defaults — confirm.
    """
    x: object
    y: object = 1
    z: object = []
    z2: object = 3
class MyStruct3(quickle.Struct):
    """Three-field Struct fixture, all required."""
    x: object
    y: object
    z: object
def test_pickler_unpickler_registry_kwarg_errors():
with pytest.raises(TypeError, match="registry must be a list or a dict"):
quickle.Encoder(registry="bad")
with pytest.raises(TypeError, match="an integer is required"):
quickle.Encoder(registry={MyStruct: 1.0})
with pytest.raises(ValueError, | |
run__fin_rl():
# env = FinanceMultiStockEnv() # 2020-12-24
#
# from AgentZoo import AgentPPO
#
# args = Arguments(rl_agent=AgentPPO, env=env)
# args.eval_times1 = 1
# args.eval_times2 = 1
# args.rollout_num = 4
# args.if_break_early = False
#
# args.reward_scale = 2 ** 0 # (0) 1.1 ~ 15 (19)
# args.break_step = int(5e6 * 4) # 5e6 (15e6) UsedTime: 4,000s (12,000s)
# args.net_dim = 2 ** 8
# args.max_step = 1699
# args.max_memo = 1699 * 16
# args.batch_size = 2 ** 10
# args.repeat_times = 2 ** 4
# args.init_for_training()
# train_agent_mp(args) # train_agent(args)
# exit()
#
# # from AgentZoo import AgentModSAC
# #
# # args = Arguments(rl_agent=AgentModSAC, env=env) # much slower than on-policy trajectory
# # args.eval_times1 = 1
# # args.eval_times2 = 2
# #
# # args.break_step = 2 ** 22 # UsedTime:
# # args.net_dim = 2 ** 7
# # args.max_memo = 2 ** 18
# # args.batch_size = 2 ** 8
# # args.init_for_training()
# # train_agent_mp(args) # train_agent(args)
#
#
# def train__car_racing__pixel_level_state2d():
# from AgentZoo import AgentPPO
#
# '''DEMO 4: Fix gym Box2D env CarRacing-v0 (pixel-level 2D-state, continuous action) using PPO'''
# import gym # gym of OpenAI is not necessary for ElegantRL (even RL)
# gym.logger.set_level(40) # Block warning: 'WARN: Box bound precision lowered by casting to float32'
# env = gym.make('CarRacing-v0')
# env = fix_car_racing_env(env)
#
# args = Arguments(rl_agent=AgentPPO, env=env, gpu_id=None)
# args.if_break_early = True
# args.eval_times2 = 1
# args.eval_times2 = 3 # CarRacing Env is so slow. The GPU-util is low while training CarRacing.
# args.rollout_num = 4 # (num, step, time) (8, 1e5, 1360) (4, 1e4, 1860)
# args.random_seed += 1943
#
# args.break_step = int(5e5 * 4) # (1e5) 2e5 4e5 (8e5) used time (7,000s) 10ks 30ks (60ks)
# # Sometimes bad luck (5%), it reach 300 score in 5e5 steps and don't increase.
# # You just need to change the random seed and retrain.
# args.reward_scale = 2 ** -2 # (-1) 50 ~ 700 ~ 900 (1001)
# args.max_memo = 2 ** 11
# args.batch_size = 2 ** 7
# args.repeat_times = 2 ** 4
# args.net_dim = 2 ** 7
# args.max_step = 2 ** 10
# args.show_gap = 2 ** 8 # for Recorder
# args.init_for_training()
# train_agent_mp(args) # train_agent(args)
# exit()
'''single process training'''
def train_and_evaluate(args):
    """Single-process training loop: explore, update networks, evaluate,
    until the task is solved, the step budget is exhausted, or a ``stop``
    file appears in the working directory.

    Args:
        args: hyper-parameter container; consumed (``del``-ed) after its
            fields are unpacked so the full parameter list is explicit below.
    """
    args.init_before_training()
    cwd = args.cwd
    env = args.env
    env_eval = args.env_eval
    agent_id = args.gpu_id
    agent_rl = args.agent_rl  # basic arguments
    gamma = args.gamma  # training arguments
    net_dim = args.net_dim
    max_memo = args.max_memo
    target_step = args.target_step
    batch_size = args.batch_size
    repeat_times = args.repeat_times
    reward_scale = args.reward_scale
    if_per = args.if_per
    show_gap = args.show_gap  # evaluate arguments
    eval_times1 = args.eval_times1
    eval_times2 = args.eval_times2
    break_step = args.break_step
    if_break_early = args.if_break_early
    # Deep-copy so evaluation never mutates the training env.
    # BUGFIX: this deepcopy used to be performed twice (once here and once
    # after the env init), wastefully copying the copy; once is enough.
    env_eval = deepcopy(env) if env_eval is None else deepcopy(env_eval)
    del args  # In order to show these hyper-parameters clearly, I put them above.
    '''init: env'''
    state_dim = env.state_dim
    action_dim = env.action_dim
    if_discrete = env.if_discrete
    max_step = env.max_step
    '''init: Agent, Evaluator, ReplayBuffer'''
    agent = agent_rl(net_dim, state_dim, action_dim)  # build AgentRL
    agent.state = env.reset()
    evaluator = Evaluator(cwd=cwd, agent_id=agent_id, device=agent.device, env=env_eval,
                          eval_times1=eval_times1, eval_times2=eval_times2, show_gap=show_gap)  # build Evaluator
    if_on_policy = agent_rl.__name__ in {'AgentPPO', 'AgentGaePPO'}
    buffer = ReplayBuffer(max_memo + max_step, state_dim, if_on_policy=if_on_policy, if_per=if_per,
                          action_dim=1 if if_discrete else action_dim)  # build experience replay buffer
    if if_on_policy:
        steps = 0
    else:
        # Off-policy agents need a randomly-filled buffer before training.
        with torch.no_grad():  # update replay buffer
            steps = _explore_before_train(env, buffer, target_step, reward_scale, gamma)
        agent.update_net(buffer, target_step, batch_size, repeat_times)  # pre-training and hard update
        if 'act_target' in dir(agent):
            # Hard-sync the target network, if the agent has one.
            agent.act_target.load_state_dict(agent.act.state_dict())
    total_step = steps
    if_solve = False
    while not ((if_break_early and if_solve)
               or total_step > break_step
               or os.path.exists(f'{cwd}/stop')):
        with torch.no_grad():  # speed up running
            steps = agent.update_buffer(env, buffer, target_step, reward_scale, gamma)
        total_step += steps
        obj_a, obj_c = agent.update_net(buffer, target_step, batch_size, repeat_times)
        with torch.no_grad():  # speed up running
            if_solve = evaluator.evaluate_act__save_checkpoint(agent.act, steps, obj_a, obj_c)
'''multiprocessing training'''
def train_and_evaluate__multiprocessing(args):
    """Multi-process training: one trainer process, one evaluator process,
    and ``args.rollout_num`` explorer processes connected via pipes.
    """
    args.init_before_training()
    act_workers = args.rollout_num
    import multiprocessing as mp  # Python built-in multiprocessing library
    pipe1_eva, pipe2_eva = mp.Pipe()  # Pipe() for Process mp_evaluate_agent()
    pipe2_exp_list = list()  # Pipe() for Process mp_explore_in_env()
    process_train = mp.Process(target=mp__update_params, args=(args, pipe2_eva, pipe2_exp_list))
    process_evaluate = mp.Process(target=mp_evaluate_agent, args=(args, pipe1_eva))
    process = [process_train, process_evaluate]
    for worker_id in range(act_workers):
        exp_pipe1, exp_pipe2 = mp.Pipe(duplex=True)
        pipe2_exp_list.append(exp_pipe1)
        process.append(mp.Process(target=mp_explore_in_env, args=(args, exp_pipe2, worker_id)))
    # IDIOM FIX: plain for-loops instead of list comprehensions used purely
    # for their side effects.
    for p in process:
        p.start()
    process_train.join()  # the trainer decides when the run is over
    process_evaluate.join()
    # Explorer workers loop forever (`while True` in mp_explore_in_env), so
    # terminate every process once training and evaluation have finished.
    for p in process:
        p.terminate()
    print('\n')
def mp__update_params(args, pipe1_eva, pipe1_exp_list):
    """Trainer process: pulls rollouts from the explorer pipes, updates the
    networks, and ships the current actor to the evaluator process.

    Args:
        args: hyper-parameter container (consumed below).
        pipe1_eva: pipe connected to mp_evaluate_agent().
        pipe1_exp_list: one pipe per mp_explore_in_env() worker.
    """
    agent_rl = args.agent_rl  # basic arguments
    env = args.env
    cwd = args.cwd
    rollout_num = args.rollout_num
    gamma = args.gamma  # training arguments
    net_dim = args.net_dim
    max_memo = args.max_memo
    target_step = args.target_step
    batch_size = args.batch_size
    repeat_times = args.repeat_times
    reward_scale = args.reward_scale
    break_step = args.break_step
    if_break_early = args.if_break_early
    if_per=args.if_per
    del args  # In order to show these hyper-parameters clearly, I put them above.
    '''init: env'''
    state_dim = env.state_dim
    action_dim = env.action_dim
    if_discrete = env.if_discrete
    max_step = env.max_step
    '''build agent'''
    agent = agent_rl(net_dim, state_dim, action_dim)  # build AgentRL
    pipe1_eva.send(agent.act)  # act = pipe2_eva.recv()
    if_on_policy = agent_rl.__name__ in {'AgentPPO', 'AgentGaePPO'}
    # One sub-buffer per explorer worker inside the multiprocess buffer.
    buffer_mp = ReplayBufferMP(max_memo + max_step * rollout_num, state_dim,
                               if_on_policy=if_on_policy,
                               if_per=if_per,
                               action_dim=1 if if_discrete else action_dim,
                               rollout_num=rollout_num)  # build experience replay buffer
    steps = 0
    if not if_on_policy:
        # Off-policy agents pre-fill every sub-buffer with random exploration.
        with torch.no_grad():  # update replay buffer
            for _buffer in buffer_mp.buffers:
                steps += _explore_before_train(env, _buffer, target_step // rollout_num, reward_scale, gamma)
        agent.update_net(buffer_mp, target_step, batch_size, repeat_times)  # pre-training and hard update
        agent.act_target.load_state_dict(agent.act.state_dict()) if 'act_target' in dir(agent) else None
    total_step = steps
    pipe1_eva.send((agent.act, steps, 0, 0.5))  # pipe1_eva (act, steps, obj_a, obj_c)
    if_solve = False
    while not ((if_break_early and if_solve)
               or total_step > break_step
               or os.path.exists(f'{cwd}/stop')):
        '''update ReplayBuffer'''
        for i in range(rollout_num):
            pipe1_exp = pipe1_exp_list[i]
            # Send the latest actor to worker i, receive its rollout back.
            pipe1_exp.send(agent.act)
            # agent.act = pipe2_exp.recv()
            # pipe2_exp.send((buffer.buf_state[:buffer.now_len], buffer.buf_other[:buffer.now_len]))
            buf_state, buf_other = pipe1_exp.recv()
            steps = len(buf_state)
            total_step += steps
            buffer_mp.extend_memo_mp(buf_state, buf_other, i)
        '''update network parameters'''
        obj_a, obj_c = agent.update_net(buffer_mp, target_step, batch_size, repeat_times)
        '''saves the agent with max reward'''
        pipe1_eva.send((agent.act, steps, obj_a, obj_c))  # pipe1_eva act_cpu
        # NOTE(review): a blocking recv() immediately followed by a
        # poll()/recv() looks suspicious — the first recv() already waits for
        # one evaluator message, so the poll() branch can only drain a
        # second, possibly stale one. Confirm the intended handshake with
        # mp_evaluate_agent().
        if_solve = pipe1_eva.recv()
        if pipe1_eva.poll():
            if_solve = pipe1_eva.recv()  # pipe2_eva.send(if_solve)
    buffer_mp.print_state_norm(env.neg_state_avg if hasattr(env, 'neg_state_avg') else None,
                               env.div_state_std if hasattr(env, 'div_state_std') else None)  # 2020-12-12
    pipe1_eva.send('stop')  # eva_pipe stop # send to mp_evaluate_agent
    time.sleep(4)
    # print('; quit: params')
def mp_explore_in_env(args, pipe2_exp, worker_id):
    """Exploration worker: repeatedly roll out the env and ship transitions.

    Runs in its own process. Each iteration (paired with the learner's
    pipe1_exp end): receive the latest actor network, collect
    ``target_step // rollout_num`` steps into a local ReplayBuffer, then send
    the filled (buf_state, buf_other) slices back. Loops forever; the learner
    is expected to terminate this process.

    :param args: hyper-parameter container (env, agent_rl, net_dim, ...)
    :param pipe2_exp: worker end of the multiprocessing Pipe to the learner
    :param worker_id: rollout worker index; offsets the random seed
    """
    env = args.env
    reward_scale = args.reward_scale
    gamma = args.gamma
    random_seed = args.random_seed
    agent_rl = args.agent_rl
    net_dim = args.net_dim
    max_memo = args.max_memo
    target_step = args.target_step
    rollout_num = args.rollout_num
    del args  # everything needed has been copied into locals

    # De-correlate workers: each process gets its own seed offset.
    torch.manual_seed(random_seed + worker_id)
    np.random.seed(random_seed + worker_id)

    '''init: env'''
    state_dim = env.state_dim
    action_dim = env.action_dim
    if_discrete = env.if_discrete
    max_step = env.max_step

    '''build agent'''
    agent = agent_rl(state_dim, action_dim, net_dim)  # training agent
    agent.state = env.reset()
    # agent.device = torch.device('cpu')  # env_cpu--act_cpu a little faster than env_cpu--act_gpu, but high cpu-util

    '''build replay buffer, init: total_step, reward_avg'''
    # On-policy agents are detected by class name; their buffers hold one rollout.
    if_on_policy = bool(agent_rl.__name__ in {'AgentPPO', 'AgentGaePPO', 'AgentInterPPO'})
    buffer = ReplayBuffer(max_memo // rollout_num + max_step, state_dim, if_on_policy=if_on_policy,
                          action_dim=1 if if_discrete else action_dim)  # build experience replay buffer

    exp_step = target_step // rollout_num  # this worker's share of each rollout
    with torch.no_grad():  # exploration only; no gradients needed
        while True:
            # pipe1_exp.send(agent.act)
            agent.act = pipe2_exp.recv()  # latest actor from the learner

            agent.update_buffer(env, buffer, exp_step, reward_scale, gamma)

            buffer.update__now_len__before_sample()
            # Ship only the filled portion of the buffer back to the learner.
            pipe2_exp.send((buffer.buf_state[:buffer.now_len], buffer.buf_other[:buffer.now_len]))
            # buf_state, buf_other = pipe1_exp.recv()
def mp_evaluate_agent(args, pipe2_eva):
    """Evaluator process: score incoming actor snapshots and keep the best.

    Counterpart of the learner's pipe1_eva end. The first recv() fetches the
    initial actor, the second the first (act, steps, obj_a, obj_c) tuple.
    Each round it drains the pipe to get the newest actor, evaluates a CPU
    copy, and sends back `if_solve` (whether the target reward was reached).
    The literal message 'stop' terminates the loop.

    :param args: hyper-parameter container (env, cwd, eval settings, ...)
    :param pipe2_eva: evaluator end of the multiprocessing Pipe
    """
    env = args.env
    env_eval = args.env_eval
    cwd = args.cwd
    agent_id = args.gpu_id
    show_gap = args.show_gap  # evaluate arguments
    eval_times1 = args.eval_times1
    eval_times2 = args.eval_times2

    # Evaluate on a private deep copy so the training env is never disturbed.
    env_eval = deepcopy(env) if env_eval is None else deepcopy(env_eval)

    device = torch.device("cpu")
    evaluator = Evaluator(cwd=cwd, agent_id=agent_id, device=device, env=env_eval,
                          eval_times1=eval_times1, eval_times2=eval_times2, show_gap=show_gap)  # build Evaluator

    '''act_cpu without gradient for pipe1_eva'''
    act = pipe2_eva.recv()  # pipe1_eva.send(agent.act)
    act_cpu = deepcopy(act).to(torch.device("cpu"))  # for pipe1_eva
    # Inference only: freeze every parameter of the CPU copy.
    [setattr(param, 'requires_grad', False) for param in act_cpu.parameters()]
    with torch.no_grad():  # speed up running
        act, steps, obj_a, obj_c = pipe2_eva.recv()  # pipe2_eva (act, steps, obj_a, obj_c)

        if_loop = True
        while if_loop:
            '''update actor'''
            while not pipe2_eva.poll():  # wait until pipe2_eva not empty
                time.sleep(1)

            steps_sum = 0
            while pipe2_eva.poll():  # receive the latest object from pipe
                q_i_eva_get = pipe2_eva.recv()  # pipe2_eva act

                if q_i_eva_get == 'stop':
                    if_loop = False
                    break
                # Keep overwriting: only the newest actor snapshot is evaluated,
                # but the step counts of all drained messages are accumulated.
                act, steps, obj_a, obj_c = q_i_eva_get
                steps_sum += steps
            act_cpu.load_state_dict(act.state_dict())
            if_solve = evaluator.evaluate_act__save_checkpoint(act_cpu, steps_sum, obj_a, obj_c)
            pipe2_eva.send(if_solve)  # if_solve = pipe1_eva.recv()

            evaluator.save_npy__draw_plot()

    '''save the model, rename the directory'''
    print(f'| SavedDir: {cwd}\n'
          f'| UsedTime: {time.time() - evaluator.start_time:.0f}')

    while pipe2_eva.poll():  # empty the pipe
        pipe2_eva.recv()
    # print('; quit: evaluate')
'''utils'''
class Evaluator:
def __init__(self, cwd, agent_id, eval_times1, eval_times2, show_gap, env, device):
self.recorder = [(0., -np.inf, 0., 0., 0.), ] # total_step, r_avg, r_std, obj_a, obj_c
self.r_max = -np.inf
self.total_step = 0
self.cwd = cwd # constant
self.device = device
self.agent_id | |
<filename>api/drf_views.py<gh_stars>0
from rest_framework.status import HTTP_201_CREATED, HTTP_200_OK
from rest_framework.generics import GenericAPIView, CreateAPIView, UpdateAPIView
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets
from rest_framework.decorators import action
from django_filters import rest_framework as filters
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from .event_sources import SOURCES
from .exceptions import BadRequest
from .view_filters import ListFilter
from .visibility_class import ReadOnlyVisibilityViewset
from deployments.models import Personnel
from .models import (
DisasterType,
Region,
RegionKeyFigure,
RegionSnippet,
Country,
CountryKeyFigure,
CountrySnippet,
District,
Snippet,
Event,
Snippet,
SituationReport,
SituationReportType,
Appeal,
AppealDocument,
Profile,
FieldReport,
FieldReportContact,
Action,
ActionsTaken,
Source,
SourceType,
VisibilityChoices,
RequestChoices,
)
from .serializers import (
ActionSerializer,
DisasterTypeSerializer,
RegionSerializer,
RegionKeyFigureSerializer,
RegionSnippetSerializer,
RegionRelationSerializer,
CountrySerializer,
CountryKeyFigureSerializer,
CountrySnippetSerializer,
CountryRelationSerializer,
DistrictSerializer,
MiniDistrictSerializer,
SnippetSerializer,
MiniEventSerializer,
ListEventSerializer,
ListEventDeploymentsSerializer,
DetailEventSerializer,
SituationReportSerializer,
SituationReportTypeSerializer,
AppealSerializer,
AppealDocumentSerializer,
UserSerializer,
ProfileSerializer,
ListFieldReportSerializer,
DetailFieldReportSerializer,
CreateFieldReportSerializer,
)
from .logger import logger
class EventDeploymentsViewset(viewsets.ReadOnlyModelViewSet):
    """Aggregated counts of active personnel deployments per (event, type)."""
    serializer_class = ListEventDeploymentsSerializer

    def get_queryset(self):
        # Only deployments that are still active (end_date in the future).
        # order_by() clears the model's default ordering so that
        # values() + annotate() groups strictly by (event, type).
        return Personnel.objects.filter(
            end_date__gt=timezone.now(),
        ).order_by().values(
            'deployment__event_deployed_to', 'type',
        ).annotate(
            id=models.F('deployment__event_deployed_to'),
            deployments=models.Count('type')
        ).values('id', 'type', 'deployments')
class DisasterTypeViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only list/detail endpoints for disaster types."""
    queryset = DisasterType.objects.all()
    serializer_class = DisasterTypeSerializer
class RegionViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoints for regions."""
    queryset = Region.objects.all()

    def get_serializer_class(self):
        # Compact serializer for listings, full relations for detail views.
        return RegionSerializer if self.action == 'list' else RegionRelationSerializer
class CountryFilter(filters.FilterSet):
    """Filter countries by region id (?region=<id>)."""
    # NOTE(review): `name=` was renamed to `field_name=` in django-filter 2.0;
    # left as-is for the pinned version — update on upgrade.
    region = filters.NumberFilter(name='region', lookup_expr='exact')

    class Meta:
        model = Country
        fields = ('region',)
class CountryViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only country endpoints; detail lookup accepts pk, name or ISO code."""
    queryset = Country.objects.all()
    filter_class = CountryFilter

    def get_object(self):
        pk = self.kwargs['pk']
        try:
            return Country.objects.get(pk=int(pk))
        except ValueError:
            # Non-numeric identifier: case-insensitive name/ISO fallback.
            matches = Country.objects.filter(
                models.Q(name__iexact=str(pk)) | models.Q(iso__iexact=str(pk))
            )
            if matches.exists():
                return matches.first()
            raise Country.DoesNotExist(
                'Country matching query does not exist.'
            )

    def get_serializer_class(self):
        # Compact serializer for listings, full relations for detail views.
        return CountrySerializer if self.action == 'list' else CountryRelationSerializer
class RegionKeyFigureFilter(filters.FilterSet):
    """Filter region key figures by region id (?region=<id>)."""
    region = filters.NumberFilter(name='region', lookup_expr='exact')

    class Meta:
        model = RegionKeyFigure
        fields = ('region',)
class RegionKeyFigureViewset(ReadOnlyVisibilityViewset):
    """Region key figures, restricted by the caller's visibility rights."""
    authentication_classes = (TokenAuthentication,)
    serializer_class = RegionKeyFigureSerializer
    filter_class = RegionKeyFigureFilter
    visibility_model_class = RegionKeyFigure
class CountryKeyFigureFilter(filters.FilterSet):
    """Filter country key figures by country id (?country=<id>)."""
    country = filters.NumberFilter(name='country', lookup_expr='exact')

    class Meta:
        model = CountryKeyFigure
        fields = ('country',)
class CountryKeyFigureViewset(ReadOnlyVisibilityViewset):
    """Country key figures, restricted by the caller's visibility rights."""
    authentication_classes = (TokenAuthentication,)
    serializer_class = CountryKeyFigureSerializer
    filter_class = CountryKeyFigureFilter
    visibility_model_class = CountryKeyFigure
class RegionSnippetFilter(filters.FilterSet):
    """Filter region snippets by region id (?region=<id>)."""
    region = filters.NumberFilter(name='region', lookup_expr='exact')

    class Meta:
        model = RegionSnippet
        fields = ('region',)
class RegionSnippetViewset(ReadOnlyVisibilityViewset):
    """Region snippets, restricted by the caller's visibility rights."""
    authentication_classes = (TokenAuthentication,)
    serializer_class = RegionSnippetSerializer
    filter_class = RegionSnippetFilter
    visibility_model_class = RegionSnippet
class CountrySnippetFilter(filters.FilterSet):
    """Filter country snippets by country id (?country=<id>)."""
    country = filters.NumberFilter(name='country', lookup_expr='exact')

    class Meta:
        model = CountrySnippet
        fields = ('country',)
class CountrySnippetViewset(ReadOnlyVisibilityViewset):
    """Country snippets, restricted by the caller's visibility rights."""
    authentication_classes = (TokenAuthentication,)
    serializer_class = CountrySnippetSerializer
    filter_class = CountrySnippetFilter
    visibility_model_class = CountrySnippet
class DistrictFilter(filters.FilterSet):
    """Filter districts by country id (?country=<id>)."""
    class Meta:
        model = District
        fields = ('country',)
class DistrictViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoints for districts."""
    queryset = District.objects.all()
    filter_class = DistrictFilter

    def get_serializer_class(self):
        # Compact serializer for listings, full serializer for detail views.
        return MiniDistrictSerializer if self.action == 'list' else DistrictSerializer
class EventFilter(filters.FilterSet):
    """Event filters: disaster type, featured flags, country/region membership,
    id, plus range lookups on start date and creation time."""
    dtype = filters.NumberFilter(name='dtype', lookup_expr='exact')
    is_featured = filters.BooleanFilter(name='is_featured')
    is_featured_region = filters.BooleanFilter(name='is_featured_region')
    # ListFilter accepts comma-separated id lists (?countries__in=1,2,3).
    countries__in = ListFilter(name='countries__id')
    regions__in = ListFilter(name='regions__id')
    id = filters.NumberFilter(name='id', lookup_expr='exact')

    class Meta:
        model = Event
        fields = {
            'disaster_start_date': ('exact', 'gt', 'gte', 'lt', 'lte'),
            'created_at': ('exact', 'gt', 'gte', 'lt', 'lte'),
        }
class EventViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only event endpoints, plus a trimmed-down /mini listing."""
    ordering_fields = (
        'disaster_start_date', 'created_at', 'name', 'summary', 'num_affected', 'glide', 'ifrc_severity_level',
    )
    filter_class = EventFilter

    def get_queryset(self):
        # The mini listing only needs (id, name) pairs.
        if self.action == 'mini_events':
            return Event.objects.values('id', 'name')
        return Event.objects.all()

    def get_serializer_class(self):
        if self.action == 'mini_events':
            return MiniEventSerializer
        return ListEventSerializer if self.action == 'list' else DetailEventSerializer

    @action(methods=['get'], detail=False, url_path='mini')
    def mini_events(self, request):
        # Reuse the standard list machinery with the mini queryset/serializer.
        return super().list(request)
class EventSnippetFilter(filters.FilterSet):
    """Filter snippets by event id (?event=<id>)."""
    event = filters.NumberFilter(name='event', lookup_expr='exact')

    class Meta:
        model = Snippet
        fields = ('event',)
class EventSnippetViewset(ReadOnlyVisibilityViewset):
    """Event snippets, restricted by the caller's visibility rights."""
    authentication_classes = (TokenAuthentication,)
    serializer_class = SnippetSerializer
    filter_class = EventSnippetFilter
    visibility_model_class = Snippet
class SituationReportTypeViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only situation-report types, orderable by `type`."""
    queryset = SituationReportType.objects.all()
    serializer_class = SituationReportTypeSerializer
    ordering_fields = ('type',)
class SituationReportFilter(filters.FilterSet):
    """Filter situation reports by event, type, name and creation time."""
    event = filters.NumberFilter(name='event', lookup_expr='exact')
    type = filters.NumberFilter(name='type', lookup_expr='exact')

    class Meta:
        model = SituationReport
        fields = {
            'name': ('exact',),
            'created_at': ('exact', 'gt', 'gte', 'lt', 'lte'),
        }
class SituationReportViewset(ReadOnlyVisibilityViewset):
    """Situation reports, restricted by the caller's visibility rights."""
    authentication_classes = (TokenAuthentication,)
    serializer_class = SituationReportSerializer
    ordering_fields = ('created_at', 'name',)
    filter_class = SituationReportFilter
    visibility_model_class = SituationReport
class AppealFilter(filters.FilterSet):
    """Appeal filters: type, disaster type, location, code, status, id,
    plus range lookups on start/end dates."""
    atype = filters.NumberFilter(name='atype', lookup_expr='exact')
    dtype = filters.NumberFilter(name='dtype', lookup_expr='exact')
    country = filters.NumberFilter(name='country', lookup_expr='exact')
    region = filters.NumberFilter(name='region', lookup_expr='exact')
    code = filters.CharFilter(name='code', lookup_expr='exact')
    status = filters.NumberFilter(name='status', lookup_expr='exact')
    id = filters.NumberFilter(name='id', lookup_expr='exact')

    class Meta:
        model = Appeal
        fields = {
            'start_date': ('exact', 'gt', 'gte', 'lt', 'lte'),
            'end_date': ('exact', 'gt', 'gte', 'lt', 'lte'),
        }
class AppealViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only appeal endpoints; hides the linked event until confirmed."""
    queryset = Appeal.objects.all()
    serializer_class = AppealSerializer
    ordering_fields = ('start_date', 'end_date', 'name', 'aid', 'dtype', 'num_beneficiaries', 'amount_requested', 'amount_funded', 'status', 'atype', 'event',)
    filter_class = AppealFilter

    def remove_unconfirmed_event(self, obj):
        # Blank out the event reference while it still needs confirmation.
        if obj['needs_confirmation']:
            obj['event'] = None
        return obj

    def remove_unconfirmed_events(self, objs):
        return [self.remove_unconfirmed_event(obj) for obj in objs]

    # list/retrieve are overridden so serialized output is post-processed.
    def list(self, request, *args, **kwargs):
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        if page is None:
            serializer = self.get_serializer(queryset, many=True)
            return Response(self.remove_unconfirmed_events(serializer.data))
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(self.remove_unconfirmed_events(serializer.data))

    def retrieve(self, request, *args, **kwargs):
        serializer = self.get_serializer(self.get_object())
        return Response(self.remove_unconfirmed_event(serializer.data))
class AppealDocumentFilter(filters.FilterSet):
    """Filter appeal documents by appeal id (single or comma-separated list)."""
    appeal = filters.NumberFilter(name='appeal', lookup_expr='exact')
    appeal__in = ListFilter(name='appeal__id')

    class Meta:
        model = AppealDocument
        fields = {
            'name': ('exact',),
            'created_at': ('exact', 'gt', 'gte', 'lt', 'lte'),
        }
class AppealDocumentViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoints for appeal documents."""
    queryset = AppealDocument.objects.all()
    serializer_class = AppealDocumentSerializer
    ordering_fields = ('created_at', 'name',)
    filter_class = AppealDocumentFilter
class ProfileViewset(viewsets.ModelViewSet):
    """CRUD on the authenticated user's own profile only."""
    serializer_class = ProfileSerializer
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        # Scope strictly to the requesting user's profile.
        current_user = self.request.user
        return Profile.objects.filter(user=current_user)
class UserViewset(viewsets.ModelViewSet):
    """CRUD on the authenticated user's own account only."""
    serializer_class = UserSerializer
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        # Scope strictly to the requesting user's own record.
        current_pk = self.request.user.pk
        return User.objects.filter(pk=current_pk)
class FieldReportFilter(filters.FilterSet):
    """Field-report filters: disaster type, author, country/region membership,
    id, plus range lookups on creation/update time."""
    dtype = filters.NumberFilter(name='dtype', lookup_expr='exact')
    user = filters.NumberFilter(name='user', lookup_expr='exact')
    # ListFilter accepts comma-separated id lists (?countries__in=1,2,3).
    countries__in = ListFilter(name='countries__id')
    regions__in = ListFilter(name='regions__id')
    id = filters.NumberFilter(name='id', lookup_expr='exact')

    class Meta:
        model = FieldReport
        fields = {
            'created_at': ('exact', 'gt', 'gte', 'lt', 'lte'),
            'updated_at': ('exact', 'gt', 'gte', 'lt', 'lte'),
        }
class FieldReportViewset(ReadOnlyVisibilityViewset):
    """Read-only field reports, restricted by the caller's visibility rights."""
    authentication_classes = (TokenAuthentication,)
    visibility_model_class = FieldReport
    ordering_fields = ('summary', 'event', 'dtype', 'created_at', 'updated_at')
    filter_class = FieldReportFilter

    def get_serializer_class(self):
        # Light serializer for listings, full detail otherwise.
        return ListFieldReportSerializer if self.action == 'list' else DetailFieldReportSerializer
class ActionViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only list/detail endpoints for field-report actions."""
    queryset = Action.objects.all()
    serializer_class = ActionSerializer
class GenericFieldReportView(GenericAPIView):
    """Shared helpers for creating/updating FieldReports.

    The incoming request payload mixes raw enum integers, foreign-key ids
    and nested relation lists; these helpers normalize the payload into
    something the CreateFieldReportSerializer and the model layer can use.

    Fixes over the previous version:
    * ``elif prop is not 'event'`` compared a string by identity (works only
      by CPython string interning, raises SyntaxWarning) — now ``!=``;
    * the bare ``except:`` in map_foreign_key_relations also swallowed
      SystemExit/KeyboardInterrupt — now ``except Exception``.
    """
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    queryset = FieldReport.objects.all()

    def serialize(self, data, instance=None):
        """Return a CreateFieldReportSerializer for `data`.

        Integer (or stringified-integer) values for visibility and the
        request-choice fields are replaced with their IntEnum members first;
        otherwise serializer validation fails.
        """
        if data['visibility'] in (2, '2'):
            data['visibility'] = VisibilityChoices.IFRC
        elif data['visibility'] in (3, '3'):
            data['visibility'] = VisibilityChoices.PUBLIC
        else:
            data['visibility'] = VisibilityChoices.MEMBERSHIP

        request_choices = [
            'bulletin',
            'dref',
            'appeal',
            'rdrt',
            'fact',
            'ifrc_staff',
            'imminent_dref',
            'forecast_based_action',
            'eru_base_camp',
            'eru_basic_health_care',
            'eru_it_telecom',
            'eru_logistics',
            'eru_deployment_hospital',
            'eru_referral_hospital',
            'eru_relief',
            'eru_water_sanitation_15',
            'eru_water_sanitation_40',
            'eru_water_sanitation_20',
        ]
        for prop in request_choices:
            if prop in data:
                if data[prop] in (1, '1'):
                    data[prop] = RequestChoices.REQUESTED
                elif data[prop] in (2, '2'):
                    data[prop] = RequestChoices.PLANNED
                elif data[prop] in (3, '3'):
                    data[prop] = RequestChoices.COMPLETE
                else:
                    data[prop] = RequestChoices.NO

        if instance is not None:
            return CreateFieldReportSerializer(instance, data=data)
        return CreateFieldReportSerializer(data=data)

    def map_foreign_key_relations(self, data):
        """Replace foreign-key ids in `data` with model instances.

        `user` and `dtype` are required; `event` is optional.
        Raises BadRequest when a required key is missing or an id fails to
        resolve to a row.
        """
        mappings = [
            ('user', User),
            ('dtype', DisasterType),
            ('event', Event),
        ]
        for prop, model in mappings:
            if prop in data and data[prop] is not None:
                try:
                    data[prop] = model.objects.get(pk=data[prop])
                except Exception:
                    # Covers model.DoesNotExist as well as malformed pk values.
                    raise BadRequest('Valid %s is required' % prop)
            elif prop != 'event':  # fixed: was identity comparison `is not 'event'`
                raise BadRequest('Valid %s is required' % prop)
        return data

    def map_many_to_many_relations(self, data):
        """Strip many-to-many payload lists out of `data`.

        Returns (data, locations, meta): `locations` holds the countries/
        regions/districts id lists, `meta` holds the actions_taken/contacts/
        sources dicts. Both are re-attached once the FieldReport exists.
        """
        locations = {}
        for prop in ('countries', 'regions', 'districts'):
            if prop in data and hasattr(data[prop], '__iter__') and len(data[prop]):
                locations[prop] = list(data[prop])
            if prop in data:
                del data[prop]

        meta = {}
        for prop in ('actions_taken', 'contacts', 'sources'):
            if prop in data and hasattr(data[prop], '__iter__') and len(data[prop]):
                meta[prop] = list(data[prop])
            if prop in data:
                del data[prop]
        return data, locations, meta

    def save_locations(self, instance, locations, is_update=False):
        """Attach district/country/region m2m relations to `instance`.

        On update the existing relations are cleared first (replace-all).
        """
        if is_update:
            instance.districts.clear()
            instance.countries.clear()
            instance.regions.clear()
        if 'districts' in locations:
            instance.districts.add(*locations['districts'])
        if 'countries' in locations:
            instance.countries.add(*locations['countries'])
            # Regions are derived automatically from the chosen countries.
            countries = Country.objects.filter(pk__in=locations['countries'])
            instance.regions.add(*[country.region for country in countries if (
                country.region is not None
            )])

    def save_meta(self, fieldreport, meta, is_update=False):
        """Create ActionsTaken/contact/source rows for `fieldreport`.

        On update the existing related rows are deleted first (replace-all).
        """
        if is_update:
            ActionsTaken.objects.filter(field_report=fieldreport).delete()
            FieldReportContact.objects.filter(field_report=fieldreport).delete()
            Source.objects.filter(field_report=fieldreport).delete()

        if 'actions_taken' in meta:
            for action in meta['actions_taken']:
                # m2m action ids must be added after the row exists.
                actions = action.pop('actions')
                actions_taken = ActionsTaken.objects.create(field_report=fieldreport, **action)
                actions_taken.actions.add(*actions)

        if 'contacts' in meta:
            FieldReportContact.objects.bulk_create(
                [FieldReportContact(field_report=fieldreport, **fields) for fields in meta['contacts']]
            )

        if 'sources' in meta:
            for source in meta['sources']:
                # Normalize the free-text source type into a SourceType row.
                stype, created = SourceType.objects.get_or_create(name=source['stype'])
                source['stype'] = stype
            Source.objects.bulk_create(
                [Source(field_report=fieldreport, **fields) for fields in meta['sources']]
            )
class CreateFieldReport(CreateAPIView, GenericFieldReportView):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
queryset = FieldReport.objects.all()
serializer_class = CreateFieldReportSerializer
def create_event(self, report):
event = Event.objects.create(
name=report.summary,
dtype=report.dtype,
disaster_start_date=report.start_date,
auto_generated=True,
auto_generated_source=SOURCES['new_report'],
)
report.event = event
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 18 09:17:23 2018

@author: Manuel

The pentaton logic go-around II: a small board-game prototype with a
five-track board, players with attribute stats, and modifier cards.
"""
import random

# Fixed seed makes every run reproducible (tile values, card draws).
random.seed(0)
print(random.getrandbits(5))

# =============================================================================
#
# Variables
# =============================================================================
# One board track with all tiles from start to finish.
# NOTE(review): Player.tile_check() expects this to hold a {'Tracks': [...]}
# mapping (shaped like Board.dict_), but nothing in this module ever fills
# it — confirm where it is meant to be assigned.
tile_dict = {}

# =============================================================================
#
# About the Board:
# =============================================================================
class Board:
    """Game board: five parallel tracks of `length` tiles each.

    A tile maps two randomly chosen attribute names to a random requirement
    value; requirements grow by 3 per tile along a track. The board also
    samples one tile for itself and exposes its values as the attributes
    Wit/Stren/Dex/Intel (None for the attributes not drawn).
    """

    def __init__(self, length, min_val, max_val):
        self.min_val = min_val
        self.max_val = max_val
        # Sample one tile for the board itself (consumes the RNG first,
        # exactly like the original implementation).
        self.value = self.create_tile(min_val, max_val)
        self.length = length
        self.track1 = self.build_track()
        self.track2 = self.build_track()
        self.track3 = self.build_track()
        self.track4 = self.build_track()
        self.track5 = self.build_track()
        self.dict_ = {'Tracks': [self.track1, self.track2, self.track3, self.track4, self.track5]}
        # Expose the sampled tile's values as attributes (None when absent).
        for attr_name in ('Wit', 'Stren', 'Dex', 'Intel'):
            setattr(self, attr_name, self.value.get(attr_name))

    def create_tile(self, min_val, max_val):
        """Return a tile: two distinct random attributes -> random requirement."""
        chosen = random.sample(['Wit', 'Stren', 'Dex', 'Intel'], 2)
        return {name: random.randrange(min_val, max_val + 1) for name in chosen}

    def build_track(self):
        """Build one track of `self.length` tiles with rising requirements."""
        track = {}
        lo, hi = self.min_val, self.max_val
        for index in range(self.length):
            track[index] = self.create_tile(lo, hi)
            lo += 3
            hi += 3
        print('tile_track created.')
        return track
# --- module-level smoke test: build boards and inspect their tracks ---
board = Board(10, 1, 4)
print(board.track1, board.track2)

# NOTE(review): each of these builds a brand-new Board just to read one
# track — confirm whether reusing `board` was intended.
track1 = Board(10, 1, 4).track1
print(track1)
track2 = Board(10, 1, 4).track2
print(track2)

print(board.dict_['Tracks'][3])
print(board)  # Board defines no __str__, so this prints the default repr

tr_dict = board.dict_
for i in tr_dict['Tracks']:
    print('\n\n', i)
# =============================================================================
#
# concerning Attributes:
# =============================================================================
class Attribute:
    """Base class for a single player statistic."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def increase_value(self, increment):
        """Raise the stat by `increment` (may be negative)."""
        self.value = self.value + increment

    def getValue(self):
        """Return the stat as an int (camelCase kept for existing callers)."""
        return int(self.value)
class Wit(Attribute):
    """Wit stat; `ability` prints a tier message for the current value."""

    def ability(self):
        v = self.value
        if v < 6:
            print('you noob')
        if 5 < v < 11:
            print('keep growing')
        elif 10 < v < 16:
            print('getting there')
        elif 15 < v < 17:
            print('pretty good')
        elif v > 20:
            print('woaah')
        # NOTE(review): values 17-20 print nothing — confirm the gap is intended.
class Stren(Attribute):
    """Strength stat; `ability` prints a tier message for the current value."""

    def ability(self):
        v = self.value
        if v < 6:
            print('you noob')
        if 5 < v < 11:
            print('keep growing')
        elif 10 < v < 16:
            print('getting there')
        elif 15 < v < 17:
            print('pretty good')
        elif v > 20:
            print('woaah')
        # NOTE(review): values 17-20 print nothing — confirm the gap is intended.
class Dex(Attribute):
    """Dexterity stat; `ability` prints a tier message for the current value."""

    def ability(self):
        v = self.value
        if v < 6:
            print('you noob')
        if 5 < v < 11:
            print('keep growing')
        elif 10 < v < 16:
            print('getting there')
        elif 15 < v < 17:
            print('pretty good')
        elif v > 20:
            print('woaah')
        # NOTE(review): values 17-20 print nothing — confirm the gap is intended.
class Intel(Attribute):
    """Intelligence stat; `ability` prints a tier message for the current value."""

    def ability(self):
        v = self.value
        if v < 6:
            print('you noob')
        if 5 < v < 11:
            print('keep growing')
        elif 10 < v < 16:
            print('getting there')
        elif 15 < v < 17:
            print('pretty good')
        elif v > 20:
            print('woaah')
        # NOTE(review): values 17-20 print nothing — confirm the gap is intended.
"""
test = Wit(5)
print(test)
test.getValue()
test.increase_value(2)
test.getValue()
"""
# =============================================================================
#
# playing with the Player:
# =============================================================================
class Player():
    """A game participant with four attribute stats and a hand of cards.

    Wit/Stren/Dex/Intel are expected to be Attribute-subclass instances
    (they are used via .getValue()/.increase_value()).
    """

    def __init__(self, name, text, Wit, Stren, Dex, Intel, ):
        self.name = name
        self.text = text
        self.Wit = Wit
        self.Stren = Stren
        self.Dex = Dex
        self.Intel = Intel
        self.atr_tup = ('Wit', 'Stren', 'Dex', 'Intel')  # valid attribute names
        self.atr_self_tup = (self.Wit, self.Stren, self.Dex, self.Intel)
        self.track = 0           # index of the board track the player is on
        self.tier_complete = 0   # index of the next tile to challenge
        self.hand = []           # cards currently held

    def __str__(self):
        return "\nPlayer Name: {} \n{}'s Wit: {} \n{}'s Stren: {} \n{}'s Dex: {}\
\n{}'s Intel: {} {} Cards: {}".format\
        (self.name, self.name, self.Wit, self.name, self.Stren, self.name, self.Dex,\
        self.name, self.Intel, self.name, self.hand)

    def test_attribute(self, attribute, test_value):
        """Compare an attribute value against `test_value`; True when passed."""
        # NOTE(review): the `global tile_dict` declaration is unused here.
        global tile_dict
        atr = attribute
        if atr >= test_value:
            print('you passed')
            return True
        else:
            print('you failed')
            return False

    def tile_check(self):
        """Interactively challenge one attribute of the current tile.

        Prompts (via input()) until a valid attribute of the tile is chosen;
        on a passed test the player advances a tier and levels up.
        """
        # NOTE(review): reads the module-level tile_dict, which this file
        # initializes to {} and never fills — it expects a Board.dict_-style
        # {'Tracks': [...]} mapping; confirm where it should be assigned.
        tier = tile_dict['Tracks'][self.track][self.tier_complete]
        while True:
            print(tier)
            atr = input('\nwhich atr to challange: ').capitalize()
            if atr not in tier:
                print('\nno match, try agian..')
            else:
                print('\natrb choice accepted.')
                break
        if atr == 'Wit' and self.test_attribute(self.Wit.getValue(), tier[atr]) == True:
            self.tier_complete += 1; print('\ngz, you are in tier', self.tier_complete,' now.')
            self.level_up()
        if atr == 'Stren' and self.test_attribute(self.Stren.getValue(), tier[atr]) == True:
            self.tier_complete += 1; print('\ngz, you are in tier', self.tier_complete,' now.')
            self.level_up()
        if atr == 'Dex' and self.test_attribute(self.Dex.getValue(), tier[atr]) == True:
            self.tier_complete += 1; print('\ngz, you are in tier', self.tier_complete,' now.')
            self.level_up()
        if atr == 'Intel' and self.test_attribute(self.Intel.getValue(), tier[atr]) == True:
            self.tier_complete += 1; print('\ngz, you are in tier', self.tier_complete,' now.')
            self.level_up()

    def level_up(self):
        """Interactively raise one chosen attribute by +1 after a passed tile."""
        print('\nyour current stats: Wit: {} Stren: {} Dex: {} Intel: {}.'.format\
        (self.Wit, self.Stren, self.Dex, self.Intel))
        while True:
            up = input('\nwhich atribute do you wanna level up(+1)? ').capitalize()
            if up not in self.atr_tup:
                print('\nplease retype, couldnt understand your input.')
            else:
                if up == 'Wit':
                    self.Wit.increase_value(1); print('\ngz, your new self:\n', self); break
                if up == 'Stren':
                    self.Stren.increase_value(1); print('\ngz, your new self:\n', self); break
                if up == 'Dex':
                    self.Dex.increase_value(1); print('\ngz, your new self:\n', self); break
                if up == 'Intel':
                    self.Intel.increase_value(1); print('\ngz, your new self:\n', self); break

    def draw_card(self, amount):
        """Move random cards from the deck into this player's hand.

        NOTE(review): `while i <= amount` draws amount + 1 cards — confirm
        whether the inclusive bound is intended.
        NOTE(review): relies on a module-level `deck` object that is not
        defined anywhere in this file.
        """
        i = 0
        while i <= amount:
            try:
                rand_index = random.randrange(0, deck.deck_size())
                drawn_card = deck.deck[rand_index]
                drawn_card.location = 'hand'
                self.hand.append(drawn_card)
                deck.deck.pop(rand_index)
                print(self.hand)
                i += 1
            except ValueError:
                # randrange raises ValueError on an empty range (empty deck).
                print('pile exhausted, shuffle yard back to deck\n raised by draw_card.')
                break

    def discard_card(self):
        # Not yet implemented.
        pass

    def play_card(self):
        # Not yet implemented.
        pass

    def return_from_yard(self):
        # Not yet implemented.
        pass
"""
drizzt = Player('Drizzt', 'can kill stuff', Wit(21), Stren(5), Dex(5), Intel(5))
print(drizzt)
drizzt.Dex.increase_value(15)
print(drizzt)
drizzt.Dex.getValue()
drizzt.test_attribute(drizzt.Dex.getValue(), 11)
type(drizzt.Dex)
drizzt.tile_check()
drizzt.Wit.ability()
"""
drizzt = Player('Drizzt', 'can kill stuff', Wit(21), Stren(5), Dex(5), Intel(5))
print(drizzt)
# =============================================================================
#
# the modifier cards:
# =============================================================================
class Card():
    """A modifier card that adjusts one attribute of a target player.

    :param name: display name of the card
    :param text: flavour/rules text
    :param atr: attribute it targets: 'Wit', 'Stren', 'Dex' or 'Intel'
    :param atri_mod: integer delta applied to the attribute
    :param temp: temporary (True) vs permanent (False) modifier.
        NOTE(review): the original implementation ran byte-identical code in
        both branches, so `temp` currently changes nothing; kept that way.
    """

    def __init__(self, name, text, atr, atri_mod, temp):
        self.name = name
        self.text = text
        self.atr = atr
        self.atri_mod = atri_mod
        self.temp = temp
        self.container = []

    def __str__(self):
        return '\nCard name: {}\nText: {}\nModifies: {}\nBy: {}\nTemp: {}'.format\
        (self.name, self.text, self.atr, self.atri_mod, self.temp)

    def test_card(self):
        """Return True when the card can modify a player (modifier is an int)."""
        return isinstance(self.atri_mod, int)

    def show_cards(self):
        """Print every card collected in self.container."""
        for card in self.container:
            print(card)

    def mod_player(self, player):
        """Apply `atri_mod` to the targeted attribute of `player`.

        If the modifier would push the stat below zero, the stat is replaced
        with a fresh zero-valued attribute instead (clamp at zero).

        Fixes over the previous version:
        * only the attribute named by self.atr is touched — the old dangling
          `else` branches also reset unrelated stats to 0 whenever the card
          targeted a different attribute;
        * the 'Intel' branch read and wrote player.Wit instead of
          player.Intel.
        """
        if not (self.test_card() and self.atr in dir(player)):
            print('it is not a moddable target.')
            return
        # Class used to rebuild the stat when clamping to zero.
        attribute_types = {'Wit': Wit, 'Stren': Stren, 'Dex': Dex, 'Intel': Intel}
        current = getattr(player, self.atr)
        if current.getValue() + self.atri_mod < 0:
            setattr(player, self.atr, attribute_types[self.atr](0))
        else:
            current.increase_value(self.atri_mod)
"""
testCard = Card('Strength Potion', 'adds +2 to your strength', 'Stren', 10, True)
print(testCard)
a = testCard.test_card()
print(a)
testCard.mod_player(drizzt)
print(drizzt)
#print(drizzt.Stren.getValue)
#print(dir(Player))
"""
# =============================================================================
#
# | |
len(item.split("-")) == 6:
self.params[item.split("-")[-1]] = params[item]
class cp2k_motion_md_barostat_print_energy:
    """&ENERGY subsection of MOTION/MD/BAROSTAT/PRINT in a CP2K input."""

    def __init__(self):
        self.params = {}
        self.status = False  # parent emits this subsection only when True
        self.each = cp2k_motion_md_barostat_print_energy_each()
        # NOTE(review): to_input never writes self.each — confirm whether
        # &EACH should be emitted when self.each.status is True.

    def to_input(self, fout):
        """Write the &ENERGY ... &END ENERGY block to the open file `fout`."""
        fout.write("\t\t\t\t&ENERGY\n")
        for key, val in self.params.items():
            if val is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (key, val))
        fout.write("\t\t\t\t&END ENERGY\n")

    def set_params(self, params):
        """Keep the last segment of dash-joined keys with exactly 5 segments."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = params[key]
class cp2k_motion_md_barostat_print:
    """&PRINT subsection of MOTION/MD/BAROSTAT in a CP2K input."""

    def __init__(self):
        self.params = {}
        self.status = False  # parent emits this subsection only when True
        self.energy = cp2k_motion_md_barostat_print_energy()

    def to_input(self, fout):
        """Write &PRINT ... &END PRINT, including &ENERGY when enabled."""
        fout.write("\t\t\t&PRINT\n")
        for key, val in self.params.items():
            if val is not None:
                fout.write("\t\t\t\t%s %s\n" % (key, val))
        if self.energy.status:
            self.energy.to_input(fout)
        fout.write("\t\t\t&END PRINT\n")

    def set_params(self, params):
        """Route dash-joined keys: depth 4 -> own params, ENERGY -> subsection."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 4:
                self.params[parts[-1]] = params[key]
            elif parts[3] == "ENERGY":
                # NOTE(review): parts[3] raises IndexError for keys with
                # fewer than 4 segments — confirm callers never pass those.
                self.energy.set_params({key: params[key]})
            # keys of any other shape are ignored
class cp2k_motion_md_barostat_thermostat_ad_langevin_chi:
    """&CHI subsection of MOTION/MD/BAROSTAT/THERMOSTAT/AD_LANGEVIN."""

    def __init__(self):
        self.params = {}
        self.status = False  # parent emits this subsection only when True

    def to_input(self, fout):
        """Write the &CHI ... &END CHI block to the open file `fout`."""
        fout.write("\t\t\t\t\t&CHI\n")
        for key, val in self.params.items():
            if val is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (key, val))
        fout.write("\t\t\t\t\t&END CHI\n")

    def set_params(self, params):
        """Keep the last segment of dash-joined keys with exactly 6 segments."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = params[key]
class cp2k_motion_md_barostat_thermostat_ad_langevin_mass:
    """&MASS subsection of MOTION/MD/BAROSTAT/THERMOSTAT/AD_LANGEVIN."""

    def __init__(self):
        self.params = {}
        self.status = False  # parent emits this subsection only when True

    def to_input(self, fout):
        """Write the &MASS ... &END MASS block to the open file `fout`."""
        fout.write("\t\t\t\t\t&MASS\n")
        for key, val in self.params.items():
            if val is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (key, val))
        fout.write("\t\t\t\t\t&END MASS\n")

    def set_params(self, params):
        """Keep the last segment of dash-joined keys with exactly 6 segments."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = params[key]
class cp2k_motion_md_barostat_thermostat_ad_langevin:
    """&AD_LANGEVIN thermostat section of MOTION/MD/BAROSTAT in a CP2K input."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {}
        # parent emits this section only when status is True
        self.status = False
        # child subsections
        self.chi = cp2k_motion_md_barostat_thermostat_ad_langevin_chi()
        self.mass = cp2k_motion_md_barostat_thermostat_ad_langevin_mass()

    def to_input(self, fout):
        """Write the &AD_LANGEVIN ... &END AD_LANGEVIN block to fout."""
        fout.write("\t\t\t\t&AD_LANGEVIN\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (key, value))
        if self.chi.status:
            self.chi.to_input(fout)
        if self.mass.status:
            self.mass.to_input(fout)
        fout.write("\t\t\t\t&END AD_LANGEVIN\n")

    def set_params(self, params):
        """Store 5-component dashed keys here; route CHI/MASS keys to children."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = params[key]
            elif parts[4] == "CHI":
                self.chi.set_params({key: params[key]})
            elif parts[4] == "MASS":
                self.mass.set_params({key: params[key]})
class cp2k_motion_md_barostat_thermostat_csvr_rng_init:
    """&RNG_INIT subsection of the CSVR thermostat in a CP2K input file."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {}
        # parent emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """Write the &RNG_INIT ... &END RNG_INIT block to fout."""
        fout.write("\t\t\t\t\t&RNG_INIT\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (key, value))
        fout.write("\t\t\t\t\t&END RNG_INIT\n")

    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 6 components."""
        for key in params:
            if len(key.split("-")) == 6:
                self.params[key.split("-")[-1]] = params[key]
class cp2k_motion_md_barostat_thermostat_csvr_thermostat_energy:
    """&THERMOSTAT_ENERGY subsection of the CSVR thermostat in a CP2K input."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {}
        # parent emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """Write the &THERMOSTAT_ENERGY ... &END THERMOSTAT_ENERGY block to fout."""
        fout.write("\t\t\t\t\t&THERMOSTAT_ENERGY\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (key, value))
        fout.write("\t\t\t\t\t&END THERMOSTAT_ENERGY\n")

    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 6 components."""
        for key in params:
            if len(key.split("-")) == 6:
                self.params[key.split("-")[-1]] = params[key]
class cp2k_motion_md_barostat_thermostat_csvr:
    """&CSVR thermostat section of MOTION/MD/BAROSTAT in a CP2K input file."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {}
        # parent emits this section only when status is True
        self.status = False
        # child subsections
        self.rng_init = cp2k_motion_md_barostat_thermostat_csvr_rng_init()
        self.thermostat_energy = cp2k_motion_md_barostat_thermostat_csvr_thermostat_energy()

    def to_input(self, fout):
        """Write the &CSVR ... &END CSVR block to the open file handle fout."""
        fout.write("\t\t\t\t&CSVR\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (key, value))
        if self.rng_init.status:
            self.rng_init.to_input(fout)
        if self.thermostat_energy.status:
            self.thermostat_energy.to_input(fout)
        fout.write("\t\t\t\t&END CSVR\n")

    def set_params(self, params):
        """Store 5-component dashed keys here; route subsection keys to children."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = params[key]
            elif parts[4] == "RNG_INIT":
                self.rng_init.set_params({key: params[key]})
            elif parts[4] == "THERMOSTAT_ENERGY":
                self.thermostat_energy.set_params({key: params[key]})
class cp2k_motion_md_barostat_thermostat_gle_rng_init:
    """&RNG_INIT subsection of the GLE thermostat in a CP2K input file."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {}
        # parent emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """Write the &RNG_INIT ... &END RNG_INIT block to fout."""
        fout.write("\t\t\t\t\t&RNG_INIT\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (key, value))
        fout.write("\t\t\t\t\t&END RNG_INIT\n")

    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 6 components."""
        for key in params:
            if len(key.split("-")) == 6:
                self.params[key.split("-")[-1]] = params[key]
class cp2k_motion_md_barostat_thermostat_gle_s:
    """&S subsection of the GLE thermostat in a CP2K input file."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {}
        # parent emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """Write the &S ... &END S block to the open file handle fout."""
        fout.write("\t\t\t\t\t&S\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (key, value))
        fout.write("\t\t\t\t\t&END S\n")

    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 6 components."""
        for key in params:
            if len(key.split("-")) == 6:
                self.params[key.split("-")[-1]] = params[key]
class cp2k_motion_md_barostat_thermostat_gle_thermostat_energy:
    """&THERMOSTAT_ENERGY subsection of the GLE thermostat in a CP2K input."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {
        }
        # parent emits this subsection only when status is True
        self.status = False
    # basic setting
    def to_input(self, fout):
        """Write the &THERMOSTAT_ENERGY ... &END THERMOSTAT_ENERGY block to fout."""
        fout.write("\t\t\t\t\t&THERMOSTAT_ENERGY\n")
        for item in self.params:
            if self.params[item] is not None:
                # Bug fix: keyword lines were indented one level short (five tabs)
                # compared with every sibling subsection at this depth.
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, self.params[item]))
        fout.write("\t\t\t\t\t&END THERMOSTAT_ENERGY\n")
    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 6 components."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_motion_md_barostat_thermostat_gle:
    """&GLE thermostat section of MOTION/MD/BAROSTAT in a CP2K input file."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {}
        # parent emits this section only when status is True
        self.status = False
        # child subsections
        self.rng_init = cp2k_motion_md_barostat_thermostat_gle_rng_init()
        self.s = cp2k_motion_md_barostat_thermostat_gle_s()
        self.thermostat_energy = cp2k_motion_md_barostat_thermostat_gle_thermostat_energy()

    def to_input(self, fout):
        """Write the &GLE ... &END GLE block to the open file handle fout."""
        fout.write("\t\t\t\t&GLE\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (key, value))
        if self.rng_init.status:
            self.rng_init.to_input(fout)
        if self.s.status:
            self.s.to_input(fout)
        if self.thermostat_energy.status:
            self.thermostat_energy.to_input(fout)
        fout.write("\t\t\t\t&END GLE\n")

    def set_params(self, params):
        """Store 5-component dashed keys here; route subsection keys to children."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = params[key]
            elif parts[4] == "RNG_INIT":
                self.rng_init.set_params({key: params[key]})
            elif parts[4] == "S":
                self.s.set_params({key: params[key]})
            elif parts[4] == "THERMOSTAT_ENERGY":
                self.thermostat_energy.set_params({key: params[key]})
class cp2k_motion_md_barostat_thermostat_nose_coord:
    """&COORD subsection of the &NOSE thermostat in a CP2K input file."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {}
        # parent emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """Write the &COORD ... &END COORD block to the open file handle fout."""
        fout.write("\t\t\t\t\t&COORD\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (key, value))
        fout.write("\t\t\t\t\t&END COORD\n")

    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 6 components."""
        for key in params:
            if len(key.split("-")) == 6:
                self.params[key.split("-")[-1]] = params[key]
class cp2k_motion_md_barostat_thermostat_nose_force:
    """&FORCE subsection of the &NOSE thermostat in a CP2K input file."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {}
        # parent emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """Write the &FORCE ... &END FORCE block to the open file handle fout."""
        fout.write("\t\t\t\t\t&FORCE\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (key, value))
        fout.write("\t\t\t\t\t&END FORCE\n")

    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 6 components."""
        for key in params:
            if len(key.split("-")) == 6:
                self.params[key.split("-")[-1]] = params[key]
class cp2k_motion_md_barostat_thermostat_nose_mass:
    """&MASS subsection of the &NOSE thermostat in a CP2K input file."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {}
        # parent emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """Write the &MASS ... &END MASS block to the open file handle fout."""
        fout.write("\t\t\t\t\t&MASS\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (key, value))
        fout.write("\t\t\t\t\t&END MASS\n")

    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 6 components."""
        for key in params:
            if len(key.split("-")) == 6:
                self.params[key.split("-")[-1]] = params[key]
class cp2k_motion_md_barostat_thermostat_nose_velocity:
    """&VELOCITY subsection of the &NOSE thermostat in a CP2K input file."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {
        }
        # parent emits this subsection only when status is True
        self.status = False
    # basic setting
    def to_input(self, fout):
        """Write the &VELOCITY ... &END VELOCITY block to fout."""
        fout.write("\t\t\t\t\t&VELOCITY\n")
        for item in self.params:
            if self.params[item] is not None:
                # Bug fix: keyword lines were indented one level short (five tabs)
                # compared with the sibling &COORD/&FORCE/&MASS subsections.
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, self.params[item]))
        fout.write("\t\t\t\t\t&END VELOCITY\n")
    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 6 components."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_motion_md_barostat_thermostat_nose:
    """&NOSE thermostat section of MOTION/MD/BAROSTAT in a CP2K input file."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing.
        # Defaults below match the original hard-coded basic settings.
        self.params = {
            "LENGTH": 3,
            "MTS": 2,
            "TIMECON": 1.0e3,
            "YOSHIDA": 3,
        }
        # parent emits this section only when status is True
        self.status = False
        # child subsections
        self.coord = cp2k_motion_md_barostat_thermostat_nose_coord()
        self.force = cp2k_motion_md_barostat_thermostat_nose_force()
        self.mass = cp2k_motion_md_barostat_thermostat_nose_mass()
        self.velocity = cp2k_motion_md_barostat_thermostat_nose_velocity()

    def to_input(self, fout):
        """Write the &NOSE ... &END NOSE block to the open file handle fout."""
        fout.write("\t\t\t\t&NOSE\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (key, value))
        if self.coord.status:
            self.coord.to_input(fout)
        if self.force.status:
            self.force.to_input(fout)
        if self.mass.status:
            self.mass.to_input(fout)
        if self.velocity.status:
            self.velocity.to_input(fout)
        fout.write("\t\t\t\t&END NOSE\n")

    def set_params(self, params):
        """Store 5-component dashed keys here; route subsection keys to children."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = params[key]
            elif parts[4] == "VELOCITY":
                self.velocity.set_params({key: params[key]})
            elif parts[4] == "MASS":
                self.mass.set_params({key: params[key]})
            elif parts[4] == "FORCE":
                self.force.set_params({key: params[key]})
            elif parts[4] == "COORD":
                self.coord.set_params({key: params[key]})
class cp2k_motion_md_barostat_thermostat_print_energy_each:
    """&EACH subsection of the thermostat &PRINT/&ENERGY section of a CP2K input."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {
        }
        # parent emits this subsection only when status is True
        self.status = False
    # basic setting
    def to_input(self, fout):
        """Write the &EACH ... &END EACH block to the open file handle fout."""
        fout.write("\t\t\t\t\t\t&EACH\n")
        for item in self.params:
            # Bug fix: 'none' was a NameError at runtime; the builtin is 'None'.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t\t%s %s\n" % (item, self.params[item]))
        fout.write("\t\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 7 components."""
        for item in params:
            if len(item.split("-")) == 7:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_motion_md_barostat_thermostat_print_energy:
    """&ENERGY subsection of the thermostat &PRINT section of a CP2K input."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {
        }
        # parent emits this subsection only when status is True
        self.status = False
        self.each = cp2k_motion_md_barostat_thermostat_print_energy_each()
    # basic setting
    def to_input(self, fout):
        """Write the &ENERGY ... &END ENERGY block to the open file handle fout."""
        fout.write("\t\t\t\t\t&ENERGY\n")
        for item in self.params:
            # Bug fix: 'none' was a NameError at runtime; the builtin is 'None'.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, self.params[item]))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t\t&END ENERGY\n")
    def set_params(self, params):
        """Store 6-component dashed keys here; route EACH keys to the child."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[5] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_motion_md_barostat_thermostat_print_temperature_each:
    """&EACH subsection of the thermostat &PRINT/&TEMPERATURE section of a CP2K input."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {
        }
        # parent emits this subsection only when status is True
        self.status = False
    # basic setting
    def to_input(self, fout):
        """Write the &EACH ... &END EACH block to the open file handle fout."""
        fout.write("\t\t\t\t\t\t&EACH\n")
        for item in self.params:
            # Bug fix: 'none' was a NameError at runtime; the builtin is 'None'.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t\t%s %s\n" % (item, self.params[item]))
        fout.write("\t\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        """Keep entries whose dashed key path has exactly 7 components."""
        for item in params:
            if len(item.split("-")) == 7:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_motion_md_barostat_thermostat_print_temperature:
    """&TEMPERATURE subsection of the thermostat &PRINT section of a CP2K input."""
    def __init__(self):
        # keyword -> value; None values are skipped when writing
        self.params = {
        }
        # parent emits this subsection only when status is True
        self.status = False
        self.each = cp2k_motion_md_barostat_thermostat_print_temperature_each()
    # basic setting
    def to_input(self, fout):
        """Write the &TEMPERATURE ... &END TEMPERATURE block to fout."""
        fout.write("\t\t\t\t\t&TEMPERATURE\n")
        for item in self.params:
            # Bug fix: 'none' was a NameError at runtime; the builtin is 'None'.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, self.params[item]))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t\t&END TEMPERATURE\n")
    def set_params(self, params):
        """Store 6-component dashed keys here; route EACH keys to the child."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[5] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_motion_md_barostat_thermostat_print_thermostat_info_each:
def __init__(self):
self.params = {
}
self.status = False
# basic setting
def to_input(self, fout):
fout.write("\t\t\t\t\t\t&EACH\n")
for item in self.params:
| |
<gh_stars>0
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import errno
import fcntl
import os
import socket
import time
from datetime import datetime
from typing import Dict, Tuple # novm
import llnl.util.tty as tty
import spack.util.string
__all__ = [
'Lock',
'LockDowngradeError',
'LockUpgradeError',
'LockTransaction',
'WriteTransaction',
'ReadTransaction',
'LockError',
'LockTimeoutError',
'LockPermissionError',
'LockROFileError',
'CantCreateLockError'
]
#: Mapping of supported locks to description
lock_type = {fcntl.LOCK_SH: 'read', fcntl.LOCK_EX: 'write'}
#: A useful replacement for functions that should return True when not provided,
#: for example as a default callback.
true_fn = lambda: True
class OpenFile(object):
    """Reference-counted record for one open lockfile.

    There is one ``OpenFile`` per inode, per process; it stores the file
    handle (the object Python code actually uses) together with a count of
    how many lock objects currently reference it. The raw file descriptor
    is reachable from the handle if it is ever needed.
    """
    def __init__(self, fh):
        self.fh = fh   # open file handle for the lockfile
        self.refs = 0  # number of current references to this handle
class OpenFileTracker(object):
    """Track open lockfiles, to minimize number of open file descriptors.

    The ``fcntl`` locks that Spack uses are associated with an inode and a process.
    This is convenient, because if a process exits, it releases its locks.
    Unfortunately, this also means that if you close a file, *all* locks associated
    with that file's inode are released, regardless of whether the process has any
    other open file descriptors on it.

    Because of this, we need to track open lock files so that we only close them when
    a process no longer needs them. We do this by tracking each lockfile by its
    inode and process id. This has several nice properties:

    1. Tracking by pid ensures that, if we fork, we don't inadvertently track the parent
       process's lockfiles. ``fcntl`` locks are not inherited across forks, so we'll
       just track new lockfiles in the child.
    2. Tracking by inode ensures that references are counted per inode, and that we don't
       inadvertently close a file whose inode still has open locks.
    3. Tracking by both pid and inode ensures that we only open lockfiles the minimum
       number of times necessary for the locks we have.

    Note: as mentioned elsewhere, these locks aren't thread safe -- they're designed to
    work in Python and assume the GIL.
    """
    def __init__(self):
        """Create a new ``OpenFileTracker``."""
        # (inode, pid) -> OpenFile; see class docstring for why both keys
        self._descriptors = {} # type: Dict[Tuple[int, int], OpenFile]
    def get_fh(self, path):
        """Get a filehandle for a lockfile.

        This routine will open writable files for read/write even if you're asking
        for a shared (read-only) lock. This is so that we can upgrade to an exclusive
        (write) lock later if requested.

        Arguments:
            path (str): path to lock file we want a filehandle for
        """
        # Open writable files as 'r+' so we can upgrade to write later
        os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), 'r+'
        pid = os.getpid()
        open_file = None # OpenFile object, if there is one
        stat = None # stat result for the lockfile, if it exists
        try:
            # see whether we've seen this inode/pid before
            stat = os.stat(path)
            key = (stat.st_ino, pid)
            open_file = self._descriptors.get(key)
        except OSError as e:
            if e.errno != errno.ENOENT: # only handle file not found
                raise
            # path does not exist -- fail if we won't be able to create it
            parent = os.path.dirname(path) or '.'
            if not os.access(parent, os.W_OK):
                raise CantCreateLockError(path)
        # if there was no already open file, we'll need to open one
        if not open_file:
            if stat and not os.access(path, os.W_OK):
                # we know path exists but not if it's writable. If it's read-only,
                # only open the file for reading (and fail if we're trying to get
                # an exclusive (write) lock on it)
                os_mode, fh_mode = os.O_RDONLY, 'r'
            fd = os.open(path, os_mode)
            fh = os.fdopen(fd, fh_mode)
            open_file = OpenFile(fh)
            # if we just created the file, we'll need to get its inode here
            if not stat:
                inode = os.fstat(fd).st_ino
                key = (inode, pid)
            self._descriptors[key] = open_file
        # count this caller as one more user of the shared handle
        open_file.refs += 1
        return open_file.fh
    def release_fh(self, path):
        """Release a filehandle, only closing it if there are no more references."""
        try:
            inode = os.stat(path).st_ino
        except OSError as e:
            if e.errno != errno.ENOENT: # only handle file not found
                raise
            inode = None # this will not be in self._descriptors
        key = (inode, os.getpid())
        open_file = self._descriptors.get(key)
        assert open_file, "Attempted to close non-existing lock path: %s" % path
        # close only when the last reference goes away; see class docstring
        open_file.refs -= 1
        if not open_file.refs:
            del self._descriptors[key]
            open_file.fh.close()
#: Open file descriptors for locks in this process. Used to prevent one process
#: from opening the same file many times for different byte range locks
file_tracker = OpenFileTracker()
def _attempts_str(wait_time, nattempts):
# Don't print anything if we succeeded on the first try
if nattempts <= 1:
return ''
attempts = spack.util.string.plural(nattempts, 'attempt')
return ' after {0:0.2f}s and {1}'.format(wait_time, attempts)
class Lock(object):
"""This is an implementation of a filesystem lock using Python's lockf.
In Python, ``lockf`` actually calls ``fcntl``, so this should work with
any filesystem implementation that supports locking through the fcntl
calls. This includes distributed filesystems like Lustre (when flock
is enabled) and recent NFS versions.
Note that this is for managing contention over resources *between*
processes and not for managing contention between threads in a process: the
functions of this object are not thread-safe. A process also must not
maintain multiple locks on the same file (or, more specifically, on
overlapping byte ranges in the same file).
"""
    def __init__(self, path, start=0, length=0, default_timeout=None,
                 debug=False, desc=''):
        """Construct a new lock on the file at ``path``.

        By default, the lock applies to the whole file. Optionally,
        caller can specify a byte range beginning ``start`` bytes from
        the start of the file and extending ``length`` bytes from there.

        This exposes a subset of fcntl locking functionality. It does
        not currently expose the ``whence`` parameter -- ``whence`` is
        always ``os.SEEK_SET`` and ``start`` is always evaluated from the
        beginning of the file.

        Args:
            path (str): path to the lock
            start (int): optional byte offset at which the lock starts
            length (int): optional number of bytes to lock
            default_timeout (int): number of seconds to wait for lock attempts,
                where None means to wait indefinitely
            debug (bool): debug mode specific to locking
            desc (str): optional debug message lock description, which is
                helpful for distinguishing between different Spack locks.
        """
        self.path = path
        self._file = None
        # counts of currently held read/write locks on this file
        self._reads = 0
        self._writes = 0
        # byte range parameters
        self._start = start
        self._length = length
        # enable debug mode
        self.debug = debug
        # optional debug description
        self.desc = ' ({0})'.format(desc) if desc else ''
        # If the user doesn't set a default timeout, or if they choose
        # None, 0, etc. then lock attempts will not time out (unless the
        # user sets a timeout for each attempt)
        self.default_timeout = default_timeout or None
        # PID and host of lock holder (only used in debug mode)
        self.pid = self.old_pid = None
        self.host = self.old_host = None
    @staticmethod
    def _poll_interval_generator(_wait_times=None):
        """This implements a backoff scheme for polling a contended resource
        by suggesting a succession of wait times between polls.

        It suggests a poll interval of .1s until 2 seconds have passed,
        then a poll interval of .2s until 10 seconds have passed, and finally
        (for all requests after 10s) suggests a poll interval of .5s.

        This doesn't actually track elapsed time, it estimates the waiting
        time as though the caller always waits for the full length of time
        suggested by this function.

        Args:
            _wait_times (tuple): optional (stage1, stage2, stage3) interval
                override, used for testing.
        """
        num_requests = 0
        stage1, stage2, stage3 = _wait_times or (1e-1, 2e-1, 5e-1)
        wait_time = stage1
        while True:
            if num_requests >= 60: # ~10s estimated elapsed: 20*.1s + 40*.2s
                wait_time = stage3
            elif num_requests >= 20: # ~2s estimated elapsed: 20*.1s
                wait_time = stage2
            num_requests += 1
            yield wait_time
    def __repr__(self):
        """Formal representation: class name plus every instance attribute."""
        rep = '{0}('.format(self.__class__.__name__)
        for attr, value in self.__dict__.items():
            rep += '{0}={1}, '.format(attr, value.__repr__())
        # strip the trailing ", " before closing the parenthesis
        return '{0})'.format(rep.strip(', '))
def __str__(self):
| |
empty container for results
out = {}
# Poincaré features (SD1, SD2, etc.)
out = _hrv_nonlinear_poincare(rri, out)
# Heart Rate Fragmentation
out = _hrv_nonlinear_fragmentation(rri, out)
# Heart Rate Asymmetry
out = _hrv_nonlinear_poincare_hra(rri, out)
# DFA
out = _hrv_dfa(peaks, rri, out, **kwargs)
# Complexity
tolerance = 0.2 * np.std(rri, ddof=1)
out["ApEn"], _ = entropy_approximate(rri, delay=1, dimension=2, tolerance=tolerance)
out["SampEn"], _ = entropy_sample(rri, delay=1, dimension=2, tolerance=tolerance)
out["ShanEn"], _ = entropy_shannon(rri)
out["FuzzyEn"], _ = entropy_fuzzy(rri, delay=1, dimension=2, tolerance=tolerance)
out["MSEn"], _ = entropy_multiscale(rri, dimension=2, tolerance=tolerance, method="MSEn")
out["CMSEn"], _ = entropy_multiscale(rri, dimension=2, tolerance=tolerance, method="CMSEn")
out["RCMSEn"], _ = entropy_multiscale(rri, dimension=2, tolerance=tolerance, method="RCMSEn")
out["CD"], _ = fractal_correlation(rri, delay=1, dimension=2, **kwargs)
out["HFD"], _ = fractal_higuchi(rri, k_max=10, **kwargs)
out["KFD"], _ = fractal_katz(rri)
out["LZC"], _ = complexity_lempelziv(rri, **kwargs)
if show:
_hrv_nonlinear_show(rri, out)
out = pd.DataFrame.from_dict(out, orient="index").T.add_prefix("HRV_")
return out
# =============================================================================
# Get SD1 and SD2
# =============================================================================
def _hrv_nonlinear_poincare(rri, out):
"""Compute SD1 and SD2.
- Do existing measures of Poincare plot geometry reflect nonlinear features of heart rate
variability? - Brennan (2001)
"""
# HRV and hrvanalysis
rri_n = rri[:-1]
rri_plus = rri[1:]
x1 = (rri_n - rri_plus) / np.sqrt(2) # Eq.7
x2 = (rri_n + rri_plus) / np.sqrt(2)
sd1 = np.std(x1, ddof=1)
sd2 = np.std(x2, ddof=1)
out["SD1"] = sd1
out["SD2"] = sd2
# SD1 / SD2
out["SD1SD2"] = sd1 / sd2
# Area of ellipse described by SD1 and SD2
out["S"] = np.pi * out["SD1"] * out["SD2"]
# CSI / CVI
T = 4 * out["SD1"]
L = 4 * out["SD2"]
out["CSI"] = L / T
out["CVI"] = np.log10(L * T)
out["CSI_Modified"] = L ** 2 / T
return out
def _hrv_nonlinear_poincare_hra(rri, out):
    """Heart Rate Asymmetry Indices.

    Fills ``out`` with GI, SI, AI, PI and the asymmetric SD1/SD2/SDNN
    decompositions, then returns it.

    - Asymmetry of Poincaré plot (or termed as heart rate asymmetry, HRA) - Yan (2017)
    - Asymmetric properties of long-term and total heart rate variability - Piskorski (2011)
    """
    # number of Poincare-plot points (successive-interval pairs)
    N = len(rri) - 1
    x = rri[:-1] # rri_n, x-axis
    y = rri[1:] # rri_plus, y-axis
    diff = y - x
    decelerate_indices = np.where(diff > 0)[0] # set of points above IL where y > x
    accelerate_indices = np.where(diff < 0)[0] # set of points below IL where y < x
    nochange_indices = np.where(diff == 0)[0]
    # Distances to centroid line l2
    centroid_x = np.mean(x)
    centroid_y = np.mean(y)
    dist_l2_all = abs((x - centroid_x) + (y - centroid_y)) / np.sqrt(2)
    # Distances to LI (the identity line y == x)
    dist_all = abs(y - x) / np.sqrt(2)
    # Calculate the angles
    theta_all = abs(np.arctan(1) - np.arctan(y / x)) # phase angle LI - phase angle of i-th point
    # Calculate the radius
    r = np.sqrt(x ** 2 + y ** 2)
    # Sector areas
    S_all = 1 / 2 * theta_all * r ** 2
    # Guzik's Index (GI): share of identity-line distance due to decelerations
    den_GI = np.sum(dist_all)
    num_GI = np.sum(dist_all[decelerate_indices])
    out["GI"] = (num_GI / den_GI) * 100
    # Slope Index (SI): same idea, using phase angles
    den_SI = np.sum(theta_all)
    num_SI = np.sum(theta_all[decelerate_indices])
    out["SI"] = (num_SI / den_SI) * 100
    # Area Index (AI): same idea, using sector areas
    den_AI = np.sum(S_all)
    num_AI = np.sum(S_all[decelerate_indices])
    out["AI"] = (num_AI / den_AI) * 100
    # Porta's Index (PI): percentage of accelerations among off-identity points
    m = N - len(nochange_indices) # all points except those on LI
    b = len(accelerate_indices) # number of points below LI
    out["PI"] = (b / m) * 100
    # Short-term asymmetry (SD1)
    sd1d = np.sqrt(np.sum(dist_all[decelerate_indices] ** 2) / (N - 1))
    sd1a = np.sqrt(np.sum(dist_all[accelerate_indices] ** 2) / (N - 1))
    sd1I = np.sqrt(sd1d ** 2 + sd1a ** 2)
    out["C1d"] = (sd1d / sd1I) ** 2
    out["C1a"] = (sd1a / sd1I) ** 2
    out["SD1d"] = sd1d # SD1 deceleration
    out["SD1a"] = sd1a # SD1 acceleration
    # out["SD1I"] = sd1I # SD1 based on LI, whereas SD1 is based on centroid line l1
    # Long-term asymmetry (SD2); on-identity points contribute half to each side
    longterm_dec = np.sum(dist_l2_all[decelerate_indices] ** 2) / (N - 1)
    longterm_acc = np.sum(dist_l2_all[accelerate_indices] ** 2) / (N - 1)
    longterm_nodiff = np.sum(dist_l2_all[nochange_indices] ** 2) / (N - 1)
    sd2d = np.sqrt(longterm_dec + 0.5 * longterm_nodiff)
    sd2a = np.sqrt(longterm_acc + 0.5 * longterm_nodiff)
    sd2I = np.sqrt(sd2d ** 2 + sd2a ** 2)
    out["C2d"] = (sd2d / sd2I) ** 2
    out["C2a"] = (sd2a / sd2I) ** 2
    out["SD2d"] = sd2d # SD2 deceleration
    out["SD2a"] = sd2a # SD2 acceleration
    # out["SD2I"] = sd2I # identical with SD2
    # Total asymmetry (SDNN)
    sdnnd = np.sqrt(0.5 * (sd1d ** 2 + sd2d ** 2)) # SDNN deceleration
    sdnna = np.sqrt(0.5 * (sd1a ** 2 + sd2a ** 2)) # SDNN acceleration
    sdnn = np.sqrt(sdnnd ** 2 + sdnna ** 2) # should be similar to sdnn in hrv_time
    out["Cd"] = (sdnnd / sdnn) ** 2
    out["Ca"] = (sdnna / sdnn) ** 2
    out["SDNNd"] = sdnnd
    out["SDNNa"] = sdnna
    return out
def _hrv_nonlinear_fragmentation(rri, out):
    """Heart Rate Fragmentation Indices - Costa (2017)

    Fills ``out`` with PIP, IALS, PSS and PAS, then returns it.
    The more fragmented a time series is, the higher the PIP, IALS, PSS, and PAS indices will be.
    """
    # sign changes of the successive differences mark inflection points
    diff_rri = np.diff(rri)
    zerocrossings = signal_zerocrossings(diff_rri)
    # Percentage of inflection points (PIP)
    # NOTE(review): divides by len(rri), not len(diff_rri) -- confirm against Costa (2017)
    out["PIP"] = len(zerocrossings) / len(rri)
    # Inverse of the average length of the acceleration/deceleration segments (IALS)
    accelerations = np.where(diff_rri > 0)[0]
    decelerations = np.where(diff_rri < 0)[0]
    consecutive = find_consecutive(accelerations) + find_consecutive(decelerations)
    lengths = [len(i) for i in consecutive]
    out["IALS"] = 1 / np.average(lengths)
    # Percentage of short segments (PSS) - The complement of the percentage of NN intervals in
    # acceleration and deceleration segments with three or more NN intervals
    out["PSS"] = np.sum(np.asarray(lengths) < 3) / len(lengths)
    # Percentage of NN intervals in alternation segments (PAS). An alternation segment is a sequence
    # of at least four NN intervals, for which heart rate acceleration changes sign every beat. We note
    # that PAS quantifies the amount of a particular sub-type of fragmentation (alternation). A time
    # series may be highly fragmented and have a small amount of alternation. However, all time series
    # with large amount of alternation are highly fragmented.
    alternations = find_consecutive(zerocrossings)
    lengths = [len(i) for i in alternations]
    out["PAS"] = np.sum(np.asarray(lengths) >= 4) / len(lengths)
    return out
# =============================================================================
# DFA
# =============================================================================
def _hrv_dfa(peaks, rri, out, n_windows="default", **kwargs):
    """Detrended fluctuation analysis (mono- and multifractal) over two scale ranges.

    Fills ``out`` with DFA_alpha1/2 and MFDFA_alpha1/2_* indices and returns it.
    ``dfa_windows`` may be passed through ``kwargs`` as [(min1, max1), (min2, max2-or-None)].
    """
    # if "dfa_windows" in kwargs:
    #     dfa_windows = kwargs["dfa_windows"]
    # else:
    #     dfa_windows = [(4, 11), (12, None)]
    # consider using dict.get() method directly
    dfa_windows = kwargs.get("dfa_windows", [(4, 11), (12, None)])
    # Determine max beats: an open-ended long-term range defaults to a tenth of the series
    if dfa_windows[1][1] is None:
        max_beats = len(peaks) / 10
    else:
        max_beats = dfa_windows[1][1]
    # No. of windows to compute for short and long term
    if n_windows == "default":
        n_windows_short = int(dfa_windows[0][1] - dfa_windows[0][0] + 1)
        n_windows_long = int(max_beats - dfa_windows[1][0] + 1)
    elif isinstance(n_windows, list):
        n_windows_short = n_windows[0]
        n_windows_long = n_windows[1]
    # NOTE(review): if n_windows is neither "default" nor a list, n_windows_short
    # is never assigned and the next line raises NameError -- consider validating.
    # Compute DFA alpha1
    short_window = np.linspace(dfa_windows[0][0], dfa_windows[0][1], n_windows_short).astype(int)
    # For monofractal
    out["DFA_alpha1"], _ = fractal_dfa(rri, multifractal=False, scale=short_window, **kwargs)
    # For multifractal
    mdfa_alpha1, _ = fractal_dfa(
        rri, multifractal=True, q=np.arange(-5, 6), scale=short_window, **kwargs
    )
    for k in mdfa_alpha1.columns:
        out["MFDFA_alpha1_" + k] = mdfa_alpha1[k].values[0]
    # Compute DFA alpha2
    # sanitize max_beats
    if max_beats < dfa_windows[1][0] + 1:
        warn(
            "DFA_alpha2 related indices will not be calculated. "
            "The maximum duration of the windows provided for the long-term correlation is smaller "
            "than the minimum duration of windows. Refer to the `scale` argument in `nk.fractal_dfa()` "
            "for more information.",
            category=NeuroKitWarning,
        )
        return out
    else:
        long_window = np.linspace(dfa_windows[1][0], int(max_beats), n_windows_long).astype(int)
        # For monofractal
        out["DFA_alpha2"], _ = fractal_dfa(rri, multifractal=False, scale=long_window, **kwargs)
        # For multifractal
        mdfa_alpha2, _ = fractal_dfa(
            rri, multifractal=True, q=np.arange(-5, 6), scale=long_window, **kwargs
        )
        for k in mdfa_alpha2.columns:
            out["MFDFA_alpha2_" + k] = mdfa_alpha2[k].values[0]
    return out
# =============================================================================
# Plot
# =============================================================================
def _hrv_nonlinear_show(rri, out, ax=None, ax_marg_x=None, ax_marg_y=None):
mean_heart_period = np.mean(rri)
sd1 = out["SD1"]
sd2 = out["SD2"]
if isinstance(sd1, pd.Series):
sd1 = float(sd1)
if isinstance(sd2, pd.Series):
sd2 = float(sd2)
# Poincare values
ax1 = rri[:-1]
ax2 = rri[1:]
# Set grid boundaries
ax1_lim = (max(ax1) - min(ax1)) / 10
ax2_lim = (max(ax2) - min(ax2)) / 10
ax1_min = min(ax1) - ax1_lim
ax1_max = max(ax1) + ax1_lim
ax2_min = min(ax2) - ax2_lim
ax2_max = max(ax2) + ax2_lim
# Prepare figure
if ax is None and ax_marg_x is None and ax_marg_y is | |
# type: ignore
from typing import Union, List, Dict
from urllib.parse import urlparse
import urllib3
from pymisp import ExpandedPyMISP, PyMISPError, MISPObject, MISPSighting, MISPEvent, MISPAttribute
from pymisp.tools import GenericObjectGenerator
import copy
from pymisp.tools import FileObject
from CommonServerPython import *
logging.getLogger("pymisp").setLevel(logging.CRITICAL)
def handle_connection_errors(error):
    """Map known PyMISP connection error text to friendlier messages.

    Falls through to reporting the raw error when no pattern matches.
    NOTE(review): assumes ``return_error`` never returns (demisto convention) -- verify.
    """
    known_failures = (
        ("SSLError",
         'Unable to connect to MISP because of a SSLCertVerificationError, '
         'Please try to use the Trust any certificate option.'),
        ("NewConnectionError",
         'Unable to connect to MISP because of a NewConnectionError, '
         'Please make sure your MISP server url is correct.'),
        ("Please make sure the API key and the URL are correct",
         'Unable to connect to MISP, '
         'Please make sure the API key is correct.'),
    )
    for marker, message in known_failures:
        if marker in error:
            return_error(message)
    return_error(error)
def warn(*args):
    """No-op replacement for warnings.warn: silently discard all warnings."""
    return None
# Disable requests warnings
urllib3.disable_warnings()
# Disable python warnings by swapping in the no-op warn() defined above
warnings.warn = warn
''' GLOBALS/PARAMS '''
# Integration configuration, read once at import time.
params = demisto.params()
# The API key lives in the 'credentials' parameter's password field.
if not params.get('credentials') or not (MISP_API_KEY := params.get('credentials', {}).get('password')):
    raise DemistoException('Missing API Key. Fill in a valid key in the integration configuration.')
MISP_URL = params.get('url')
# 'insecure' checkbox means "trust any certificate", i.e. skip SSL verification.
VERIFY = not params.get('insecure')
PROXIES = handle_proxy()  # type: ignore
# Single PyMISP client shared by every command in this integration.
# handle_connection_errors calls return_error, which halts execution, so
# PYMISP is only left undefined when the integration has already stopped.
try:
    PYMISP = ExpandedPyMISP(url=MISP_URL, key=MISP_API_KEY, ssl=VERIFY, proxies=PROXIES)
except PyMISPError as e:
    handle_connection_errors(e.message)
# Well-known OSINT feeds addressable by short name.
PREDEFINED_FEEDS = {
    'CIRCL': {'name': 'CIRCL OSINT Feed',
              'url': 'https://www.circl.lu/doc/misp/feed-osint',
              'format': 'misp',
              'input': 'network'},
    'Botvrij.eu': {'name': 'The Botvrij.eu Data',
                   'url': 'http://www.botvrij.eu/data/feed-osint',
                   'format': 'misp',
                   'input': 'network'}
}
# Event threat-level name -> MISP numeric id (1 is the most severe).
THREAT_LEVELS_TO_ID = {
    'High': 1,
    'Medium': 2,
    'Low': 3,
    'Unknown': 4
}
# Mapping from MISP response field names to the PascalCase keys used in
# Demisto context data; applied recursively by
# replace_keys_from_misp_to_context_data. Keys absent here pass through
# unchanged.
MISP_ENTITIES_TO_CONTEXT_DATA = {
    'deleted': 'Deleted',
    'category': 'Category',
    'comment': 'Comment',
    'uuid': 'UUID',
    'sharing_group_id': 'SharingGroupID',
    'timestamp': 'LastChanged',
    'to_ids': 'ToIDs',
    'value': 'Value',
    'event_id': 'EventID',
    'ShadowAttribute': 'ShadowAttribute',
    'disable_correlation': 'DisableCorrelation',
    'distribution': 'Distribution',
    'type': 'Type',
    'id': 'ID',
    'date': 'CreationDate',
    'info': 'Info',
    'published': 'Published',
    'attribute_count': 'AttributeCount',
    'proposal_email_lock': 'ProposalEmailLock',
    'locked': 'Locked',
    'publish_timestamp': 'PublishTimestamp',
    'event_creator_email': 'EventCreatorEmail',
    'name': 'Name',
    'analysis': 'Analysis',
    'threat_level_id': 'ThreatLevelID',
    'old_id': 'OldID',
    'org_id': 'OrganizationID',
    'Org': 'Organization',
    'Orgc': 'OwnerOrganization',
    'orgc_uuid': 'OwnerOrganization.UUID',
    'orgc_id': 'OwnerOrganization.ID',
    'orgc_name': 'OwnerOrganization.Name',
    'event_uuid': 'EventUUID',
    'proposal_to_delete': 'ProposalToDelete',
    'description': 'Description',
    'version': 'Version',
    'Object': 'Object',
    'object_id': 'ObjectID',
    'object_relation': 'ObjectRelation',
    'template_version': 'TemplateVersion',
    'template_uuid': 'TemplateUUID',
    'meta-category': 'MetaCategory',
    'decay_score': 'DecayScore',
    'first_seen': 'first_seen',
    'last_seen': 'last_seen',
    'provider': 'Provider',
    'source_format': 'SourceFormat',
    'url': 'URL',
    'event_uuids': 'EventUUIDS',
}
# Analysis phase name -> MISP numeric analysis level.
MISP_ANALYSIS_TO_IDS = {
    'initial': 0,
    'ongoing': 1,
    'completed': 2
}
# Distribution option name -> MISP numeric distribution id.
# NOTE(review): id 4 ('Sharing group') is intentionally absent and 5 maps to
# "inherit from event" — confirm against the MISP distribution documentation.
MISP_DISTRIBUTION_TO_IDS = {
    'Your_organization_only': 0,
    'This_community_only': 1,
    'Connected_communities': 2,
    'All_communities': 3,
    'Inherit_event': 5
}
# Sighting type name <-> numeric id; keep the two mappings in sync.
SIGHTING_TYPE_NAME_TO_ID = {
    'sighting': 0,
    'false_positive': 1,
    'expiration': 2
}
SIGHTING_TYPE_ID_TO_NAME = {
    '0': 'sighting',
    '1': 'false_positive',
    '2': 'expiration'
}
# Upper-cased indicator type -> DBotScoreType used when building DBot scores.
INDICATOR_TYPE_TO_DBOT_SCORE = {
    'FILE': DBotScoreType.FILE,
    'URL': DBotScoreType.URL,
    'DOMAIN': DBotScoreType.DOMAIN,
    'IP': DBotScoreType.IP,
    'EMAIL': DBotScoreType.EMAIL,
}
# Regex used to validate domain values. It rejects strings ending in common
# file extensions (jpg, html, ...) that would otherwise look like TLDs, and
# accepts punycode labels and 'localhost'.
DOMAIN_REGEX = (
    r"([a-z¡-\uffff0-9](?:[a-z¡-\uffff0-9-]{0,61}"
    "[a-z¡-\uffff0-9])?(?:\\.(?!-)[a-z¡-\uffff0-9-]{1,63}(?<!-))*"
    "\\.(?!-)(?!(jpg|jpeg|exif|tiff|tif|png|gif|otf|ttf|fnt|dtd|xhtml|css"
    "|html)$)(?:[a-z¡-\uffff-]{2,63}|xn--[a-z0-9]{1,59})(?<!-)\\.?$"
    "|localhost)"
)
# Demisto command arguments forwarded to the MISP attribute/event search.
MISP_SEARCH_ARGUMENTS = [
    'value',
    'type',
    'category',
    'org',
    'tags',
    'from',
    'to',
    'event_id',
    'uuid',
    'to_ids',
    'last',
    'include_decay_score',
    'include_sightings',
    'include_correlations',
    'limit',
    'page',
    'enforceWarninglist',
    'include_feed_correlations',
]
# Event fields kept when building event outputs (others are dropped).
EVENT_FIELDS = [
    'id',
    'orgc_id',
    'org_id',
    'date',
    'threat_level_id',
    'info',
    'published',
    'uuid',
    'analysis',
    'attribute_count',
    'timestamp',
    'distribution',
    'proposal_email_lock',
    'locked',
    'publish_timestamp',
    'sharing_group_id',
    'disable_correlation',
    'event_creator_email',
    'Org',
    'Orgc',
    'RelatedEvent',
    'Galaxy',
    'Tag',
    'decay_score',
    'Object',
    'Feed',
]
# Attribute fields kept when building attribute outputs (others are dropped).
ATTRIBUTE_FIELDS = [
    'id',
    'event_id',
    'object_id',
    'object_relation',
    'category',
    'type',
    'to_ids',
    'uuid',
    'timestamp',
    'distribution',
    'sharing_group_id',
    'comment',
    'deleted',
    'disable_correlation',
    'first_seen',
    'last_seen',
    'value',
    'Event',
    'Object',
    'Galaxy',
    'Tag',
    'decay_score',
    'Sighting',
]
def extract_error(error: list) -> List[dict]:
    """
    Flatten PYMISP error tuples into readable dictionaries.

    Each element of `error` is a (status_code, body) pair; the body dict may
    carry 'message' and 'errors' entries. See UT test_extract_error for
    examples.
    Args:
        error: list of responses from error section
    Returns:
        List[Dict[str, any]]: filtered response
    """
    readable = []
    for status_code, body in error:
        readable.append({
            'code': status_code,
            'message': body.get('message'),
            'errors': body.get('errors')
        })
    return readable
def dict_to_generic_object_format(args: dict) -> List[dict]:
    """
    Split a flat dict into a list of single-key dicts, the input shape
    expected by Pymisp's GenericObjectGenerator.
    Args:
        args: dictionary describes MISP object
    Returns:
        list: list containing dicts that GenericObjectGenerator can take.
    Examples:
        >>> {'ip': '8.8.8.8', 'domain': 'google.com'}
        [{'ip': '8.8.8.8'}, {'domain': 'google.com'}]
    """
    return [dict([pair]) for pair in args.items()]
def build_generic_object(template_name: str, args: List[dict]) -> GenericObjectGenerator:
    """
    Create a MISP generic object from a template and a list of attributes.
    Args:
        template_name: template name as described in https://github.com/MISP/misp-objects
        args: arguments to create the generic object
    Returns:
        GenericObjectGenerator: object created in MISP
    Example:
        args should look like:
        [{'analysis_submitted_at': '2018-06-15T06:40:27'},
         {'threat_score': {value=95, to_ids=False}},
         {'permalink': 'https://panacea.threatgrid.com/mask/samples/2e445ef5389d8b'},
         {'heuristic_raw_score': 7.8385159793597}, {'heuristic_score': 96},
         {'original_filename': 'juice.exe'}, {'id': '2e445ef5389d8b'}]  # guardrails-disable-line
    """
    generator = GenericObjectGenerator(template_name)
    generator.generate_attributes(args)
    return generator
def misp_convert_timestamp_to_date_string(timestamp: Union[str, int]) -> str:
    """
    Convert an epoch timestamp from a MISP response (e.g. 1546713469) into an
    ISO-8601 UTC string; falsy timestamps yield an empty string.
    """
    if not timestamp:
        return ""
    return datetime.utcfromtimestamp(int(timestamp)).strftime('%Y-%m-%dT%H:%M:%SZ')
def replace_keys_from_misp_to_context_data(obj_to_build: Union[dict, list, str]) -> Union[dict, list, str]:
    """
    Recursively rename MISP keys to their Demisto context equivalents
    (as listed in MISP_ENTITIES_TO_CONTEXT_DATA); unknown keys pass through.
    Args:
        obj_to_build (Union[dict, list, str]): object to replace keys in
    Returns:
        Union[dict, list, str]: same object type that got in
    """
    if isinstance(obj_to_build, dict):
        renamed = {}
        for key, value in obj_to_build.items():
            context_key = MISP_ENTITIES_TO_CONTEXT_DATA.get(key, key)
            renamed[context_key] = replace_keys_from_misp_to_context_data(value)
        return renamed
    if isinstance(obj_to_build, list):
        return [replace_keys_from_misp_to_context_data(element) for element in obj_to_build]
    return obj_to_build
def reputation_command_to_human_readable(outputs, score, events_to_human_readable):
    """
    Build the human-readable summary table for a reputation command.

    Pops the 'Tag_ID'/'Tag_Name' fields out of every event entry (they are
    shared by the events, so the last one wins) and summarizes the first
    attribute in `outputs` together with the DBot score.
    """
    found_tag_id = ""
    found_tag_name = ""
    for event in events_to_human_readable:
        # these fields are shared by the events; strip them and keep one copy
        found_tag_id = event.pop('Tag_ID')
        found_tag_name = event.pop('Tag_Name')
    first_attribute = outputs[0]
    return {
        'Attribute Type': first_attribute.get('Type'),
        'Dbot Score': score,
        'Attribute Value': first_attribute.get('Value'),
        'Attribute Category': first_attribute.get('Category'),
        'Timestamp': first_attribute.get('Timestamp'),
        'Events with the scored tag': events_to_human_readable,
        'Scored Tag ID': found_tag_id,
        'Scored Tag Name': found_tag_name,
    }
def limit_tag_output_to_id_and_name(attribute_dict, is_event_level):
    """
    Trim a tag list down to ID and Name fields, and collect the relevant tag ids.

    A tag with 'inherited' == 1 belongs to the event; inherited 0 (or absent)
    means it belongs to the attribute.
    * Event-level data (is_event_level=True): every tag id is collected, since
      the event's attribute tags are part of the event scope.
    * Attribute-level data (is_event_level=False): only ids of tags that are
      not inherited from the event are collected.
    Args:
        attribute_dict (dict): The dictionary that includes the tag list.
        is_event_level (bool): Whether the attribute_dict was received from an event object,
        meaning the tags are event's ones. Otherwise, the data is attribute's (attribute tags).
    Returns:
        (list, set): trimmed tag dicts and the set of collected tag ids.
    """
    trimmed_tags = []
    relevant_tag_ids = set()
    for tag in attribute_dict.get('Tag', []):
        tag_id = tag.get('id')
        inherited_from_event = tag.get('inherited', 0)  # absent at attribute level, default 0
        if is_event_level or not inherited_from_event:
            relevant_tag_ids.add(tag_id)
        trimmed_tags.append({'ID': tag_id, 'Name': tag.get('name')})
    return trimmed_tags, relevant_tag_ids
def parse_response_reputation_command(misp_response, malicious_tag_ids, suspicious_tag_ids, attributes_limit):
    """
    Parse the MISP search response for a reputation command.

    Sorts the matching attributes by event id (greater event ids are the
    newer ones), truncates the list to `attributes_limit`, computes the
    indicator score from the attribute/event tags and the events' threat
    levels, and converts the remaining attributes to context-data format
    (removing each attribute's "Related Attribute" list and trimming tag and
    galaxy lists along the way).

    Returns:
        None when no attribute matched; otherwise a 4-tuple of:
        response (dict): the parsed outputs to context data (array of attributes)
        score: the indicator score
        found_tag: the tag (id) which made the indicator get that score
        found_related_events (dict): info (name, id, threat level id) about all
        the events that include the indicator
    Please see an example for a response in test_data/reputation_command_response.json
    Please see an example for a parsed output in test_data/reputation_command_outputs.json
    """
    parsed = copy.deepcopy(misp_response)
    attributes = parsed.get('Attribute')
    if not attributes:
        return None
    # keep only the newest attributes: greater event ids mean newer events
    attributes = sorted(attributes, key=lambda item: item['event_id'], reverse=True)
    attributes = attributes[:attributes_limit]
    found_related_events, attribute_tag_ids, event_tag_ids = prepare_attributes_array_to_context_data(attributes)
    in_bad_event = found_event_with_bad_threat_level_id(found_related_events)
    score, found_tag = get_score(attribute_tags_ids=attribute_tag_ids, event_tags_ids=event_tag_ids,
                                 malicious_tag_ids=malicious_tag_ids, suspicious_tag_ids=suspicious_tag_ids,
                                 is_attribute_in_event_with_bad_threat_level=in_bad_event)
    formatted_response = replace_keys_from_misp_to_context_data({'Attribute': attributes})
    return formatted_response, score, found_tag, found_related_events
def prepare_attributes_array_to_context_data(attributes_list):
    """
    Normalize a list of MISP attributes in place for context-data output.

    For every attribute: drops the (unused) 'RelatedAttribute' list, converts
    its timestamps to readable strings, records the enclosing event in
    found_related_events, and trims event/attribute tag lists to id+name
    while collecting the tag-id sets used later for scoring.

    Args:
        attributes_list (list): attribute dicts from a MISP search response.
    Returns:
        tuple: (found_related_events, attributes_tag_ids, event_tag_ids) where
        found_related_events maps event id -> {Event Name, Threat Level ID, Event ID}.
        Returns None when attributes_list is empty.
    """
    attributes_tag_ids, event_tag_ids = set(), set()
    found_related_events = {}
    if not attributes_list:
        return None
    for attribute in attributes_list:
        # Drop the useless related-attribute list. Use a default so attributes
        # without correlations (key absent, e.g. when include_correlations is
        # disabled) don't raise KeyError.
        attribute.pop("RelatedAttribute", None)
        # NOTE(review): assumes every attribute carries an 'Event' dict — a
        # missing event would raise AttributeError below; confirm with the
        # MISP response schema.
        event = attribute.get('Event')
        convert_timestamp_to_readable(attribute, event)
        found_related_events[event.get("id")] = {"Event Name": event.get("info"),
                                                 "Threat Level ID": event.get('threat_level_id'),
                                                 "Event ID": event.get("id")}
        if event.get('Tag'):
            limit_tag_output, tag_ids = limit_tag_output_to_id_and_name(event, True)
            event['Tag'] = limit_tag_output
            event_tag_ids.update(tag_ids)
        if attribute.get('Tag'):
            limit_tag_output, tag_ids = limit_tag_output_to_id_and_name(attribute, False)
            attribute['Tag'] = limit_tag_output
            attributes_tag_ids.update(tag_ids)
    return found_related_events, attributes_tag_ids, event_tag_ids
def convert_timestamp_to_readable(attribute, event):
    """
    Convert the attribute's (and its event's) epoch timestamps, in place, to
    human-readable date strings. `event` may be None/empty, in which case only
    the attribute timestamp is converted.
    """
    raw_timestamp = attribute.get('timestamp')
    if raw_timestamp:
        attribute['timestamp'] = misp_convert_timestamp_to_date_string(raw_timestamp)
    if not event:
        return
    for field in ('timestamp', 'publish_timestamp'):
        if event.get(field):
            attribute['Event'][field] = misp_convert_timestamp_to_date_string(event.get(field))
def found_event_with_bad_threat_level_id(found_related_events):
bad_threat_level_ids | |
probably
# throw some sort of HTTP exception
else:
raise Exception()
# TML import is distinguished by having an {'Accept': 'text/plain'} header on the POST
def metadata_tml_import(
self,
tml: Union[Dict, List[Dict]],
create_new_on_server: bool = False,
validate_only: bool = False,
formattype: str = 'JSON'
) -> Dict:
endpoint = 'metadata/tml/import'
# allow JSON or YAML in any casing
formattype = formattype.upper()
# Adjust for single Dict
if not isinstance(tml, list):
tml_list = [tml]
else:
tml_list = tml
if formattype == 'JSON':
json_encoded_tml = json.dumps(tml_list)
elif formattype == 'YAML':
json_encoded_tml = json.dumps(tml_list)
# Assume it's just a Python object which will dump to JSON matching the TML format
else:
json_encoded_tml = json.dumps(tml_list)
import_policy = 'ALL_OR_NONE'
if validate_only is True:
import_policy = 'VALIDATE_ONLY'
post_data = {
'import_objects': json_encoded_tml,
'import_policy': import_policy,
'force_create': str(create_new_on_server).lower()
}
url = self.base_url + endpoint
# TML import is distinguished by having an {'Accept': 'text/plain'} header on the POST
response = self.session.post(url=url, data=post_data, headers={'Accept': 'text/plain'})
response.raise_for_status()
# Extra parsing of some 'error responses' that come through in JSON response on HTTP 200
self.raise_tml_errors(response=response)
return response.json()
#
# PARTNER methods
#
def partner_snowflake_user(self, body: Dict) -> Dict:
endpoint = 'partner/snowflake/user'
post_data = {'body': json.dumps(body)}
url = self.base_url + endpoint
response = self.session.post(url=url, data=post_data)
response.raise_for_status()
return response.json()
#
# SECURITY methods
#
# Content in ThoughtSpot belongs to its author/owner
# It can be shared to other Groups or Users
#
# There is a particular JSON object structure for giving sharing permissions
# This method gives you a blank permissions Dict for that purpose
@staticmethod
def get_sharing_permissions_dict() -> Dict:
sharing_dict = {'permissions': {}}
return sharing_dict
# This method takes in an existing permissions Dict and adds a new entry to it
# It returns back the permissions Dict but there is never a copy, it acts upon the Dict passed in
@staticmethod
def add_permission_to_dict(permissions_dict: dict, guid: str, share_mode: str) -> Dict:
for l1 in permissions_dict:
permissions_dict[l1][guid] = {'shareMode': share_mode}
return permissions_dict
# Share any object type
# Requires a Permissions Dict, which can be generated and modified with the two static methods above
def security_share(
self,
shared_object_type: str,
shared_object_guids: List[str],
permissions: Dict,
notify_users: Optional[bool] = False,
message: Optional[str] = None,
email_shares: List[str] = None,
use_custom_embed_urls: bool = False
) -> bool:
if email_shares is None:
email_shares = []
endpoint = 'security/share'
post_data = {
'type': shared_object_type,
'id': json.dumps(shared_object_guids),
'permission': json.dumps(permissions),
'notify': str(notify_users).lower(),
'emailshares': json.dumps(email_shares),
'useCustomEmbedUrls': str(use_custom_embed_urls).lower()
}
if message is not None:
post_data['message'] = message
url = self.base_url + endpoint
response = self.session.post(url=url, data=post_data)
response.raise_for_status()
return True
# Shares just a single viz within a Pinboard, without more complex sharing permissions of security/share
def security_shareviz(
self,
shared_object_type: str,
pinboard_guid: str,
viz_guid: str,
principal_ids: List[str],
notify_users: Optional[bool]=False,
message: Optional[str]=None,
email_shares: List[str]=None,
use_custom_embed_urls: bool=False
) -> bool:
if email_shares is None:
email_shares = []
endpoint = 'security/shareviz'
post_data = {
'type': shared_object_type,
'pinboardId': pinboard_guid,
'principalids': json.dumps(principal_ids),
'vizid': viz_guid,
'notify': str(notify_users).lower(),
'emailshares': json.dumps(email_shares),
'useCustomEmbedUrls': str(use_custom_embed_urls).lower()
}
if message is not None:
post_data['message'] = message
url = self.base_url + endpoint
response = self.session.post(url=url, data=post_data)
response.raise_for_status()
return True
def security_metadata_permissions(self, object_type: str, object_guids: List[str], dependent_share: bool = False,
permission_type: str = 'EFFECTIVE'):
endpoint = 'security/metadata/permissions'
url_params = {
'type': object_type,
'id': json.dumps(object_guids),
'dependentshare': str(dependent_share).lower(),
'permissiontype': permission_type
}
url = self.base_url + endpoint
response = self.session.get(url=url, params=url_params)
response.raise_for_status()
return response.json()
def security_metadata_permissions_by_id(self, object_type: str, object_guid: str, dependent_share: bool = False,
permission_type: str = 'EFFECTIVE'):
endpoint = 'security/metadata/{}/permissions'.format(object_guid)
url_params = {
'type': object_type,
'dependentshare': str(dependent_share).lower(),
'permissiontype': permission_type
}
url = self.base_url + endpoint
response = self.session.get(url=url, params=url_params)
response.raise_for_status()
return response.json()
# ids_by_type is JSON in format { "{object_type_1} : ["{guid_1}, "{guid_2}"], "{object_type_2}" : ["{guid_3}"...] }
def security_effectivepermissionbulk(self, ids_by_type: Dict, dependent_share: bool = False,):
endpoint = 'security/effectivepermissionbulk'
post_data = {
'idsbytype': json.dumps(ids_by_type),
'dependentshare': str(dependent_share).lower()
}
url = self.base_url + endpoint
response = self.session.post(url=url, data=post_data)
response.raise_for_status()
return response.json()
#
# SESSION Methods
#
# Home Pinboard Methods
def session_homepinboard__post(self, pinboard_guid: str, user_guid: str) -> Dict:
endpoint = 'session/homepinboard'
post_data = {
'id': pinboard_guid,
'userid': user_guid
}
url = self.base_url + endpoint
response = self.session.post(url=url, data=post_data)
response.raise_for_status()
return response.json()
def session_homepinboard__get(self) -> Dict:
endpoint = 'session/homepinboard'
url = self.base_url + endpoint
response = self.session.get(url=url)
response.raise_for_status()
return response.json()
def session_homepinboard__delete(self) -> bool:
endpoint = 'session/homepinboard'
url = self.base_url + endpoint
response = self.session.delete(url=url)
response.raise_for_status()
return True
# NOTE:
#
# session/login/token is not implemented here, it is intended for a browser login
#
# The below shows an implementation of session/auth/token but it should only be
# used from Authenticator Server with Secret Key retrieved in a secure manner only
# in memory
#
# def session_auth_token(self, secret_key: str, username: str, access_level: str, object_id: str):
# post_params = {
# 'secret_key': secret_key,
# 'username': username,
# 'access_level': access_level,
# 'id': object_id
# }
# response = self.post_to_endpoint('session/auth/token', post_data=post_params)
# return response
#
# USER Methods
#
def user__get(self, user_id: Optional[str] = None, name: Optional[str] = None) -> Union[Dict, List]:
endpoint = 'user/'
url_params = {}
if user_id is not None:
url_params['userid'] = user_id
if name is not None:
url_params['name'] = name
url = self.base_url + endpoint
response = self.session.get(url=url, params=url_params)
response.raise_for_status()
return response.json()
def user__post(self, username: str, password: <PASSWORD>, display_name: str, properties: Optional,
groups: Optional[List[str]] = None, user_type: str = 'LOCAL_USER',
tenant_id: Optional[str] = None, visibility: str = 'DEFAULT'):
endpoint = 'user'
post_data = {
'name': username,
'password': password,
'display_name': display_name,
'usertype': user_type,
'visibility': visibility
}
if properties is not None:
post_data['properties'] = properties
if groups is not None:
post_data['groups'] = json.dumps(groups)
if tenant_id is not None:
post_data['tenantid'] = tenant_id
url = self.base_url + endpoint
response = self.session.post(url=url, data=post_data)
response.raise_for_status()
return response.json()
def user__delete(self, user_guid: str):
endpoint = 'user/{}'.format(user_guid)
url = self.base_url + endpoint
response = self.session.delete(url=url)
response.raise_for_status()
return True
def user__put(self, user_guid: str, content, password: Optional[str]):
endpoint = 'user/{}'.format(user_guid)
post_data = {
}
if content is not None:
post_data['content'] = content
if password is not None:
post_data['password'] = password
url = self.base_url + endpoint
response = self.session.put(url=url, data=post_data)
response.raise_for_status()
return response.json()
def user_updatepassword(self, username: str, current_password: str, new_password: str) -> Dict:
endpoint = 'user/updatepassword'
post_data = {
'name': username,
'currentpassword': <PASSWORD>,
'newpassword': <PASSWORD>
}
url = self.base_url + endpoint
response = self.session.post(url=url, data=post_data)
response.raise_for_status()
return response.json()
# Implementation of the user/sync endpoint, which is fairly complex and runs a risk
# with the remove_deleted option set to true
#
# Uses a multi-part POST, with the type of the principals parameter set to application/json
def user_sync(
self,
principals_file: str,
password: str,
apply_changes: bool = False,
remove_deleted: bool = False
) -> Dict:
endpoint = 'user/sync'
# You must set the type of principals to 'application/json' or 'text/json'
files = {
'principals': ('principals.json', principals_file, 'application/json'),
'applyChanges': str(apply_changes).lower(),
'removeDelete': str(remove_deleted).lower(),
'password': password
}
url = self.base_url + endpoint
response = self.session.post(url=url, data=None, files=files)
response.raise_for_status()
return response.json()
def user_transfer_ownership(self, current_owner_username: str, new_owner_username: str,
object_guids: Optional[List[str]] = None) -> Dict:
endpoint = 'user/transfer/ownership'
url_params = {
'fromUserName': current_owner_username,
'toUserName': new_owner_username
}
if object_guids is not None:
url_params['objectid'] = json.dumps(object_guids)
url = self.base_url + endpoint
response = self.session.post(url=url, params=url_params)
response.raise_for_status()
return response.json()
# NOTE: preferences and preferencesProto are a big ?
def user_updatepreference(self, user_guid: str, username: str, preferences: Dict, preferencesProto: str) -> Dict:
endpoint = 'user/updatepreference'
post_data = {
'userid': user_guid,
'username': username,
'preferences': json.dumps(preferences),
'preferencesProto': preferencesProto
}
url = self.base_url + endpoint
response = self.session.post(url=url, data=post_data)
response.raise_for_status()
return response.json()
# Retrieves all USER and USER_GROUP objects
def user_list(self) -> Dict:
endpoint = 'user/list'
url = self.base_url + endpoint
response = self.session.get(url=url)
response.raise_for_status()
return response.json()
def user_email(self, user_guid: str, user_email: str):
endpoint = 'user/email'
post_data = {
'userid': user_guid,
'emailid': user_email
}
url = self.base_url + endpoint
response = self.session.put(url=url, data=post_data)
response.raise_for_status()
return response.json()
def user_groups__get(self, user_guid: str):
endpoint = 'user/{}/groups'.format(user_guid)
url = self.base_url + endpoint
response = self.session.get(url=url)
response.raise_for_status()
return response.json()
# Replaces all group | |
"""Environment for training the acceleration behavior of vehicles in a ring."""
import numpy as np
from gym.spaces import Box, Discrete
from flow.core import rewards
from flow.envs.ring.accel import AccelEnv
from flow.envs.multiagent.base import MultiEnv
# Required additional env_params for the environments below; __init__ raises
# KeyError when any of these is missing.
ADDITIONAL_ENV_PARAMS = {
    # maximum acceleration for autonomous vehicles, in m/s^2
    "max_accel": 1,
    # maximum deceleration for autonomous vehicles, in m/s^2
    "max_decel": 1,
    # desired velocity for all vehicles in the network, in m/s
    "target_velocity": 20,
}
# Discrete acceleration choices (m/s^2) used by MultiAgentEightEnv:
# index 0 decelerates, index 1 coasts, index 2 accelerates.
ACTION = [-1, 0, 1]
class AdversarialAccelEnv(AccelEnv, MultiEnv):
    """Adversarial multi-agent acceleration env.

    States
        Both the AV and the adversary observe the velocities and absolute
        positions of every vehicle in the network (a constant vehicle count
        is assumed).
    Actions
        * AV: a vector of bounded accelerations, one per autonomous vehicle,
          further bounded by the simulator's failsafes at every time step.
        * Adversary: a vector of perturbations that is weighted by
          'perturb_weight' and added directly to the AV agent's accelerations.
    Rewards
        * AV: the mean speed of all vehicles in the network.
        * Adversary: the negative of the AV agent's reward.
    Termination
        A rollout ends when the time horizon is reached or when two vehicles
        collide.
    """

    def _apply_rl_actions(self, rl_actions):
        """See class definition."""
        controllable = [veh_id for veh_id in self.sorted_ids
                        if veh_id in self.k.vehicle.get_rl_ids()]
        perturb_weight = self.env_params.additional_params['perturb_weight']
        combined_action = rl_actions['av'] + perturb_weight * rl_actions['adversary']
        self.k.vehicle.apply_acceleration(controllable, combined_action)

    def compute_reward(self, rl_actions, **kwargs):
        """Compute opposing rewards for the two agents.

        The AV receives the class-definition reward; the adversary receives
        its negation.
        """
        if self.env_params.evaluate:
            av_reward = np.mean(self.k.vehicle.get_speed(self.k.vehicle.get_ids()))
        else:
            av_reward = rewards.desired_velocity(self, fail=kwargs['fail'])
        return {'av': av_reward, 'adversary': -av_reward}

    def get_state(self, **kwargs):
        """Return the observation, identical for both agents: normalized speed
        and absolute position of every vehicle, flattened."""
        max_speed = self.k.network.max_speed()
        net_length = self.k.network.length()
        rows = [[self.k.vehicle.get_speed(veh_id) / max_speed,
                 self.k.vehicle.get_x_by_id(veh_id) / net_length]
                for veh_id in self.sorted_ids]
        flattened = np.array(rows).flatten()
        return {'av': flattened, 'adversary': flattened}
class MultiAgentAccelPOEnv(MultiEnv):
    """Multi-agent acceleration environment for shared policies.

    Trains autonomous vehicles to reach desired speeds in a decentralized
    fashion; applicable to both closed and open networks.

    Required from env_params:
    * max_accel: maximum acceleration for autonomous vehicles, in m/s^2
    * max_decel: maximum deceleration for autonomous vehicles, in m/s^2
    * target_velocity: desired velocity for all vehicles in the network, in m/s

    States
        Each agent (autonomous vehicle) observes the speeds and
        bumper-to-bumper headways of its immediate leader and follower, plus
        its own absolute position and speed — six values per agent.
    Actions
        One bounded scalar acceleration per agent, further bounded by the
        simulator's failsafes at every time step.
    Rewards
        flow.core.rewards.desired_velocity — the two-norm distance of the
        network speeds from "target_velocity" — shared by all agents.
    Termination
        A rollout ends when the time horizon is reached or when two vehicles
        collide.
    """

    def __init__(self, env_params, sim_params, network, simulator='traci'):
        """Validate required env params and initialize observation bookkeeping."""
        for required in ADDITIONAL_ENV_PARAMS.keys():
            if required not in env_params.additional_params:
                raise KeyError(
                    'Environment parameter "{}" not supplied'.format(required))
        # leaders/followers observed during the last get_state call; used by
        # additional_command for visualization
        self.leader = []
        self.follower = []
        super().__init__(env_params, sim_params, network, simulator)

    @property
    def action_space(self):
        """One bounded acceleration per agent."""
        bounds = self.env_params.additional_params
        return Box(
            low=-abs(bounds["max_decel"]),
            high=bounds["max_accel"],
            shape=(1, ),
            dtype=np.float32)

    @property
    def observation_space(self):
        """Six normalized features per agent (see get_state)."""
        return Box(low=-5, high=5, shape=(6,), dtype=np.float32)

    def _apply_rl_actions(self, rl_actions):
        """Apply each agent's acceleration to its own vehicle."""
        for rl_id in self.k.vehicle.get_rl_ids():
            self.k.vehicle.apply_acceleration(rl_id, rl_actions[rl_id])

    def compute_reward(self, rl_actions, **kwargs):
        """Shared desired-velocity reward, identical for every agent."""
        shared_reward = rewards.desired_velocity(self, fail=kwargs['fail'])
        return {rl_id: shared_reward for rl_id in self.k.vehicle.get_rl_ids()}

    def get_state(self, **kwargs):  # FIXME
        """Per-agent observation: normalized ego position and speed, plus the
        relative speed and headway of the immediate leader and follower.
        Invisible leaders are assumed at max speed / max headway; invisible
        followers at zero speed / max headway."""
        self.leader = []
        self.follower = []
        observations = {}
        # normalizing constants
        max_speed = self.k.network.max_speed()
        max_length = self.k.network.length()
        for rl_id in self.k.vehicle.get_rl_ids():
            ego_pos = self.k.vehicle.get_x_by_id(rl_id)
            ego_speed = self.k.vehicle.get_speed(rl_id)
            lead_id = self.k.vehicle.get_leader(rl_id)
            follow_id = self.k.vehicle.get_follower(rl_id)
            if lead_id in ["", None]:
                # leader not visible: assume free flow far ahead
                lead_speed = max_speed
                lead_head = max_length
            else:
                self.leader.append(lead_id)
                lead_speed = self.k.vehicle.get_speed(lead_id)
                lead_head = (self.k.vehicle.get_x_by_id(lead_id)
                             - ego_pos
                             - self.k.vehicle.get_length(rl_id))
            if follow_id in ["", None]:
                # follower not visible: assume stopped far behind
                follow_speed = 0
                follow_head = max_length
            else:
                self.follower.append(follow_id)
                follow_speed = self.k.vehicle.get_speed(follow_id)
                follow_head = self.k.vehicle.get_headway(follow_id)
            observations[rl_id] = np.array([
                ego_pos / max_length,
                ego_speed / max_speed,
                (lead_speed - ego_speed) / max_speed,
                lead_head / max_length,
                (ego_speed - follow_speed) / max_speed,
                follow_head / max_length
            ])
        return observations

    def additional_command(self):
        """Mark the leaders/followers seen in get_state as observed, for
        visualization purposes."""
        for veh_id in self.leader + self.follower:
            self.k.vehicle.set_observed(veh_id)

    def reset(self):
        """Clear per-rollout bookkeeping, then defer to the parent reset."""
        self.leader = []
        self.follower = []
        return super().reset()
class MultiAgentEightEnv(MultiEnv):
    """Multi-agent environment for the figure-eight network.

    Every RL vehicle picks one of three discrete actions (mapped to
    accelerations through the module-level ``ACTION`` table) and observes
    six normalized features describing itself, its leader and its follower.
    All agents share a single reward: the mean network speed, or ``-10`` on
    any collision.
    """

    def __init__(self, env_params, sim_params, network, simulator='traci'):
        # Fail fast if any required additional parameter is missing.
        for name in ADDITIONAL_ENV_PARAMS.keys():
            if name not in env_params.additional_params:
                raise KeyError(
                    'Environment parameter "{}" not supplied'.format(name))
        # Vehicles highlighted for visualization during the current rollout.
        self.leader = []
        self.follower = []
        super().__init__(env_params, sim_params, network, simulator)

    @property
    def action_space(self):
        """See class definition."""
        return Discrete(3)

    @property
    def observation_space(self):
        """See class definition."""
        return Box(low=-5, high=5, shape=(6,), dtype=np.float32)

    def _apply_rl_actions(self, rl_actions):
        """Map each agent's discrete choice onto an acceleration."""
        for agent_id in self.k.vehicle.get_rl_ids():
            choice = rl_actions[agent_id]
            self.k.vehicle.apply_acceleration(agent_id, ACTION[choice])

    def compute_reward(self, rl_actions, **kwargs):
        """Shared reward: scaled mean speed, replaced by -10 on a crash."""
        reward = np.mean(self.k.vehicle.get_speed(
            self.k.vehicle.get_ids())) / 100
        if self.k.simulation.check_collision():
            reward = -10
        # Every agent receives the same scalar.
        return {agent_id: reward for agent_id in self.k.vehicle.get_rl_ids()}

    def get_state(self, **kwargs):  # FIXME (kept from original)
        """Return one six-feature observation per RL vehicle.

        Features (roughly normalized to [-1, 1]): absolute position,
        own speed, leader relative speed, leader gap, follower relative
        speed, follower gap.
        """
        self.leader = []
        self.follower = []
        obs = {}
        # normalizing constants
        max_speed = self.k.network.max_speed()
        max_length = self.k.network.length()
        for agent_id in self.k.vehicle.get_rl_ids():
            pos = self.k.vehicle.get_x_by_id(agent_id)
            speed = self.k.vehicle.get_speed(agent_id)
            lead_id = self.k.vehicle.get_leader(agent_id)
            foll_id = self.k.vehicle.get_follower(agent_id)
            if lead_id in ["", None]:
                # leader not visible: pretend it is far away and fast
                lead_speed = max_speed
                lead_gap = max_length
            else:
                self.leader.append(lead_id)
                lead_speed = self.k.vehicle.get_speed(lead_id)
                lead_gap = (self.k.vehicle.get_x_by_id(lead_id)
                            - pos
                            - self.k.vehicle.get_length(agent_id))
            if foll_id in ["", None]:
                # follower not visible: pretend it is far away and stopped
                follow_speed = 0
                follow_gap = max_length
            else:
                self.follower.append(foll_id)
                follow_speed = self.k.vehicle.get_speed(foll_id)
                follow_gap = self.k.vehicle.get_headway(foll_id)
            obs[agent_id] = np.array([
                pos / max_length,
                speed / max_speed,
                (lead_speed - speed) / max_speed,
                lead_gap / max_length,
                (speed - follow_speed) / max_speed,
                follow_gap / max_length,
            ])
        return obs

    def additional_command(self):
        """See parent class.

        Flags every tracked leader/follower as "observed" for visualization.
        """
        for vehicle in self.leader + self.follower:
            self.k.vehicle.set_observed(vehicle)

    def reset(self):
        """See parent class.

        Empties the per-rollout bookkeeping lists before the new rollout.
        """
        self.leader, self.follower = [], []
        return super().reset()
class MultiAgentMinicityEnv(MultiEnv):
def __init__(self, env_params, sim_params, network, simulator='traci'):
for p in ADDITIONAL_ENV_PARAMS.keys():
if p not in env_params.additional_params:
raise KeyError(
'Environment parameter "{}" not supplied'.format(p))
self.leader = []
self.follower = []
super().__init__(env_params, sim_params, network, simulator)
@property
def action_space(self):
"""See class definition."""
return Discrete(3)
# return Box(
# low=-abs(self.env_params.additional_params["max_decel"]),
# high=self.env_params.additional_params["max_accel"],
# shape=(1, ),
# dtype=np.float32)
@property
def observation_space(self):
"""See class definition."""
return Box(low=-5, high=5, shape=(6,), dtype=np.float32)
# def state_space(self):
# """See class definition."""
# return Box(low=-5, high=5, shape=(6*N,), dtype=np.float32)
def _apply_rl_actions(self, rl_actions):
"""See class definition."""
for veh_id in | |
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
import array
import asyncio
import gc
import logging
import os
import re
import struct
import weakref
from functools import partial
from os import close as close_fd
from . import comm
from ._libs import ucx_api
from ._libs.arr import Array
from .continuous_ucx_progress import BlockingMode, NonBlockingMode
from .exceptions import UCXCanceled, UCXCloseError, UCXError
from .utils import hash64bits, nvtx_annotate
logger = logging.getLogger("ucx")
# The module should only instantiate one instance of the application context
# However, the init of CUDA must happen after all process forks thus we delay
# the instantiation of the application context to the first use of the API.
_ctx = None  # lazily-created singleton ApplicationContext; see _get_ctx()
def _get_ctx():
    """Return the process-wide ApplicationContext, creating it on first use."""
    global _ctx
    if _ctx is not None:
        return _ctx
    _ctx = ApplicationContext()
    return _ctx
async def exchange_peer_info(
    endpoint, msg_tag, ctrl_tag, guarantee_msg_order, listener
):
    """Helper that exchanges endpoint information with the peer.

    Each side sends a packed struct holding its message tag, control tag,
    the ``guarantee_msg_order`` flag, and a checksum over the three, then
    receives and validates the peer's equivalent.

    Parameters
    ----------
    endpoint
        The raw endpoint used for the two streaming calls.
    msg_tag, ctrl_tag : int
        Locally generated 64-bit tags advertised to the peer.
    guarantee_msg_order : bool
        Message-ordering flag; must be identical on both peers.
    listener : bool
        True on the listener (server) side, which sends first; the client
        receives first so the streaming calls interleave correctly.

    Returns
    -------
    dict
        Peer's ``msg_tag``, ``ctrl_tag``, ``guarantee_msg_order`` and
        ``checksum``.

    Raises
    ------
    RuntimeError
        If the peer's checksum does not match its advertised values.
    ValueError
        If the peers disagree on ``guarantee_msg_order``.
    """
    # Pack peer information incl. a checksum
    fmt = "QQ?Q"
    my_info = struct.pack(
        fmt,
        msg_tag,
        ctrl_tag,
        guarantee_msg_order,
        hash64bits(msg_tag, ctrl_tag, guarantee_msg_order),
    )
    peer_info = bytearray(len(my_info))
    my_info_arr = Array(my_info)
    peer_info_arr = Array(peer_info)
    # Send/recv peer information. Notice, we force an `await` between the two
    # streaming calls (see <https://github.com/rapidsai/ucx-py/pull/509>)
    if listener:  # idiomatic truthiness instead of `is True`
        await comm.stream_send(endpoint, my_info_arr, my_info_arr.nbytes)
        await comm.stream_recv(endpoint, peer_info_arr, peer_info_arr.nbytes)
    else:
        await comm.stream_recv(endpoint, peer_info_arr, peer_info_arr.nbytes)
        await comm.stream_send(endpoint, my_info_arr, my_info_arr.nbytes)
    # Unpacking and sanity check of the peer information
    ret = {}
    (
        ret["msg_tag"],
        ret["ctrl_tag"],
        ret["guarantee_msg_order"],
        ret["checksum"],
    ) = struct.unpack(fmt, peer_info)
    expected_checksum = hash64bits(
        ret["msg_tag"], ret["ctrl_tag"], ret["guarantee_msg_order"]
    )
    if expected_checksum != ret["checksum"]:
        raise RuntimeError(
            f'Checksum invalid! {hex(expected_checksum)} != {hex(ret["checksum"])}'
        )
    if ret["guarantee_msg_order"] != guarantee_msg_order:
        raise ValueError("Both peers must set guarantee_msg_order identically")
    return ret
class CtrlMsg:
    """Implementation of control messages.

    A single opcode exists for now: ``1`` (shutdown).  It carries
    ``close_after_n_recv`` — how many messages the receiving worker should
    accept before closing.
    """

    # Wire layout: two unsigned 64-bit integers (opcode, close_after_n_recv).
    fmt = "QQ"
    nbytes = struct.calcsize(fmt)

    @staticmethod
    def serialize(opcode, close_after_n_recv):
        """Pack ``(opcode, close_after_n_recv)`` into the wire format."""
        return struct.pack(CtrlMsg.fmt, int(opcode), int(close_after_n_recv))

    @staticmethod
    def deserialize(serialized_bytes):
        """Unpack wire bytes into an ``(opcode, close_after_n_recv)`` tuple."""
        return struct.unpack(CtrlMsg.fmt, serialized_bytes)

    @staticmethod
    def handle_ctrl_msg(ep_weakref, log, msg, future):
        """Callback fired when the control-message receive completes."""
        try:
            future.result()
        except UCXCanceled:
            return  # The ctrl signal was canceled
        logger.debug(log)
        ep = ep_weakref()
        if ep is None or ep.closed():
            if ep is not None:
                ep.abort()
            return  # The endpoint is closed
        opcode, close_after_n_recv = CtrlMsg.deserialize(msg)
        if opcode != 1:
            raise UCXError("Received unknown control opcode: %s" % opcode)
        ep.close_after_n_recv(close_after_n_recv, count_from_ep_creation=True)

    @staticmethod
    def setup_ctrl_recv(ep):
        """Arm the receive of the shutdown control message on *ep*."""
        log = "[Recv shutdown] ep: %s, tag: %s" % (
            hex(ep.uid),
            hex(ep._tags["ctrl_recv"]),
        )
        buf = bytearray(CtrlMsg.nbytes)
        buf_arr = Array(buf)
        pending = comm.tag_recv(
            ep._ep, buf_arr, buf_arr.nbytes, ep._tags["ctrl_recv"], name=log
        )
        # The callback needs the raw buffer to deserialize once data lands.
        pending.add_done_callback(
            partial(CtrlMsg.handle_ctrl_msg, weakref.ref(ep), log, buf)
        )
async def _listener_handler_coroutine(
    conn_request, ctx, func, guarantee_msg_order, endpoint_error_handling
):
    """Accept an incoming connection and hand a ready Endpoint to *func*.

    Runs server-side for every connection request received by a listener;
    *func* is the user callback and may be a plain function or a coroutine.
    """
    # We create the Endpoint in five steps:
    # 1) Create endpoint from conn_request
    # 2) Generate unique IDs to use as tags
    # 3) Exchange endpoint info such as tags
    # 4) Setup control receive callback
    # 5) Execute the listener's callback function
    endpoint = ucx_api.UCXEndpoint.create_from_conn_request(
        ctx.worker, conn_request, endpoint_error_handling
    )
    # Random seed keeps tags unique even if two endpoints share a handle value.
    seed = os.urandom(16)
    msg_tag = hash64bits("msg_tag", seed, endpoint.handle)
    ctrl_tag = hash64bits("ctrl_tag", seed, endpoint.handle)
    peer_info = await exchange_peer_info(
        endpoint=endpoint,
        msg_tag=msg_tag,
        ctrl_tag=ctrl_tag,
        guarantee_msg_order=guarantee_msg_order,
        listener=True,
    )
    # We send on the peer's tags and receive on our own.
    tags = {
        "msg_send": peer_info["msg_tag"],
        "msg_recv": msg_tag,
        "ctrl_send": peer_info["ctrl_tag"],
        "ctrl_recv": ctrl_tag,
    }
    ep = Endpoint(
        endpoint=endpoint, ctx=ctx, guarantee_msg_order=guarantee_msg_order, tags=tags
    )
    logger.debug(
        "_listener_handler() server: %s, error handling: %s, msg-tag-send: %s, "
        "msg-tag-recv: %s, ctrl-tag-send: %s, ctrl-tag-recv: %s"
        % (
            hex(endpoint.handle),
            endpoint_error_handling,
            hex(ep._tags["msg_send"]),
            hex(ep._tags["msg_recv"]),
            hex(ep._tags["ctrl_send"]),
            hex(ep._tags["ctrl_recv"]),
        )
    )
    # Setup the control receive
    CtrlMsg.setup_ctrl_recv(ep)
    # Removing references here to avoid delayed clean up
    del ctx
    # Finally, we call `func`
    if asyncio.iscoroutinefunction(func):
        await func(ep)
    else:
        func(ep)
def _listener_handler(
    conn_request, callback_func, ctx, guarantee_msg_order, endpoint_error_handling
):
    """Synchronous shim: schedule the async listener handler on the loop."""
    coro = _listener_handler_coroutine(
        conn_request,
        ctx,
        callback_func,
        guarantee_msg_order,
        endpoint_error_handling,
    )
    asyncio.ensure_future(coro)
def _epoll_fd_finalizer(epoll_fd, progress_tasks):
    """Finalizer for ApplicationContext: stop progress tasks, close epoll fd.

    Registered via ``weakref.finalize`` in ``ApplicationContext.__init__``.
    """
    assert epoll_fd >= 0
    # Notice, progress_tasks must be cleared before we close
    # epoll_fd
    progress_tasks.clear()
    close_fd(epoll_fd)
class ApplicationContext:
"""
The context of the Asyncio interface of UCX.
"""
def __init__(self, config_dict={}, blocking_progress_mode=None):
self.progress_tasks = []
# For now, a application context only has one worker
self.context = ucx_api.UCXContext(config_dict)
self.worker = ucx_api.UCXWorker(self.context)
if blocking_progress_mode is not None:
self.blocking_progress_mode = blocking_progress_mode
elif "UCXPY_NON_BLOCKING_MODE" in os.environ:
self.blocking_progress_mode = False
else:
self.blocking_progress_mode = True
if self.blocking_progress_mode:
self.epoll_fd = self.worker.init_blocking_progress_mode()
weakref.finalize(
self, _epoll_fd_finalizer, self.epoll_fd, self.progress_tasks
)
def create_listener(
self,
callback_func,
port=0,
guarantee_msg_order=False,
endpoint_error_handling=None,
):
"""Create and start a listener to accept incoming connections
callback_func is the function or coroutine that takes one
argument -- the Endpoint connected to the client.
Notice, the listening is closed when the returned Listener
goes out of scope thus remember to keep a reference to the object.
Parameters
----------
callback_func: function or coroutine
A callback function that gets invoked when an incoming
connection is accepted
port: int, optional
An unused port number for listening, or `0` to let UCX assign
an unused port.
guarantee_msg_order: boolean, optional
Whether to guarantee message order or not. Remember, both peers
of the endpoint must set guarantee_msg_order to the same value.
endpoint_error_handling: None or boolean, optional
Enable endpoint error handling raising exceptions when an error
occurs, may incur in performance penalties but prevents a process
from terminating unexpectedly that may happen when disabled.
None (default) will enable endpoint error handling based on the
UCX version, enabling for UCX >= 1.11.0 and disabled for any
versions prior to that. This is done to prevent CUDA IPC to be
quietly disabled due to lack of support in older UCX versions.
Explicitly specifying True/False will override the default.
Returns
-------
Listener
The new listener. When this object is deleted, the listening stops
"""
self.continuous_ucx_progress()
if port is None:
port = 0
if endpoint_error_handling is None:
endpoint_error_handling = get_ucx_version() >= (1, 11, 0)
logger.info("create_listener() - Start listening on port %d" % port)
ret = Listener(
ucx_api.UCXListener(
worker=self.worker,
port=port,
cb_func=_listener_handler,
cb_args=(
callback_func,
self,
guarantee_msg_order,
endpoint_error_handling,
),
)
)
return ret
    async def create_endpoint(
        self, ip_address, port, guarantee_msg_order, endpoint_error_handling=None
    ):
        """Create a new endpoint connected to a server.

        Parameters
        ----------
        ip_address: str
            IP address of the server the endpoint should connect to
        port: int
            Port of the server the endpoint should connect to
        guarantee_msg_order: boolean, optional
            Whether to guarantee message order or not. Remember, both peers
            of the endpoint must set guarantee_msg_order to the same value.
        endpoint_error_handling: None or boolean, optional
            Enable endpoint error handling raising exceptions when an error
            occurs, may incur in performance penalties but prevents a process
            from terminating unexpectedly that may happen when disabled.
            None (default) will enable endpoint error handling based on the
            UCX version, enabling for UCX >= 1.11.0 and disabled for any
            versions prior to that. This is done to prevent CUDA IPC to be
            quietly disabled due to lack of support in older UCX versions.
            Explicitly specifying True/False will override the default.
        Returns
        -------
        Endpoint
            The new endpoint
        """
        self.continuous_ucx_progress()
        if endpoint_error_handling is None:
            endpoint_error_handling = get_ucx_version() >= (1, 11, 0)
        ucx_ep = ucx_api.UCXEndpoint.create(
            self.worker, ip_address, port, endpoint_error_handling
        )
        self.worker.progress()
        # We create the Endpoint in three steps:
        # 1) Generate unique IDs to use as tags
        # 2) Exchange endpoint info such as tags
        # 3) Use the info to create an endpoint
        seed = os.urandom(16)
        msg_tag = hash64bits("msg_tag", seed, ucx_ep.handle)
        ctrl_tag = hash64bits("ctrl_tag", seed, ucx_ep.handle)
        peer_info = await exchange_peer_info(
            endpoint=ucx_ep,
            msg_tag=msg_tag,
            ctrl_tag=ctrl_tag,
            guarantee_msg_order=guarantee_msg_order,
            listener=False,
        )
        # We send on the peer's tags and receive on our own.
        tags = {
            "msg_send": peer_info["msg_tag"],
            "msg_recv": msg_tag,
            "ctrl_send": peer_info["ctrl_tag"],
            "ctrl_recv": ctrl_tag,
        }
        ep = Endpoint(
            endpoint=ucx_ep,
            ctx=self,
            guarantee_msg_order=guarantee_msg_order,
            tags=tags,
        )
        logger.debug(
            "create_endpoint() client: %s, error handling: %s, msg-tag-send: %s, "
            "msg-tag-recv: %s, ctrl-tag-send: %s, ctrl-tag-recv: %s"
            % (
                hex(ep._ep.handle),
                endpoint_error_handling,
                hex(ep._tags["msg_send"]),
                hex(ep._tags["msg_recv"]),
                hex(ep._tags["ctrl_send"]),
                hex(ep._tags["ctrl_recv"]),
            )
        )
        # Setup the control receive
        CtrlMsg.setup_ctrl_recv(ep)
        return ep
def continuous_ucx_progress(self, event_loop=None):
"""Guarantees continuous UCX progress
Use this function to associate UCX progress with an event loop.
Notice, multiple event loops can be associate with UCX progress.
This function is automatically called when calling
`create_listener()` or `create_endpoint()`.
Parameters
----------
event_loop: asyncio.event_loop, optional
The event loop to evoke UCX progress. If None,
`asyncio.get_event_loop()` is used.
"""
loop = event_loop if event_loop is not None else | |
<reponame>hpaugam33/vehicule-detection
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ----------------------------------------------
# --- Author : <NAME>
# --- Mail : <EMAIL>
# --- Date : 27th January 2018
# ----------------------------------------------
# Imports
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import cv2
import numpy as np
import csv
import time
import imageio
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
#Import interface graphique
import tkinter as tk, threading
from tkinter.messagebox import *
from tkinter.filedialog import *
from PIL import ImageTk, Image
# Object detection imports
from utils import label_map_util
from utils import visualization_utils as vis_util
"""
#Ecriture
label = Label(window, text="Vehicule Detection")
#bouton
boutonClose=Button(window, text="Fermer", command=window.quit)
# entrée
value = StringVar()
value.set("texte par défaut")
entree = Entry(window, textvariable=value, width=30)
# checkbutton
checkBouton = Checkbutton(window, text="Nouveau?")
# radiobutton
value = StringVar()
bouton1 = Radiobutton(window, text="Oui", variable=value, value=1)
bouton2 = Radiobutton(window, text="Non", variable=value, value=2)
bouton3 = Radiobutton(window, text="Peu être", variable=value, value=3)
# liste
liste = Listbox(window)
liste.insert(1, "Python")
liste.insert(2, "PHP")
liste.insert(3, "jQuery")
liste.insert(4, "CSS")
liste.insert(5, "Javascript")
# canvas
canvas = Canvas(window, width=150, height=150, background='blue')
ligne1 = canvas.create_line(75, 0, 75, 150)
ligne2 = canvas.create_line(0, 75, 150, 75)
txt = canvas.create_text(75, 60, text="Cible", font="Arial 16 italic", fill="blue")
#Possibilities of creation for a canvas
#create_arc() : arc de cercle
#create_bitmap() : bitmap
#create_image() : image
#create_line() : ligne
#create_oval() : ovale
#create_polygon() : polygone
#create_rectangle() : rectangle
#create_text() : texte
#create_window() : window
#Scale
value = DoubleVar()
scale = Scale(window, variable=value)
#Frames
# frame 1
Frame1 = Frame(window, borderwidth=2, relief=GROOVE)
Frame1.pack(side=LEFT, padx=30, pady=30)
# frame 2
Frame2 = Frame(window, borderwidth=2, relief=GROOVE)
Frame2.pack(side=LEFT, padx=10, pady=10)
# frame 3 dans frame 2
Frame3 = Frame(Frame2, bg="white", borderwidth=2, relief=GROOVE)
Frame3.pack(side=RIGHT, padx=5, pady=5)
# Ajout de labels
Label(Frame1, text="Frame 1").pack(padx=10, pady=10)
Label(Frame2, text="Frame 2").pack(padx=10, pady=10)
Label(Frame3, text="Frame 3",bg="white").pack(padx=10, pady=10)
#PanedWindow
p = PanedWindow(window, orient=HORIZONTAL)
p.pack(side=BOTTOM, expand=N, fill=BOTH, pady=0, padx=0)
p.add(Label(p, text='Volet 1', background='blue', anchor=CENTER))
p.add(Label(p, text='Volet 2', background='white', anchor=CENTER) )
p.add(Label(p, text='Volet 3', background='red', anchor=CENTER) )
#SpinBox
s = Spinbox(window, from_=0, to=10)
#Message Erreur
def callback():
if askyesno('Titre 1', 'Êtes-vous sûr de vouloir faire ça?'):
showwarning('Titre 2', 'Tant pis...')
else:
showinfo('Titre 3', 'Vous avez peur!')
showerror("Titre 4", "Aha")
Button(text='Action', command=callback).pack()
#Barre de Menus
def alert():
showinfo("alerte", "Bravo!")
menubar = Menu(window)
menu1 = Menu(menubar, tearoff=0)
menu1.add_command(label="Créer", command=alert)
menu1.add_command(label="Editer", command=alert)
menu1.add_separator()
menu1.add_command(label="Quitter", command=window.quit)
menubar.add_cascade(label="Fichier", menu=menu1)
menu2 = Menu(menubar, tearoff=0)
menu2.add_command(label="Couper", command=alert)
menu2.add_command(label="Copier", command=alert)
menu2.add_command(label="Coller", command=alert)
menubar.add_cascade(label="Editer", menu=menu2)
menu3 = Menu(menubar, tearoff=0)
menu3.add_command(label="A propos", command=alert)
menubar.add_cascade(label="Aide", menu=menu3)
window.config(menu=menubar)
#Change le curseur quand on survole le bouton clock
Button(window, text ="clock", relief=RAISED, cursor="clock").pack()
#Recupere valeur Input
def recupere():
showinfo("Alerte", entree.get())
value = StringVar()
value.set("Valeur")
entree = Entry(window, textvariable=value, width=30)
entree.pack()
bouton = Button(window, text="Valider", command=recupere)
bouton.pack()
#Ouvre un fichier
filepath = askopenfilename(title="Ouvrir une image",filetypes=[('png files','.png'),('all files','.*')])
photo = PhotoImage(file=filepath)
canvas = Canvas(window, width=photo.width(), height=photo.height(), bg="yellow")
canvas.create_image(0, 0, anchor=NW, image=photo)
canvas.pack()
#Evenements
def clavier(event):
touche = event.keysym
print(touche)
canvas = Canvas(window, width=500, height=500)
canvas.focus_set()
canvas.bind("<Key>", clavier)
canvas.pack()
#Exemple de fonction pour bouger un carre avec fleches
# fonction appellée lorsque l'utilisateur presse une touche
def clavier(event):
global coords
touche = event.keysym
if touche == "Up":
coords = (coords[0], coords[1] - 10)
elif touche == "Down":
coords = (coords[0], coords[1] + 10)
elif touche == "Right":
coords = (coords[0] + 10, coords[1])
elif touche == "Left":
coords = (coords[0] -10, coords[1])
# changement de coordonnées pour le rectangle
canvas.coords(rectangle, coords[0], coords[1], coords[0]+25, coords[1]+25)
# création du canvas
canvas = Canvas(window, width=250, height=250, bg="ivory")
# coordonnées initiales
coords = (0, 0)
# création du rectangle
rectangle = canvas.create_rectangle(0,0,25,25,fill="violet")
# ajout du bond sur les touches du clavier
canvas.focus_set()
canvas.bind("<Key>", clavier)
# création du canvas
canvas.pack()
liste.pack()
entree.pack()
label.pack()
checkBouton.pack()
s.pack()
bouton1.pack()
bouton2.pack()
bouton3.pack()
canvas.pack()
scale.pack()
p.pack()
boutonClose.pack()
#Relief
b1 = Button(window, text ="FLAT", relief=FLAT).pack()
b2 = Button(window, text ="RAISED", relief=RAISED).pack()
b3 = Button(window, text ="SUNKEN", relief=SUNKEN).pack()
b4 = Button(window, text ="GROOVE", relief=GROOVE).pack()
b5 = Button(window, text ="RIDGE", relief=RIDGE).pack()
"""
# Use cam or video recorded ?
# Adjust the line to the image
# Select your video from the database
VIDEO_FOLDER = 'video dataset'
#filepath = \
#    askopenfilename(title="Open your video",filetypes=[('mp4 files','.mp4'), ('mkv files','.mkv'),('avi files','.avi'),('all files','.*')])
#cap = cv2.VideoCapture(filepath)
# Capture from device index 1 (an attached camera), not a file on disk.
cap = cv2.VideoCapture(1)
# frame dimension (VideoCapture.get ids: 3 = frame width, 4 = frame height)
width = cap.get(3)
height = cap.get(4)
# Frame number (id 7 = total frame count; typically 0/-1 for live cameras)
frame_number = cap.get(7)
# Time of the detected object (id 0 = position in milliseconds)
# NOTE(review): this rebinds the name of the imported `time` module at module
# scope — rename the variable if the time module is needed later.
time= cap.get(0)
# Select a certain frame in the video to calibrate
chosen_frame = frame_number/2
# id 2 = relative position (expected range 0..1); the value 10 looks like a
# leftover calibration experiment — confirm before relying on it.
cap.set(2,10);
# Capture from camera
#cap = cv2.VideoCapture(0)
# Window creation
window = Tk()
window.title("Vehicule detection")
window['bg']='white'
# Create a frame for the video
videoFrame= Frame(window, bg="white")
videoFrame.pack(side=LEFT, padx=2, pady=2)
#videoFrame.grid()
# Create a frame for the user parameters
configFrame = Frame(window, bg="white", borderwidth=2, relief=GROOVE)
configFrame.pack(side=RIGHT, padx=0, pady=0)
# Add Labels for the frames
#Label(videoFrame, text="Vidéo Output").pack(padx=10, pady=10)
Label(configFrame, text="User Parameters").pack(padx=10, pady=10)
# Create a label in the frame; video_stream() below pushes frames into it.
lmain = Label(videoFrame)
#lmain.grid()
canvas = Canvas(window, width=width, height = height)
canvas.pack()
# Menu bar
def alert():
    # Placeholder handler shared by most menu entries.
    showinfo("alerte", "Bravo!")
menubar = Menu(window)
menu1 = Menu(menubar, tearoff=0)
menu1.add_command(label="Créer", command=alert)
menu1.add_command(label="Editer", command=alert)
menu1.add_separator()
menu1.add_command(label="Quitter", command=window.quit)
menubar.add_cascade(label="Fichier", menu=menu1)
menu2 = Menu(menubar, tearoff=0)
menu2.add_command(label="Couper", command=alert)
menu2.add_command(label="Copier", command=alert)
menu2.add_command(label="Coller", command=alert)
menubar.add_cascade(label="Editer", menu=menu2)
menu3 = Menu(menubar, tearoff=0)
menu3.add_command(label="A propos", command=alert)
menubar.add_cascade(label="Aide", menu=menu3)
window.config(menu=menubar)
""" Organize the config frame """
# Flag meant to be flipped by retour() when the user submits a new x0 value.
changex0 = False
def retour():
    """Callback for the "Valider" button: flag that x0 has been edited.

    Bug fix: the original assigned ``changex0 = True`` without a ``global``
    declaration, creating a function-local that was discarded on return, so
    the module-level flag read by ``video_stream`` never changed and the
    button had no effect.
    """
    global changex0
    changex0 = True
    print(changex0)
# Input row: "x0" label + text entry + "Valider" button, used to override the
# ROI line's x origin.
lineParameters = Frame(configFrame, bg="white", borderwidth=2, relief=GROOVE)
lineParameters.pack()
param1 = Label(lineParameters, text="x0", bg="white")
param1.pack( side = LEFT)
# StringVar backing the entry; note Entry values are read back as strings
# (the print below shows <class 'str'>), not numbers.
valueX0Line = StringVar()
x0value = Entry(lineParameters, textvariable=valueX0Line, width=20)
x0value.pack(side = RIGHT)
print(type(valueX0Line.get()))
Button (lineParameters, text = "Valider", command=retour).pack(side = RIGHT)
# Scales (currently only displayed; their values are not read anywhere visible)
value = DoubleVar()
horizontalScale = Scale(configFrame, orient='horizontal', from_=0, to=10, resolution=0.1, tickinterval=2, length=300, label='Horizontal Size')
verticalScale = Scale(configFrame, orient='vertical', from_=0, to=10, resolution=0.1, tickinterval=2, length=300, label='Vertical Size')
horizontalScale.pack()
verticalScale.pack()
# function for video streaming
def video_stream():
    """Grab one camera frame, display it in `lmain`, and re-schedule itself.

    Runs on the Tk event loop via `after`; also draws the ROI line on the
    shared canvas every tick.
    """
    # NOTE(review): `ret` is never checked — a failed read leaves `frame` as
    # None and cvtColor will raise; confirm whether that is acceptable here.
    ret, frame = cap.read()
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
    img = Image.fromarray(cv2image)
    imgtk = ImageTk.PhotoImage(image=img)
    # Keep a reference on the widget — presumably so the PhotoImage is not
    # garbage collected while displayed; verify.
    lmain.imgtk = imgtk
    lmain.config(image=imgtk)
    # Re-arm this function on the Tk event loop (~every 1 ms).
    lmain.after(1, video_stream)
    image = canvas.create_image(width/2, height/2, image=imgtk)
    if changex0 == False :
        line = canvas.create_line(0, height/2, width,height/2 , fill="red", width=2)
    else :
        # This branch only runs once the module-level `changex0` flag is True;
        # verify that retour() actually updates the module-level flag.  Also
        # note valueX0Line.get() yields a string coordinate (Tk accepts it).
        print(changex0)
        line = canvas.create_line(valueX0Line.get(), height/2, width,height/2 , fill="red", width=2)
# Start the streaming loop, then hand control to Tk; everything below only
# runs after the main window has been closed.
video_stream()
window.mainloop()
# Creation of a CSV file recording detection data
with open('traffic_measurement.csv', 'w') as f:
    writer = csv.writer(f)
    # Categories recorded
    csv_line = \
        'Vehicle Type/Size, Vehicle Movement Direction, Vehicle Speed (km/h), Time (s)'
    writer.writerows([csv_line.split(',')])
if tf.__version__ < '1.4.0':
    raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!'
                      )
# Variables
total_passed_vehicle = 0  # using it to count vehicles
# By default I use an "SSD with Mobilenet" model here. See the detection model zoo (https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# What model to download.
MODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09' #'ssdlite_mobilenet_v2_coco_2018_05_097'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = \
    'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
#oid_bbox_trainable_label_map.pbtxt
#pascal_label_map.pbtxt
NUM_CLASSES = 90
# Download Model
# uncomment if you have not download the model yet
# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# Loading label map
# Label maps map indices to category names, so that when our convolution network predicts 5, we know that this corresponds to airplane. Here I use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map,
        max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Helper code
def load_image_into_numpy_array(image):
    """Convert a PIL-style RGB image into an (height, width, 3) uint8 array.

    Bug fix: the original reshaped to ``(im_height, im_width, 1)``, but for
    an RGB image ``image.getdata()`` yields one 3-tuple per pixel, making
    ``np.array(...)`` of shape (w*h, 3) — the single-channel reshape raised
    ``ValueError``.  The canonical TF object-detection helper uses 3
    channels.
    """
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)
# Detection
def object_detection_function():
total_passed_vehicle = 0
speed = 'waiting...'
direction = 'waiting...'
size = 'waiting...'
time_s = 0
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# for all the frames that are extracted from input video
while cap.isOpened():
(ret, frame) = cap.read()
if not ret:
print ('end of the video file...')
break
input_frame = frame
time_ms = cap.get(0)
time_s = int(time_ms/1000)
time = str(time_s)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(input_frame, axis=0)
# Actual detection.
(boxes, scores, classes, num) = \
sess.run([detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
#print([category_index.get(i) for i in classes[0]])
#print(scores)
# Visualization of the results of a detection.
(counter, csv_line) = \
vis_util.visualize_boxes_and_labels_on_image_array(
cap.get(1),
input_frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
time_s,
use_normalized_coordinates=True,
line_thickness=3,
)
total_passed_vehicle = total_passed_vehicle + counter
# insert information text to video frame
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(
input_frame,
'Detected Vehicles: ' + str(total_passed_vehicle),
(10, 35),
font,
0.8,
(0, 0xFF, 0xFF),
2,
cv2.FONT_HERSHEY_SIMPLEX,
)
# when the vehicle passed over line and counted, make the color of ROI line green
line_width = int(width);
line_height = int(height/1.2);
if counter == 1:
cv2.line(input_frame, (0, line_height), (line_width, line_height), (0, 0xFF, 0), 5)
else:
cv2.line(input_frame, (0, line_height), (line_width, line_height), (0, 0, 0xFF), 5)
# insert information text to video frame
cv2.rectangle(input_frame, (10, 275), (230, 337), (180, 132, 109), -1)
cv2.putText(
input_frame,
'ROI Line',
((int)(line_width*0.8), line_height -30),
font,
0.6,
| |
<reponame>mfisherlevine/astroquery
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Basic imports
import copy
import io
import re
import warnings
# Import various astropy modules
import astropy.coordinates as coord
import astropy.units as u
import astropy.units.cds as cds
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.io import ascii
from astropy.io.votable import parse_single_table
from astropy.table import QTable
from astropy.utils import deprecated, deprecated_renamed_argument
from astropy.utils.exceptions import AstropyWarning
# Import astroquery utilities
from astroquery.exceptions import (InputWarning, InvalidQueryError, NoResultsWarning,
RemoteServiceError)
from astroquery.query import BaseQuery
from astroquery.utils import async_to_sync, commons
from astroquery.utils.class_or_instance import class_or_instance
from astroquery.ipac.nexsci.nasa_exoplanet_archive import conf
# Import TAP client
import pyvo
# Objects exported when calling from astroquery.ipac.nexsci.nasa_exoplanet_archive import *
__all__ = ["NasaExoplanetArchive", "NasaExoplanetArchiveClass"]
# Dictionary mapping unit strings (as they appear in archive column metadata)
# to astropy units; None means no astropy equivalent is attached.
UNIT_MAPPER = {
    "--": None,
    "BJD": None,  # TODO: optionally support mapping columns to Time objects
    "BKJD": None,  # TODO: optionally support mapping columns to Time objects
    "D_L": u.pc,
    "D_S": u.pc,
    "Earth flux": u.L_sun / (4 * np.pi * u.au**2),
    "Earth Flux": u.L_sun / (4 * np.pi * u.au**2),
    "Fearth": u.L_sun / (4 * np.pi * u.au**2),
    "M_E": u.M_earth,
    "Earth Mass": u.M_earth,
    "Mearth": u.M_earth,
    "M_J": u.M_jupiter,
    "Mjupiter": u.M_jupiter,
    "Jupiter Mass": u.M_jupiter,
    "R_Earth": u.R_earth,  # Add u.R_jupiter
    "Rearth": u.R_earth,
    "Earth Radius": u.R_earth,
    "Jupiter Radius": u.R_jupiter,
    "Rjupiter": u.R_jupiter,
    "Searth": u.L_sun / (4 * np.pi * u.au**2),
    "R_Sun": u.R_sun,
    "Rstar": u.R_sun,
    "a_perp": u.au,
    "arc-sec/year": u.arcsec / u.yr,
    "cm/s**2": u.cm / u.s ** 2,
    "g/cm**3": u.g / u.cm ** 3,
    "day": u.day,
    "days": u.day,
    "degrees": u.deg,
    "dexincgs": u.dex(u.cm / u.s ** 2),
    "hours": u.hr,
    "hrs": u.hr,
    "kelvin": u.K,
    "logLsun": u.dex(u.L_sun),
    "log(Lsun)": u.dex(u.L_sun),
    "log(Solar)": u.dex(u.L_sun),
    "mags": u.mag,
    "microas": u.uas,
    "perc": u.percent,
    "pi_E": None,
    "pi_EE": None,
    "pi_EN": None,
    "pi_rel": None,
    "ppm": cds.ppm,
    "seconds": u.s,
    "Solar mass": u.M_sun,
    "solarradius": u.R_sun,
    "Solar Radius": u.R_sun,
    "log10(cm/s**2)": u.dex(u.cm / u.s ** 2),
    "log(cm/s**2)": u.dex(u.cm / u.s ** 2),
    "dex": u.dex(None),
    "sexagesimal": None
}
# Column converters applied when parsing results (keep koi_quarters as str).
CONVERTERS = dict(koi_quarters=[ascii.convert_numpy(str)])
# 'ps' and 'pscomppars' are the main tables of detected exoplanets.
# Calls to the old tables ('exoplanets', 'compositepars', 'exomultpars') will
# return errors and urge the user to call the 'ps' or 'pscomppars' tables
OBJECT_TABLES = {"ps": "pl_", "pscomppars": "pl_", "exoplanets": "pl_",
                 "compositepars": "fpl_", "exomultpars": "mpl_"}
# Maps deprecated table names to the replacement named in the warning message.
MAP_TABLEWARNINGS = {"exoplanets": "Planetary Systems (PS)",
                     "compositepars": "Planetary System Composite Parameters table (PSCompPars)",
                     "exomultpars": "Planetary Systems (PS)"}
def get_access_url(service='tap'):
    """Return the Exoplanet Archive base URL for the given service.

    Parameters
    ----------
    service : str, optional
        Either ``'tap'`` (default) or ``'api'``.

    Returns
    -------
    str
        The configured base URL for that service.

    Raises
    ------
    ValueError
        If *service* is neither ``'tap'`` nor ``'api'``.  (The original
        fell through and raised an opaque ``UnboundLocalError``.)
    """
    if service == 'tap':
        url = conf.url_tap
    elif service == 'api':
        url = conf.url_api
    else:
        raise ValueError(
            "Unknown service: {!r} (expected 'tap' or 'api')".format(service))
    return url
def get_tap_tables():
    """Tables accessed by API are gradually migrating to TAP service. Generate current list of tables in TAP."""
    tap_service = pyvo.dal.tap.TAPService(baseurl=conf.url_tap)
    result = tap_service.search(query="select * from TAP_SCHEMA.tables", language="ADQL")
    names = result["table_name"].data
    # Older astropy returns bytes, newer returns str; in both cases drop the
    # TAP_SCHEMA bookkeeping tables from the listing.
    if commons.ASTROPY_LT_4_1:
        return [name.decode() for name in names if b"TAP_SCHEMA." not in name]
    return [name for name in names if "TAP_SCHEMA." not in name]
class InvalidTableError(InvalidQueryError):
    """Exception thrown if the given table is not recognized by the Exoplanet
    Archive servers — including requests for the retired tables listed in
    MAP_TABLEWARNINGS (raised by query_criteria_async)."""
    pass
# Class decorator, async_to_sync, modifies NasaExoplanetArchiveClass to convert
# all query_x_async methods to query_x methods
@async_to_sync
class NasaExoplanetArchiveClass(BaseQuery):
"""
The interface for querying the NASA Exoplanet Archive TAP and API services
A full discussion of the available tables and query syntax is available on the documentation
pages for `TAP <https://exoplanetarchive.ipac.caltech.edu/docs/TAP/usingTAP.html>`_ and
`API <https://exoplanetarchive.ipac.caltech.edu/docs/program_interfaces.html>`_.
"""
# When module us imported, __init__.py runs and loads a configuration object,
# setting the configuration parameters con.url, conf.timeout and conf.cache
URL_API = conf.url_api
URL_TAP = conf.url_tap
TIMEOUT = conf.timeout
CACHE = conf.cache
# Make TAP_TABLES an attribute of NasaExoplanetArchiveClass
@property
def TAP_TABLES(self):
if not hasattr(self, '_tap_tables'):
self._tap_tables = get_tap_tables()
return self._tap_tables
# Ensures methods can be called either as class methods or instance methods. This is the basic query method.
@class_or_instance
def query_criteria_async(self, table, get_query_payload=False, cache=None, **criteria):
"""
Search a table given a set of criteria or return the full table
The syntax for these queries is described on the Exoplanet Archive TAP[1]_ API[2]_ documentation pages.
In particular, the most commonly used criteria will be ``select`` and ``where``.
Parameters
----------
table : str
The name of the table to query. A list of the tables on the Exoplanet Archive can be
found on the documentation pages [1]_, [2]_.
get_query_payload : bool, optional
Just return the dict of HTTP request parameters. Defaults to ``False``.
cache : bool, optional
Should the request result be cached? This can be useful for large repeated queries,
but since the data in the archive is updated regularly, this defaults to ``False``.
**criteria
The filtering criteria to apply. These are described in detail in the archive
documentation [1]_, [2]_ but some examples include ``select="*"`` to return all columns of
the queried table or ``where=pl_name='K2-18 b'`` to filter a specific column.
Returns
-------
response : `requests.Response`
The HTTP response returned from the service.
References
----------
.. [1] `NASA Exoplanet Archive TAP Documentation
<https://exoplanetarchive.ipac.caltech.edu/docs/TAP/usingTAP.html>`_
.. [2] `NASA Exoplanet Archive API Documentation
<https://exoplanetarchive.ipac.caltech.edu/docs/program_interfaces.html>`_
"""
# Make sure table is lower-case
table = table.lower()
# Warn if old table is requested
if table in MAP_TABLEWARNINGS.keys():
raise InvalidTableError(
"The `{0}` table is no longer updated and has been replacedby the `{1}` table, which"
" is connected to the Exoplanet Archive TAP service. Although the argument keywords "
"of the called method should still work on the new table, the allowed values could "
"have changed since the database column names have changed; this document contains "
"the current definitions and a mapping between the new and deprecated names: "
"https://exoplanetarchive.ipac.caltech.edu/docs/API_PS_columns.html. You might also "
"want to review the TAP User Guide for help on creating a new query for the most "
"current data: https://exoplanetarchive.ipac.caltech.edu/docs/TAP/usingTAP.html."
.format(table, MAP_TABLEWARNINGS[table]))
# Deal with lists of columns instead of comma separated strings
criteria = copy.copy(criteria)
if "select" in criteria:
select = criteria["select"]
if not isinstance(select, str):
select = ",".join(select)
criteria["select"] = select
# We prefer to work with IPAC format so that we get units, but everything it should work
# with the other options too
# Get the format, or set it to "ipac" if not given. Makes more sense to use CSV here.
criteria["format"] = criteria.get("format", "ipac")
# Less formats are allowed for TAP, so this needs to be updated. Default
# is VOTable (vot?, xml?), also csv and tsv are allowed
if "json" in criteria["format"].lower():
raise InvalidQueryError("The 'json' format is not supported")
# Build the query (and return it if requested)
request_payload = dict(table=table, **criteria)
if get_query_payload:
return request_payload
# Use the default cache setting if one was not provided
if cache is None:
cache = self.CACHE
if table in self.TAP_TABLES:
tap = pyvo.dal.tap.TAPService(baseurl=self.URL_TAP)
# construct query from table and request_payload (including format)
tap_query = self._request_to_sql(request_payload)
try:
response = tap.search(query=tap_query, language='ADQL') # Note that this returns a VOTable
except Exception as err:
raise InvalidQueryError(str(err))
else:
response = self._request(
"GET", self.URL_API, params=request_payload, timeout=self.TIMEOUT, cache=cache,
)
response.requested_format = criteria["format"]
return response
# This is the region query method
@class_or_instance
def query_region_async(self, table, coordinates, radius, *, get_query_payload=False, cache=None,
**criteria):
"""
Filter a table using a cone search around specified coordinates
Parameters
----------
table : str
The name of the table to query. A list of the tables on the Exoplanet Archive can be
found on the documentation pages [1]_, [2]_.
coordinates : str or `~astropy.coordinates`
The coordinates around which to query.
radius : str or `~astropy.units.Quantity`
The radius of the cone search. Assumed to be have units of degrees if not provided as
a ``Quantity``.
get_query_payload : bool, optional
Just return the dict of HTTP request parameters. Defaults to ``False``.
cache : bool, optional
Should the request result be cached? This can be useful for large repeated queries,
but since the data in the archive is updated regularly, this defaults to ``False``.
**criteria
Any other filtering criteria to apply. These are described in detail in the archive
documentation [1]_,[2]_ but some examples include ``select="*"`` to return all columns of
the queried table or ``where=pl_name='K2-18 b'`` to filter a specific column.
Returns
-------
response : `requests.Response`
The HTTP response returned from the service.
References
----------
.. [1] `NASA Exoplanet Archive TAP Documentation
<https://exoplanetarchive.ipac.caltech.edu/docs/TAP/usingTAP.html>`_
.. [2] `NASA Exoplanet Archive API Documentation
<https://exoplanetarchive.ipac.caltech.edu/docs/program_interfaces.html>`_
"""
# Checks if coordinate strings | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import time
import os
import matplotlib.pyplot as plt
import datetime as dt
from numpy import seterr
# Promote numpy floating-point warnings (e.g. 0/0) to FloatingPointError so
# the analysis routines below can catch them with try/except.
seterr(all='raise')
np.set_printoptions(suppress=True, threshold=10000, linewidth=1000)
# Command-line selection (disabled while testing):
# MENU = sys.argv[1]
# FILE_PATH = sys.argv[2]
# Hard-coded selection used during testing.
MENU = 'graph'
FILE_PATH = 'train_log_1634109617.txt'
Timestamp = time.time()
# BUG FIX: dt.date.today is a function and was never CALLED, so str() rendered
# the function object ("<built-in ...>") and `date` was always the same junk.
# Call it and take the same character positions from the ISO date string
# (e.g. "2021-10-13" -> "02") -- TODO confirm which digits were intended.
date = str(dt.date.today())[1] + str(dt.date.today())[2]
# Number of whitespace-separated fields per log line (label/value pairs).
data_num_in_line = 28
# Internal document processing(singal line)
def str2list(_data_line, num_fields=None):
    """Parse one whitespace-delimited log line into its value fields.

    Parameters
    ----------
    _data_line : str
        One raw line of the training log (alternating label/value tokens).
    num_fields : int, optional
        Maximum number of splits (tokens per line minus one).  Defaults to
        the module-level ``data_num_in_line`` for backward compatibility.

    Returns
    -------
    list of str
        The value tokens (odd positions) after dropping the spurious token
        introduced by the blank column in the log.
    """
    if num_fields is None:
        num_fields = data_num_in_line
    tokens = _data_line.rstrip('\n').split(' ', num_fields)
    # The log file has an empty column between the gap and v_ego fields;
    # drop the extra token it produces.
    tokens.pop(18)
    # Values sit at the odd indices (labels occupy the even ones).
    return [tokens[t] for t in range(1, num_fields, 2)]
# Txt file to csv model
# Txt file to csv model
def txt2csv(path):
    """Convert a training-log text file into a CSV file next to it.

    Each log line is parsed with str2list() into 14 numeric columns and the
    result is written to '<path without extension>.csv'.
    """
    # Read the whole file once (the original read it twice just to count lines)
    # and let the with-statement close it even on error.
    with open(path, "r") as fp:
        lines = fp.readlines()
    data_lens = len(lines)
    print('file %s lines read: %s' % (path, str(data_lens)))
    df_list = np.zeros(shape=(data_lens, int(data_num_in_line / 2)))
    for _i, data_pre_line in enumerate(lines):
        df_list[_i] = str2list(data_pre_line)
    df_dataset = pd.DataFrame(df_list)
    df_dataset.columns = ['EPISODE', 'TIMESTAMP', 'EPISODE_LENGTH', 'ACTION',
                          'REWARD', 'Avg_REWARD', 'training_Loss', 'Q_MAX',
                          'gap', 'v_ego', 'v_lead', 'time', 'a_ego', 'critic_loss']
    # BUG FIX: the output used to be named after the global FILE_PATH instead
    # of the `path` argument, so converting any other file overwrote the
    # default CSV.  Also fixed the "transfom" typo in the status message.
    df_dataset.to_csv(f'{os.path.splitext(path)[0]}.csv')
    print('txt transform to csv successful')
# Plot per-epoch mean gap and mean action on twin y-axes.
def plot_action_reward_gap_v_(EPISODE, ACTION, gap, v_ego, v_lead):
    """Plot the per-epoch mean gap (left axis, red) and mean action
    (right axis, green).

    v_ego and v_lead are accepted for interface compatibility but are not
    plotted by this function.
    """
    epoch = int(EPISODE[-1])
    epoch_iter = [i for i in range(epoch)]
    gap_mean = []
    action_mean = []
    for i in epoch_iter:
        start_index = np.argwhere(EPISODE == i)[0][0]
        end_index = np.argwhere(EPISODE == i)[-1][0]
        # Average each quantity over the steps belonging to this epoch.
        gap_mean.append(np.array(gap[start_index:end_index].mean()))
        action_mean.append(np.array(ACTION[start_index:end_index].mean()))
    fig, ax1 = plt.subplots(figsize=(10, 3))
    plt.title('acc_info', fontsize=20)
    plot1 = ax1.plot(epoch_iter, gap_mean, 'r')
    ax1.set_ylabel('gap', fontsize=18)
    ax2 = ax1.twinx()
    plot2 = ax2.plot(epoch_iter, action_mean, 'g')
    # BUG FIX: the right-axis label was previously set AFTER plt.show(), so it
    # never appeared on the (blocking) rendered figure.  Label first, then show.
    ax2.set_ylabel('action', fontsize=18)
    plt.show()
# Plot with CRASH and LOSS events over the training run.
def plot_reward_action_crash(EPISODE, ACTION, gap, EPISODE_LENGTH):
    """Scatter episode outcomes over gap and overlay the mean action.

    Outcomes are classified from the raw data: gap <= 3 ("crash", red),
    gap >= 300 ("loss", blue), EPISODE_LENGTH == 480 ("done", green) --
    thresholds taken from the original code; confirm against the env config.

    Returns
    -------
    (crash_index, lose_index) : tuple of np.ndarray
        argwhere() results for the crash and loss samples.
    """
    epoch = int(EPISODE[-1])
    epoch_iter = [i for i in range(epoch)]
    crash_index = np.argwhere(gap <= 3)
    lose_index = np.argwhere(gap >= 300)
    done_index = np.argwhere(EPISODE_LENGTH == 480)
    # Gap values at the crash/loss/done samples
    gap_crash = gap[crash_index[:, 0]]
    gap_loss = gap[lose_index[:, 0]]
    gap_done = gap[done_index[:, 0]]
    action_mean = []
    for i in epoch_iter:
        start_index = np.argwhere(EPISODE == i)[0][0]
        end_index = np.argwhere(EPISODE == i)[-1][0]
        action_mean.append(np.array(ACTION[start_index:end_index].mean()))
    fig, ax1 = plt.subplots(figsize=(10, 3))
    print(f'Crash index:{EPISODE[crash_index[:, 0]].reshape(1, -1)}')
    print(f'Loss index:{EPISODE[lose_index[:, 0]].reshape(1,-1)}')
    plt.title('acc_info', fontsize=20)
    plot1 = ax1.scatter(EPISODE[crash_index[:, 0].reshape(len(gap_crash), 1)], gap_crash, c='red')
    plot2 = ax1.scatter(EPISODE[lose_index[:, 0].reshape(len(gap_loss), 1)], gap_loss, c='blue')
    plot3 = ax1.scatter(EPISODE[done_index[:, 0].reshape(len(gap_done), 1)], gap_done, c='green')
    ax1.set_ylabel('gap', fontsize=18)
    ax2 = ax1.twinx()
    # BUG FIX: renamed from plot3 (which shadowed the 'done' scatter handle)
    # and moved the y-label BEFORE plt.show() so it appears on the figure.
    plot4 = ax2.plot(epoch_iter, action_mean, 'g')
    ax2.set_ylabel('action', fontsize=18)
    plt.show()
    return crash_index, lose_index
# plt.figure(figsize=(8, 5))
# action, = plt.plot(epoch_iter, action_mean, linewidth=2, color='red')
# gap_, = plt.plot(epoch_iter, np.array(gap_mean), linewidth=2, color='blue')
# v_ego_, = plt.plot(EPISODE, v_ego, linewidth=2, color='yellow')
# v_lead_, = plt.plot(EPISODE, v_lead, linewidth=2, color='k')
# plt.legend(handles=[action, gap_, v_ego_, v_lead_], labels=['ACTION', 'gap', 'v_ego', 'v_lead'], loc='best')
# plt.title('acc_info')
# plt.xlabel('Epoch', size=10)
# plt.ylabel('info', size=10)
# plt.show()
def plot_Qmax_singel_timeframe(Qmax, time_stamp):
    """Plot the Q_MAX trace against its timestamps in a titled figure."""
    plt.plot(time_stamp, Qmax)
    plt.title('Qmax generator')
    plt.show()
def get_singal_info(EPISODE, EPISODE_LENGTH, v_lead, v_ego, gap, ACTION, REWARD, index):
    """Slice out the per-step data belonging to one episode.

    Parameters
    ----------
    EPISODE : np.ndarray
        Episode id per sample; used to locate the slice for ``index``.
    EPISODE_LENGTH : unused
        Kept for interface compatibility with existing callers.
    v_lead, v_ego, gap, ACTION, REWARD : np.ndarray
        Per-sample signals, sliced identically.
    index : int
        The episode id to extract.

    Returns
    -------
    (length_ep, v_lead_, v_ego_, gap_, action_, reward_)
        length_ep is a 1-based step counter matching the slice length.

    Note: the slice end is exclusive, so the episode's final sample is
    dropped — behavior preserved from the original implementation.
    """
    # (removed: an unused per-call computation of epoch/epoch_iter)
    start_idx = np.argwhere(EPISODE == index)[0][0]
    end_idx = np.argwhere(EPISODE == index)[-1][0]
    length_ep = [t for t in range(1, end_idx - start_idx + 1)]
    v_lead_ = v_lead[start_idx: end_idx]
    v_ego_ = v_ego[start_idx: end_idx]
    gap_ = gap[start_idx: end_idx]
    action_ = ACTION[start_idx: end_idx]
    reward_ = REWARD[start_idx: end_idx]
    return length_ep, v_lead_, v_ego_, gap_, action_, reward_
def plot_singal_info(EPISODE_, EPISODE_LENGTH_, _v_lead, _v_ego, _gap, ACTION_, REWARD_, index_):
    """Draw a 4-panel figure comparing two consecutive episodes.

    Panels 1-2 show episode ``index_ - 1`` (speeds/gap, then action/reward);
    panels 3-4 show episode ``index_`` in the same layout.  The triple-quoted
    block in the middle is disabled analysis code kept by the original author.
    """
    length_ep, v_lead_, v_ego_, gap_, action_, reward_ = get_singal_info(EPISODE_, EPISODE_LENGTH_,
                                                                         _v_lead, _v_ego, _gap,
                                                                         ACTION_, REWARD_, index_-1)
    # Plot val in graph: speeds on the twin axis, gap dotted on the base axis
    ax_a_g = plt.subplot(411)
    ax_a_v = ax_a_g.twinx()
    v_lead_g, = ax_a_v.plot(length_ep, v_lead_, linewidth=2, color='C1')
    v_ego_g, = ax_a_v.plot(length_ep, v_ego_, linewidth=2, color='C9')
    gap_g, = ax_a_g.plot(length_ep, gap_, linewidth=2, color='C3', linestyle=':')
    plt.legend(handles=[v_lead_g, v_ego_g, gap_g],
               labels=['v_lead', 'v_ego', 'gap'], loc=2)
    plt.title('info_{}'.format(index_-1))
    plt.xlabel('Epoch', size=10)
    plt.ylabel('info_{}'.format(index_-1), size=10)
    plt.subplot(412)
    action_g, = plt.plot(length_ep, action_, linewidth=2, color='C4')
    reward_g, = plt.plot(length_ep, reward_, linewidth=2, color='C5', linestyle=':')
    plt.legend(handles=[action_g, reward_g],
               labels=['action', 'reward'], loc=2)
    # plt.title('info_{}'.format(_index-1))
    # plt.xlabel('Epoch', size=10)
    # plt.ylabel('info_{}'.format(_index-1), size=10)
    # Re-slice for the second episode (index_) and repeat the two panels
    length_ep, v_lead_, v_ego_, gap_, action_, reward_ = get_singal_info(EPISODE_,
                                                                         EPISODE_LENGTH_, _v_lead, _v_ego,
                                                                         _gap, ACTION_, REWARD_, index_)
    '''
    # 制作数据,处理数据 >>>
    v_relative = (v_ego_[:, 0] - v_lead_[:, 0]).reshape(-1, 1)
    acc_relative = np.zeros((len(length_ep), 1))
    for index in range(1, len(length_ep)):
        try:
            acc_relative[index, :] = (v_relative[index, :]**2 - v_relative[index - 1, :]**2) / (2 * (gap_[index, :] - gap_[index-1, :]))
        except FloatingPointError as e: # numpy将所有0/0错误归于FloatingPointError,第十行定义numpy抛出所有警告类型为错误,即可捕获RunTimeWarning
            print(f"index {index} gap has no change")
            acc_relative[index, :] = 0
    print(np.concatenate([np.arange(0, len(length_ep)).reshape(-1, 1), v_lead_, v_ego_, gap_, action_, reward_, v_relative, acc_relative], axis=1))
    # 制作数据,处理数据 <<<
    '''
    # Plot val in graph (same layout as above, for episode index_)
    ax_b_g = plt.subplot(413)
    ax_b_v = ax_b_g.twinx()
    v_lead_g, = ax_b_v.plot(length_ep, v_lead_, linewidth=2, color='C1')
    v_ego_g, = ax_b_v.plot(length_ep, v_ego_, linewidth=2, color='C9')
    gap_g, = ax_b_g.plot(length_ep, gap_, linewidth=2, color='C3', linestyle=':')
    plt.legend(handles=[v_lead_g, v_ego_g, gap_g],
               labels=['v_lead', 'v_ego', 'gap'], loc=2)
    plt.title('info_{}'.format(index_))
    plt.xlabel('Epoch', size=10)
    plt.ylabel('info_{}'.format(index_), size=10)
    plt.subplot(414)
    action_g, = plt.plot(length_ep, action_, linewidth=2, color='C4')
    reward_g, = plt.plot(length_ep, reward_, linewidth=2, color='C5', linestyle=':')
    plt.legend(handles=[action_g, reward_g],
               labels=['action', 'reward'], loc=2)
    plt.show()
def relative(EPISODE_, EPISODE_LENGTH_, _v_lead, _v_ego, _gap, ACTION_, REWARD_, index_):
    """Recompute relative kinematics and a candidate reward for one episode,
    printing the resulting table and min/max diagnostics.

    Accelerations are finite differences over a 0.5 s step (hard-coded).
    Nothing is returned; this is a console-diagnostic routine.
    """
    length_ep, v_lead_, v_ego_, gap_, action_, reward_ = get_singal_info(EPISODE_, EPISODE_LENGTH_,
                                                                         _v_lead, _v_ego, _gap,
                                                                         ACTION_, REWARD_, index_)
    # Per-step accelerations from successive speed samples (dt = 0.5 s)
    acc_lead_ = np.zeros(((len(length_ep)), 1))
    acc_ego_ = np.zeros(((len(length_ep)), 1))
    acc_lead_[1:, :] = (v_lead_[1:, :] - v_lead_[:-1, :]) / 0.5
    acc_ego_[1:, :] = (v_ego_[1:, :] - v_ego_[:-1, :]) / 0.5
    acc_compare = acc_ego_ - acc_lead_
    # Build derived data >>>
    v_relative = (v_ego_[:, 0] - v_lead_[:, 0]).reshape(-1, 1)
    gap_relative = np.zeros((len(length_ep), 1))
    gap_relative[1:, :] = (gap_[1:, 0] - gap_[:-1, 0]).reshape(-1, 1)
    acc_relative = np.zeros((len(length_ep), 1))
    for index in range(1, len(length_ep)):
        try:
            acc_relative[index, :] = (v_relative[index, :]**2 - v_relative[index - 1, :]**2) / (2 * (gap_[index, :] - gap_[index-1, :]))
        except FloatingPointError as e:  # numpy maps 0/0 here because seterr(all='raise') at the top of the file promotes warnings to errors
            print(f"index {index} gap has no change")
            acc_relative[index, :] = 0
    # reward_recal = Caculate_reward(v_relative, gap_, acc_relative)
    # NOTE(review): ttc is never used below, and stays undefined if the
    # division raises — harmless today, but fragile if later code reads it.
    try:
        ttc = gap_ / v_relative
    except FloatingPointError as e:
        pass
    # reward_gap = (np.exp(-(gap_ - 50)**2 / (2 * 5.3**2)) / (np.sqrt(2*np.pi) * 5.3)) * 100 / 7.9
    #
    # reward_recal = np.zeros((len(length_ep), 1))
    # for index in range(len(length_ep)):
    #     if ttc[index, :] < 0:
    #         reward_recal[index, :] = (reward_gap[index, :] - 0.2) / 0.75
    #     elif ttc[index, :] >= 0:
    #         reward_recal[index, :] = scti_Caculate(ttc[index, :]) * 0.5 + (reward_gap[index, :] - 0.2) * 0.5 / 0.75
    # print(np.concatenate([np.arange(0, len(length_ep)).reshape(-1, 1), v_lead_, v_ego_, gap_, action_, reward_, v_relative, acc_relative, acc_compare, (reward_gap - 0.2)/0.75, reward_recal], axis=1))
    # Check the distance/"best action" formulas (t is a hard-coded horizon)
    t = 3.5
    distance_ef = gap_ - (v_relative * 0.5 + 0.5 * acc_compare * 0.5 ** 2)
    action_best = (-(50 - gap_) - v_relative * t) / (2 * t ** 2)
    reward_recal = np.zeros((len(length_ep), 1))
    for index in range(len(length_ep)):
        # reward_recal[index, :] = np.exp((action_[index, :] - 0.5 - (-(50 - gap_[index, :] - v_relative[index, :] * t) / ((2 * t) ** 2))) / 2 * 0.5 ** 2) / (np.sqrt(2 * np.pi) * 0.5) * 0.5 / 0.8
        try:
            reward_recal[index, :] = (np.exp(-(acc_compare[index, :] - action_best[index, :]) ** 2 / (2 * (0.3 ** 2))) / (np.sqrt(2 * np.pi) * 0.3)) / 1.4
        except FloatingPointError as e:
            reward_recal[index, :] = 0
    print(np.concatenate([np.arange(0, len(length_ep)).reshape(-1, 1), v_lead_, v_ego_, gap_, action_, reward_, action_best, acc_ego_, acc_lead_, acc_compare, reward_recal], axis=1))
    print(f'acc max:{acc_compare[:, 0].max()}')
    print(f'acc min:{acc_compare[:, 0].min()}')
    print(f'v_relative max:{v_relative[:, 0].max()}')
    print(f'v_relative min:{v_relative[:, 0].min()}')
    # Build derived data <<<
    # fig, axis = plt.subplots()
    # axis2 = axis.twinx()
    # v_rel, = axis2.plot(length_ep, v_relative, linewidth=2, color='C1')
    # gap_rel, = axis.plot(length_ep, gap_relative, linestyle='-', color='C3')
    # acc_rel, = axis2.plot(length_ep, acc_compare, linewidth=2, color='C9')
    # # gap_g, = ax_b_g.plot(length_ep, gap_, linewidth=2, color='C3', linestyle=':')
    # plt.legend(handles=[v_rel, gap_rel, acc_rel],
    #            labels=['v_rel', 'gap_rel', 'acc_rel'], loc='best')
    # plt.show()
def scti_Caculate(ttc_min, ttc_=8):
    """Map a minimum time-to-collision onto a safety score.

    Below the reference horizon ``ttc_`` the score follows a rational curve
    (0 at ttc_min=0, 1 at ttc_min=ttc_); far beyond it (diff > 100) the score
    is -1; otherwise it decays as a Gaussian of the excess.
    """
    if ttc_min <= ttc_:
        numerator = 100 * np.power(ttc_min, 1.4)
        denominator = np.power(ttc_min, 1.4) + np.power(ttc_ - ttc_min, 1.5)
        return numerator / denominator / 100
    if ttc_min - ttc_ > 100:
        return -1
    return (100 * np.exp((-np.power((ttc_min - ttc_), 2)) / (2 * np.power(ttc_, 2)))) / 100
def Caculate_reward(v_ref, g_ref, a_ref):
a1 = -1.08e-3
a2 = 1.136e-4
a3 = -1.643e-2
a4 = 9.927e-4
a5 = -002.163e-3
a6 = -1.6435e-2
a7 = -7.3387e-2
a8 = -2.0589e-2
a9 = 6.83969e-2
a10 = 1.116254
Function = a1*(v_ref**2) + a2*(g_ref**2) + a3*(a_ref**2) + a4*v_ref*g_ref + a5*g_ref*a_ref + a6*v_ref*a_ref + | |
getdata[i*2+5]== 0xff and getdata[i*2+6]== 0xff:
actangle[i-1] = -1
else:
actangle[i-1] = getdata[i*2+5] + (getdata[i*2+6]<<8)
return actangle
# Read the actual force readings from the hand.
def get_actforce():
    """Read the six force registers (address 0x062E, 12 bytes).

    Returns a list of six raw readings; a raw value of 0xFFFF is
    reported as -1 (reading unavailable).
    """
    global hand_id
    global ser
    datanum = 0x04
    b = [0]*(datanum + 5)
    # Packet header
    b[0] = 0xEB
    b[1] = 0x90
    # Target hand id
    b[2] = hand_id
    # Payload byte count
    b[3] = datanum
    # Read command
    b[4] = 0x11
    # Register address (low byte, high byte -> 0x062E)
    b[5] = 0x2E
    b[6] = 0x06
    # Number of register bytes to read
    b[7] = 0x0C
    # Checksum
    b[8] = checknum(b,datanum+4)
    # Serialize and send the frame over the serial port
    putdata = b''
    for i in range(1,datanum+6):
        putdata = putdata + num2str(b[i-1])
    ser.write(putdata)
    print('发送的数据:')
    for i in range(1,datanum+6):
        print(hex(putdata[i-1]))
    getdata= ser.read(20)
    print('返回的数据:')
    for i in range(1,21):
        print(hex(getdata[i-1]))
    actforce = [0]*6
    # Decode six little-endian 16-bit values at reply offsets 7..18
    for i in range(1,7):
        if getdata[i*2+5]== 0xff and getdata[i*2+6]== 0xff:
            actforce[i-1] = -1
        else:
            actforce[i-1] = getdata[i*2+5] + (getdata[i*2+6]<<8)
    return actforce
# Read the motor drive currents.
def get_current():
    """Read the six current registers (address 0x063A, 12 bytes).

    Returns a list of six raw current values; 0xFFFF is mapped to -1.
    Relies on module-level `ser`, `hand_id`, `checknum`, `num2str`.
    """
    global hand_id
    global ser
    datanum = 0x04
    frame = [0] * (datanum + 5)
    frame[0] = 0xEB                        # packet header, byte 0
    frame[1] = 0x90                        # packet header, byte 1
    frame[2] = hand_id                     # target hand id
    frame[3] = datanum                     # payload byte count
    frame[4] = 0x11                        # read command
    frame[5] = 0x3A                        # register address, low byte
    frame[6] = 0x06                        # register address, high byte
    frame[7] = 0x0C                        # number of register bytes to read
    frame[8] = checknum(frame, datanum + 4)  # checksum over the frame
    # Serialize and transmit the frame
    putdata = b''.join(num2str(value) for value in frame)
    ser.write(putdata)
    print('发送的数据:')
    for idx in range(datanum + 5):
        print(hex(putdata[idx]))
    getdata = ser.read(20)
    print('返回的数据:')
    for idx in range(20):
        print(hex(getdata[idx]))
    # Decode six little-endian 16-bit values; 0xFFFF means "unavailable"
    current = [0] * 6
    for ch in range(6):
        low = getdata[ch * 2 + 7]
        high = getdata[ch * 2 + 8]
        if low == 0xff and high == 0xff:
            current[ch] = -1
        else:
            current[ch] = low + (high << 8)
    return current
# Read the fault information.
def get_error():
    """Read the six fault-code registers (address 0x0646, 6 bytes).

    Returns the six single-byte fault codes from the reply.
    Relies on module-level `ser`, `hand_id`, `checknum`, `num2str`.
    """
    global hand_id
    global ser
    datanum = 0x04
    frame = [0] * (datanum + 5)
    frame[0] = 0xEB                        # packet header, byte 0
    frame[1] = 0x90                        # packet header, byte 1
    frame[2] = hand_id                     # target hand id
    frame[3] = datanum                     # payload byte count
    frame[4] = 0x11                        # read command
    frame[5] = 0x46                        # register address, low byte
    frame[6] = 0x06                        # register address, high byte
    frame[7] = 0x06                        # number of register bytes to read
    frame[8] = checknum(frame, datanum + 4)  # checksum over the frame
    # Serialize and transmit the frame
    putdata = b''.join(num2str(value) for value in frame)
    ser.write(putdata)
    print('发送的数据:')
    for idx in range(datanum + 5):
        print(hex(putdata[idx]))
    getdata = ser.read(14)
    print('返回的数据:')
    for idx in range(14):
        print(hex(getdata[idx]))
    # Fault codes are single bytes at reply offsets 7..12
    error = [getdata[idx] for idx in range(7, 13)]
    return error
# Read the status information.
def get_status():
    """Read the six status registers (address 0x064C, 6 bytes).

    Returns the six single-byte status values from the reply.
    """
    global hand_id
    global ser
    datanum = 0x04
    b = [0]*(datanum + 5)
    # Packet header
    b[0] = 0xEB
    b[1] = 0x90
    # Target hand id
    b[2] = hand_id
    # Payload byte count
    b[3] = datanum
    # Read command
    b[4] = 0x11
    # Register address (low byte, high byte -> 0x064C)
    b[5] = 0x4C
    b[6] = 0x06
    # Number of register bytes to read
    b[7] = 0x06
    # Checksum
    b[8] = checknum(b,datanum+4)
    # Serialize and send the frame over the serial port
    putdata = b''
    for i in range(1,datanum+6):
        putdata = putdata + num2str(b[i-1])
    ser.write(putdata)
    print('发送的数据:')
    for i in range(1,datanum+6):
        print(hex(putdata[i-1]))
    getdata= ser.read(14)
    print('返回的数据:')
    for i in range(1,15):
        print(hex(getdata[i-1]))
    status = [0]*6
    # Status bytes sit at reply offsets 7..12
    for i in range(1,7):
        status[i-1] = getdata[i+6]
    return status
# Read the temperature information.
def get_temp():
    """Read the six temperature registers (address 0x0652, 6 bytes).

    Returns the six single-byte temperature values from the reply.
    Relies on module-level `ser`, `hand_id`, `checknum`, `num2str`.
    """
    global hand_id
    global ser
    datanum = 0x04
    frame = [0] * (datanum + 5)
    frame[0] = 0xEB                        # packet header, byte 0
    frame[1] = 0x90                        # packet header, byte 1
    frame[2] = hand_id                     # target hand id
    frame[3] = datanum                     # payload byte count
    frame[4] = 0x11                        # read command
    frame[5] = 0x52                        # register address, low byte
    frame[6] = 0x06                        # register address, high byte
    frame[7] = 0x06                        # number of register bytes to read
    frame[8] = checknum(frame, datanum + 4)  # checksum over the frame
    # Serialize and transmit the frame
    putdata = b''.join(num2str(value) for value in frame)
    ser.write(putdata)
    print('发送的数据:')
    for idx in range(datanum + 5):
        print(hex(putdata[idx]))
    getdata = ser.read(14)
    print('返回的数据:')
    for idx in range(14):
        print(hex(getdata[idx]))
    # Temperature bytes sit at reply offsets 7..12
    temp = [getdata[idx] for idx in range(7, 13)]
    return temp
# Clear the hand's error state.
def set_clear_error():
    """Write 0x01 to register 0x03EC to clear reported errors.

    NOTE(review): unlike the read helpers, only `global hand_id` is declared;
    `ser` is read from module scope, which works because it is never
    reassigned here.
    """
    global hand_id
    datanum = 0x04
    b = [0]*(datanum + 5)
    # Packet header
    b[0] = 0xEB
    b[1] = 0x90
    # Target hand id
    b[2] = hand_id
    # Payload byte count
    b[3] = datanum
    # Write command
    b[4] = 0x12
    # Register address (low byte, high byte -> 0x03EC)
    b[5] = 0xEC
    b[6] = 0x03
    # Payload: 1 = clear errors
    b[7] = 0x01
    # Checksum
    b[8] = checknum(b,datanum+4)
    # Serialize and send the frame over the serial port
    putdata = b''
    for i in range(1,datanum+6):
        putdata = putdata + num2str(b[i-1])
    ser.write(putdata)
    print('发送的数据:')
    for i in range(1,datanum+6):
        print(hex(putdata[i-1]))
    getdata= ser.read(9)
    print('返回的数据:')
    for i in range(1,10):
        print(hex(getdata[i-1]))
# Save the current parameters to FLASH.
def set_save_flash():
    """Write 0x01 to register 0x03ED to persist current parameters to FLASH.

    NOTE(review): reads 18 reply bytes here versus 9 in set_clear_error —
    preserved as-is; confirm against the device protocol documentation.
    """
    global hand_id
    datanum = 0x04
    b = [0]*(datanum + 5)
    # Packet header
    b[0] = 0xEB
    b[1] = 0x90
    # Target hand id
    b[2] = hand_id
    # Payload byte count
    b[3] = datanum
    # Write command
    b[4] = 0x12
    # Register address (low byte, high byte -> 0x03ED)
    b[5] = 0xED
    b[6] = 0x03
    # Payload: 1 = save to FLASH
    b[7] = 0x01
    # Checksum
    b[8] = checknum(b,datanum+4)
    # Serialize and send the frame over the serial port
    putdata = b''
    for i in range(1,datanum+6):
        putdata = putdata + num2str(b[i-1])
    ser.write(putdata)
    print('发送的数据:')
    for i in range(1,datanum+6):
        print(hex(putdata[i-1]))
    getdata= ser.read(18)
    print('返回的数据:')
    for i in range(1,19):
        print(hex(getdata[i-1]))
# Force-sensor calibration.
def gesture_force_clb():
    """Trigger force-sensor calibration by writing 0x01 to register 0x03F1.

    Relies on module-level `ser`, `hand_id`, `checknum`, `num2str`.
    """
    global hand_id
    datanum = 0x04
    frame = [0] * (datanum + 5)
    frame[0] = 0xEB                        # packet header, byte 0
    frame[1] = 0x90                        # packet header, byte 1
    frame[2] = hand_id                     # target hand id
    frame[3] = datanum                     # payload byte count
    frame[4] = 0x12                        # write command
    frame[5] = 0xF1                        # register address, low byte
    frame[6] = 0x03                        # register address, high byte
    frame[7] = 0x01                        # payload: start calibration
    frame[8] = checknum(frame, datanum + 4)  # checksum over the frame
    # Serialize and transmit the frame
    putdata = b''.join(num2str(value) for value in frame)
    ser.write(putdata)
    print('发送的数据:')
    for idx in range(datanum + 5):
        print(hex(putdata[idx]))
    getdata = ser.read(18)
    print('返回的数据:')
    for idx in range(18):
        print(hex(getdata[idx]))
# Set the power-on default speeds.
def setdefaultspeed(speed1,speed2,speed3,speed4,speed5,speed6):
    """Write the power-on default speeds (0-1000 each) to register 0x0408.

    Each speed is sent as a little-endian 16-bit value.  If any argument is
    out of range an error is printed and the command is not sent.
    """
    global hand_id
    speeds = [speed1, speed2, speed3, speed4, speed5, speed6]
    # BUG FIX: previously only speed1 printed the range error; speed2..speed6
    # failed silently.  Validate all six uniformly.
    for value in speeds:
        if value < 0 or value > 1000:
            print('数据超出正确范围:0-1000')
            return
    datanum = 0x0F
    b = [0]*(datanum + 5)
    # Packet header
    b[0] = 0xEB
    b[1] = 0x90
    # Target hand id
    b[2] = hand_id
    # Payload byte count
    b[3] = datanum
    # Write command
    b[4] = 0x12
    # Register address (low byte, high byte -> 0x0408)
    b[5] = 0x08
    b[6] = 0x04
    # Six speeds as (low, high) byte pairs at offsets 7..18
    for ch, value in enumerate(speeds):
        pair = data2bytes(value)
        b[7 + ch * 2] = pair[0]
        b[8 + ch * 2] = pair[1]
    # Checksum
    b[19] = checknum(b,datanum+4)
    # Serialize and send the frame over the serial port
    putdata = b''
    for i in range(1,datanum+6):
        putdata = putdata + num2str(b[i-1])
    ser.write(putdata)
    print('发送的数据:')
    for i in range(1,datanum+6):
        print(hex(putdata[i-1]))
    getdata= ser.read(9)
    print('返回的数据:')
    for i in range(1,10):
        print(hex(getdata[i-1]))
# Set the power-on force-control thresholds.
def setdefaultpower(power1,power2,power3,power4,power5,power6):
    """Write the power-on force thresholds (0-1000 each) to register 0x0414.

    Each threshold is sent as a little-endian 16-bit value.  If any argument
    is out of range an error is printed and the command is not sent.
    """
    global hand_id
    powers = [power1, power2, power3, power4, power5, power6]
    # BUG FIX: previously only power1 printed the range error; power2..power6
    # failed silently.  Validate all six uniformly.
    for value in powers:
        if value < 0 or value > 1000:
            print('数据超出正确范围:0-1000')
            return
    datanum = 0x0F
    b = [0]*(datanum + 5)
    # Packet header
    b[0] = 0xEB
    b[1] = 0x90
    # Target hand id
    b[2] = hand_id
    # Payload byte count
    b[3] = datanum
    # Write command
    b[4] = 0x12
    # Register address (low byte, high byte -> 0x0414)
    b[5] = 0x14
    b[6] = 0x04
    # Six thresholds as (low, high) byte pairs at offsets 7..18
    for ch, value in enumerate(powers):
        pair = data2bytes(value)
        b[7 + ch * 2] = pair[0]
        b[8 + ch * 2] = pair[1]
    # Checksum
    b[19] = checknum(b,datanum+4)
    # Serialize and send the frame over the serial port
    putdata = b''
    for i in range(1,datanum+6):
        putdata = putdata + num2str(b[i-1])
    ser.write(putdata)
    print('发送的数据:')
    for i in range(1,datanum+6):
        print(hex(putdata[i-1]))
    getdata= ser.read(9)
    print('返回的数据:')
    for i in range(1,10):
        print(hex(getdata[i-1]))
# Read the current motion-sequence run register.
def excutableReadMoveSeq():
    """Read one byte from the motion-sequence register (address 0x0910).

    Returns a one-element list holding byte 7 of the reply.
    NOTE(review): unlike the fixed-size helpers above, b[3] is
    datanum + 0x03 (address + length bytes are counted in the payload).
    """
    print("excutableReadMoveSeq:")
    global hand_id
    global ser
    # Number of registers to read (original note: "modify here (1)")
    datanum = 0x01
    # Total size of the outgoing frame in bytes
    b = [0]*(datanum + 8)
    lens = int(datanum) + 8
    # Packet header
    b[0] = 0xEB
    b[1] = 0x90
    # Target hand id
    b[2] = hand_id
    # Payload byte count (address + length + data)
    b[3] = datanum + 0x03
    # Read command
    b[4] = 0x11
    # Register address (original note: "modify here (2)")
    b[5] = 0x10
    b[6] = 0x09
    # Number of register bytes to read (original note: "modify here (2)")
    b[7] = 0x01
    # Checksum (original author's open question: does the length
    # include the checksum byte?)
    print(lens)
    #b[8] = checknum(b,b[3]+4)
    b[8] = checknum(b,lens-1)
    # Serialize and send the frame over the serial port
    putdata = b''
    for i in range(1,lens+1):
        putdata = putdata + num2str(b[i-1])
    ser.write(putdata)
    print('发送的数据:')
    for i in range(1,lens+1):
        print(hex(putdata[i-1]))
    getdata= ser.read(9)
    print('返回的数据:')
    for i in range(1,10):
        print(hex(getdata[i-1]))
    temp = [0]*1
    # The register value sits at reply offset 7
    for i in range(1,2):
        temp[i-1] = getdata[i+6]
    print(temp)
    return temp
#写入当前的运行寄存器的值
def excutableWriteMoveSeq():
print("excutableWriteMoveSeq:")
global hand_id
global ser
datanum = 0x01
b = [0]*(datanum + 8)
lens = int(datanum) + 8
#包头
b[0] = 0xEB
b[1] = 0x90
#hand_id号
b[2] = hand_id
#数据个数
b[3] = datanum + 0x03
#写操作
b[4] = 0x12
#地址
b[5] = 0x10
b[6] = 0x9
#写入寄存器的数据
b[7] = 0x01
#校验和
b[8] = checknum(b,lens-1)
#向串口发送数据
putdata = b''
for i in range(1,lens+1):
putdata = putdata + num2str(b[i-1])
| |
#!/usr/bin/env python3
#===============================================================================
# smsExtractor.py | Version 1.3 | FreeBSD License | 2022-06-21
# <NAME> | <EMAIL>
#
# Description:
# Simple script that extracts SMS and MMS messages from previously
# generated XML files into a subdirectory named after the given file.
#
# Restrictions:
# The files are assumed to have been generated by the Android app
# 'SMS Backup & Restore' by Ritesh Sahu. It also assumes a UNIX-like OS,
# specifically GNU/Linux, though it might work on other OSes as well.
#===============================================================================
import sys, os
import binascii, sqlite3
## Global Options
## Runtime flags for the extractor; all default to off.  The corresponding
## command-line switches are documented in print_help().
gOpts = {
    "multiple" : False,  # -m: write messages to multiple (individual) files
    "subdirs" : False,  # -s: write files to individual subdirectories per contact
    "force" : False  # -f: overwrite extant files, or into extant directories
}
class Message:
    """One SMS or MMS record pulled from the backup XML.

    Constructed from the 9-tuple stored in the 'messages' database table:
    (type, address, mType, date, rDate, name, body, data, src).
    """
    def __init__( self, data ):
        self.parse_data( data )
    def parse_data( self, data ):
        """Unpack the 9-tuple of message fields onto attributes."""
        ( self.type, self.address, self.mType, self.date, self.rDate,
          self.name, self.body, self.data, self.src ) = data
    def txt( self ):
        """Render the message as a human-readable text entry."""
        ## Top bar: sent/received direction plus the readable date
        if self.mType == "1":
            header = "Received on %s\n" % self.rDate
        elif self.mType == "2":
            header = "Sent on %s\n" % self.rDate
        else:
            header = "%s\n" % self.rDate
        parts = [ ( 80 * '=' ) + '\n', header, ( 80 * '-' ) + '\n' ]
        ## The body (actual content) of the text
        parts.append( "%s\n\n" % self.body )
        ## MMS entries also point at their extracted attachment file
        if self.type == "mms":
            parts.append( "Image: %s\n" % self.src )
        ## Cap it off with a bit of padding
        parts.append( "\n\n" )
        return( "".join( parts ) )
def directory_setup( filename ):
    """Create '<filename>.d' with 'files' and 'messages' subdirectories.

    Returns a tuple (message_dir, file_dir, skip): skip is True when the
    top-level directory already exists and the force option is off, in which
    case both directory values are None.  Exits the process on permission
    errors.
    """
    ## Create the top-level directory into which we extract the messages
    try:
        os.mkdir( "%s.d" % filename )
    except PermissionError:
        print( "ERROR: Could not create directory '%s.d'. Aborting." %
                filename )
        sys.exit( 1 )
    except FileExistsError:
        if not gOpts[ "force" ]:
            print( "WARNING: Directory '%s.d' exists: Skipping" % filename )
            return( None, None, True )
    ## Make the subdirectory names
    fDir = os.path.join( "%s.d" % filename, "files" )
    mDir = os.path.join( "%s.d" % filename, "messages" )
    ## BUG FIX: creating the subdirectories could previously raise
    ## PermissionError uncaught; handle it like the top-level directory.
    try:
        if not os.path.exists( fDir ):
            os.mkdir( fDir )
        if not os.path.exists( mDir ):
            os.mkdir( mDir )
    except PermissionError:
        print( "ERROR: Could not create/find subdirectories. Aborting." )
        sys.exit( 1 )
    ## Return the directories or quit if they couldn't be made
    if os.path.exists( mDir ) and os.path.exists( fDir ):
        return( mDir, fDir, False )
    else:
        print( "ERROR: Could not create/find subdirectories. Aborting." )
        os.rmdir( "%s.d" % filename )
        sys.exit( 1 )
def extract( filename, c ):
    """
    Extract all of the entries from the XML file 'filename' into the database
    behind cursor 'c' (table 'messages', 9 columns).

    SMS rows store the message body; MMS rows additionally hold the decoded
    base64 attachment as binary 'data' plus a source filename for it.
    """
    ## Report progress
    print( "Extracting content from '%s'..." % filename )
    ## Read everything up front; MMS parsing needs to see neighbouring lines
    with open( filename, "r" ) as fp:
        lines = fp.readlines()
    ## BUG FIX: these counters used to be re-initialized for every MMS line,
    ## so every unnamed video/image collided on "vid_001"/"img_001".  They
    ## must persist across the whole file.
    vidCount = 0
    imageCount = 0
    ## BUG FIX: the original looked up lines.index( l ) per MMS, which
    ## returns the FIRST occurrence of a line (wrong for duplicates, and
    ## O(n) per lookup); enumerate supplies the true index.
    for index, l in enumerate( lines ):
        if "<sms protocol" in l:
            ## Grab usable information from metadata
            address = l.partition( 'address="' )[2].partition( '"' )[0]
            date = int( l.partition( 'date="' )[2].partition( '"' )[0] )
            rDate = l.partition( 'readable_date="' )[2].partition( '"' )[0]
            body = l.partition( 'body="' )[2].partition( '" toa=' )[0]
            mType = l.partition( 'type="' )[2].partition( '"' )[0]
            name = l.partition( 'name="' )[2].partition( '"' )[0]
            ## Strip the leading '+' from international numbers
            address = address.replace( '+', '' )
            ## Put all of the information into a tuple
            stuff = ( "sms", address, mType, date, rDate, name, body,
                      None, "null")
            ## Put it into the database
            c.execute("""insert into messages values(?,?,?,?, ?, ?, ?, ?, ?)""",
                      stuff )
        if( "image/jpeg" in l or "image/png" in l or "image/gif" in l or
            "video/3gpp" in l):
            ## Get the proper file extension from the content type
            extension = l.partition( 'ct="' )[2].partition( '"' )[0]
            extension = extension.partition( '/' )[2]
            if extension == "3gpp":
                extension = "3gp"
            elif extension == "3gp":
                pass
            elif extension == "jpeg":
                extension = "jpg"
            ## Metadata is a couple lines up; a caption (if any) follows
            meta = lines[ index - 3 ]
            prevLine = lines[ index - 1 ]
            nextLine = lines[ index + 1 ]
            ## Grab information from the metadata
            address = meta.partition( 'address="' )[2].partition( '"' )[0]
            date = meta.partition( 'date="' )[2].partition( '"' )[0]
            rDate = meta.partition( 'readable_date="' )[2].partition( '"' )[0]
            name = meta.partition( 'contact_name="' )[2].partition( '"' )[0]
            ## Find the mType using the m_size value.  NOTE(review): the
            ## original comment said "null means outgoing", yet '1' renders
            ## as "Received" in Message.txt(); logic preserved as-is --
            ## verify against a real backup file.
            if meta.partition( 'm_size="' )[2].partition( '"' )[0] == "null":
                mType = '1'
            else:
                mType = '2'
            ## Strip the leading '+' from international numbers
            address = address.replace( '+', '' )
            ## Name the attachment, falling back to a running counter when
            ## the XML carries no src attribute
            if extension == "3gp":
                src = prevLine.partition( 'video src="' )[2].partition( '"' )[0]
                if src == "":
                    vidCount += 1
                    src = "vid_%03d.3gp" % vidCount
            else:
                src = prevLine.partition( 'img src="' )[2].partition( '"' )[0]
                if src == "":
                    imageCount += 1
                    src = "img_%03d.%s" % ( imageCount, extension )
            ## If they sent a message along with the MMS
            if 'text="' in nextLine:
                body = nextLine.partition( 'text="' )[2].partition( '"' )[0]
            else:
                body = ""
            ## Turn the MMS base64 text into usable binary data
            dataText = l.partition( 'data="' )[2].partition( '"' )[0]
            data = binascii.a2b_base64( dataText )
            ## Put it all into a tuple and insert the MMS row
            stuff = ( "mms", address, mType, date, rDate, name, body,
                      data, src )
            c.execute("""insert into messages values(?, ?,?,?,?, ?, ?, ?, ?)""",
                      stuff )
def print_help():
    ## Print usage plus a multi-line description of the script and its
    ## options to stdout. Returns None.
    print( "Usage: smsExtractor.py FILE.xml" )
    print( """
This script extracts SMS messages and MMS files (jpg, png or 3gp) from a given
XML file, as they're read, into a subdirectory named after the file in question.
Data extracted from "mms.xml" will go into "mms.xml.d", as an example.
NOTE:
This script assumes files generated by the Android app "SMS Backup & Restore" by
<NAME>. It isn't guaranteed nor even expected to work with anything else.
You can find his program at either of the following sites:
(Developer's site)
http://android.riteshsahu.com/apps/sms-backup-restore
(Google Play store)
https://play.google.com/store/apps/details?id=com.riteshsahu.SMSBackupRestore
Options:
-h or --help: This help text
-V or --version: Version and author info
-s or --subdirs: Write files to individual subdirectories per each contact
-f or --force: Overwrite extant files, or into extant directories
-m or --multiple: Write messages to multiple (individual) files
""" )
def print_version():
    ## Report the script version and author contact on stdout.
    for version_line in ( "smsExtractor.py, version 1.3",
                          "<NAME> <<EMAIL>>" ):
        print( version_line )
def write_message_to_separate_file( mDir, address, name, message ):
    """Write one message body to its own text file.

    mDir   : destination directory
    address: sender address (filename component)
    name   : contact name (filename component)
    message: object with .rDate (readable date) and .body attributes

    Files are named using an 'ADDRESS_NAME_DATE.txt' convention.
    """
    path = os.path.join( mDir, "%s_%s_%s.txt" %
                         ( address, name, message.rDate ) )
    ## 'with' guarantees the handle is closed even if the write raises
    ## (the previous open/write/close left the file open on error)
    with open( path, "w" ) as fp:
        fp.write( message.body )
def write_messages_to_single_file( mDir, address, name, messages ):
    """Write all of one contact's messages into a single text file.

    mDir    : destination directory
    address : sender address (filename component)
    name    : contact name (filename component)
    messages: iterable of objects exposing a txt() method

    Returns the number of messages written.
    Files are named using an 'ADDRESS_NAME.txt' convention.
    """
    path = os.path.join( mDir, "%s_%s.txt" % ( address, name ) )
    count = 0
    ## 'with' closes the file even if a message's txt() raises
    ## (the previous open/close pair leaked the handle on error)
    with open( path, "w" ) as fp:
        for m in messages:
            fp.write( m.txt() )
            count += 1
    return count
def write_messages( filename, mDir, fDir, c ):
## Grab all of the stuff from the database
c.execute( """select * from messages order by date""" )
data = c.fetchall()
## Get the addresses, get the names, see if we've got any MMS files
addresses = []
names = {}
filesPresent = False
for d in data:
if filesPresent is False and d[7] is not None:
filesPresent = True
if d[1] not in addresses:
addresses.append( d[1] )
names[ d[1] ] = d[5]
## If there are no files, remove the files subdirectory
if not filesPresent:
os.rmdir( fDir )
print( "Found %i pieces of data total with %i unique addresses" % ( len( data ), len( addresses ) ) )
## Start up a couple of counters
smsCount = 0
mmsCount = 0
print( "Writing Message Text." )
## Write all of the text messages to disk
| |
#!/usr/bin/env python
# * coding: utf8 *
'''
cloudb
Usage:
cloudb enable extensions [--verbosity=<level>]
cloudb create schema [--schemas=<name> --verbosity=<level>]
cloudb create admin-user [--verbosity=<level>]
cloudb create read-only-user [--verbosity=<level>]
cloudb create indexes [--verbosity=<level>]
cloudb drop schema [--schemas=<name> --verbosity=<level>]
cloudb import [--missing --dry-run --verbosity=<level> --skip-if-exists]
cloudb trim [--dry-run --verbosity=<level>]
cloudb update [--table=<tables>... --dry-run --verbosity=<level> --from-change-detection]
cloudb update-schema [--table=<tables>... --dry-run --verbosity=<level>]
'''
import sys
from datetime import datetime
from pathlib import Path
from time import perf_counter
import psycopg2
from colorama import Back, Fore, init
from docopt import docopt
from osgeo import gdal, ogr
import pyodbc
from . import CONNECTION_TABLE_CACHE, LOG, execute_sql, config, roles, schema, utils
from .index import INDEXES
#: GDAL/OGR driver tuning: list every table (not only spatial ones) from both
#: the MSSQL and Postgres drivers, use PostGIS geometry types, and bulk-load
#: with COPY instead of INSERT
gdal.SetConfigOption('MSSQLSPATIAL_LIST_ALL_TABLES', 'YES')
gdal.SetConfigOption('PG_LIST_ALL_TABLES', 'YES')
gdal.SetConfigOption('PG_USE_POSTGIS', 'YES')
gdal.SetConfigOption('PG_USE_COPY', 'YES')
def enable_extensions():
    '''enable the postgis and pg_stat_statements database extensions

    Takes no arguments; runs against the DBO connection. (The previous
    docstring documented an `owner` parameter that does not exist.)
    '''
    LOG.info('enabling extensions')
    execute_sql('CREATE EXTENSION postgis;CREATE EXTENSION pg_stat_statements;', config.DBO_CONNECTION)
def _get_tables_with_fields(connection_string, specific_tables):
    '''creates a list of tables with fields from the connection string
    connection_string: string to connect to db
    specific_tables: array of tables to get in schema.table format
    returns: array of tuples with 0: schema, 1: table name: 2: array of field names
    '''
    layer_schema_map = []
    filter_tables = False
    if specific_tables and len(specific_tables) > 0:
        LOG.debug(f'{Fore.CYAN}filtering for specific tables{Fore.RESET}')
        filter_tables = True
    LOG.verbose('connecting to database')
    connection = gdal.OpenEx(connection_string)
    LOG.verbose('getting layer count')
    table_count = connection.GetLayerCount()
    LOG.info(f'discovered {Fore.YELLOW}{table_count}{Fore.RESET} tables')
    for table_index in range(table_count):
        qualified_layer = connection.GetLayerByIndex(table_index)
        #: assumes every layer name is exactly 'schema.table' — a name with
        #: zero or more than one dot would raise ValueError here
        schema_name, layer = qualified_layer.GetName().split('.')
        schema_name = schema_name.lower()
        layer = layer.lower()
        LOG.debug(f'- {Fore.CYAN}{schema_name}.{layer}{Fore.RESET}')
        #: precedence is: excluded-schema OR (filtering AND not requested)
        if schema_name in config.EXCLUDE_SCHEMAS or filter_tables and f'{schema_name}.{layer}' not in specific_tables:
            LOG.verbose(f'  {Fore.RED}- skipping:{Fore.RESET} {schema_name}')
            continue
        definition = qualified_layer.GetLayerDefn()
        fields = []
        for field_index in range(definition.GetFieldCount()):
            field = definition.GetFieldDefn(field_index)
            field_name = field.GetName().lower()
            if field_name in config.EXCLUDE_FIELDS:
                LOG.verbose(f'  {Fore.YELLOW}- skipping:{Fore.RESET} {field_name}')
                continue
            fields.append(field_name)
        layer_schema_map.append((schema_name, layer, fields))
        del qualified_layer
    schema_map_count = len(layer_schema_map)
    noun = 'tables'
    if schema_map_count == 1:
        noun = 'table'
    LOG.info(f'planning to import {Fore.GREEN}{schema_map_count}{Fore.RESET} {noun}')
    #: sort by schema so imports are grouped per schema
    layer_schema_map.sort(key=lambda items: items[0])
    #: rebinding to None releases the GDAL dataset handle
    connection = None
    return layer_schema_map
def _get_schema_table_name_map(table_name):
'''a method to split a qualified table into it's parts
'''
parts = table_name.split('.')
schema_index = 1
table_index = 2
if len(parts) == 2:
schema_index = 0
table_index = 1
return {'schema': parts[schema_index].lower(), 'table_name': parts[table_index].lower()}
def _format_title_for_pg(title):
    '''lower-case a layer title for use as a postgres name

    Removes the first occurrence of 'utah ' and replaces spaces with
    underscores. A None title is returned unchanged.
    '''
    if title is None:
        return None
    pg_name = title.lower().replace('utah ', '', 1).replace(' ', '_')
    LOG.verbose(f'updating {Fore.MAGENTA}{title}{Fore.RESET} to {Fore.CYAN}{pg_name}{Fore.RESET}')
    return pg_name
def _get_table_meta():
    '''build a nested mapping of schema -> table -> {title, geometry_type}
    from the meta.agolitems table in the source database
    '''
    meta_map = {}
    with pyodbc.connect(config.get_source_connection()[6:]) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT [TABLENAME],[AGOL_PUBLISHED_NAME],[GEOMETRY_TYPE] FROM [SGID].[META].[AGOLITEMS]")
        rows = cursor.fetchall()

    #: table: SGID.ENVIRONMENT.DAQPermitCompApproval
    #: title: Utah Retail Culinary Water Service Areas
    #: geometry_type: POINT POLYGON POLYLINE
    for table, title, geometry_type in rows:
        parts = _get_schema_table_name_map(table)
        schema_tables = meta_map.setdefault(parts['schema'], {})
        schema_tables[parts['table_name']] = {
            'title': _format_title_for_pg(title),
            'geometry_type': geometry_type,
        }

    return meta_map
def _populate_table_cache(connection_string, pgify=False, name_map=None):
    '''adds all the table from a connection string to a dictionary for caching purposes
    connection_string: string to connect to db
    pgify: lowercases and adds underscores
    name_map: dictionary to replace names from the meta table; required when
        pgify is True (this function indexes into it with no None check)
    '''
    skip_schema = ['meta', 'sde']
    LOG.verbose('connecting to database')
    #: gdal.open gave a 0 table count
    connection = ogr.Open(connection_string)
    LOG.verbose('getting layer count')
    table_count = connection.GetLayerCount()
    LOG.debug(f'found {Fore.YELLOW}{table_count}{Fore.RESET} total tables for cache')
    CONNECTION_TABLE_CACHE.setdefault(connection_string, [])
    for table_index in range(table_count):
        qualified_layer = connection.GetLayerByIndex(table_index)
        table = None
        if qualified_layer:
            name = qualified_layer.GetName()
            LOG.verbose(f'qualified layer name: {name}')
            #: unqualified names cannot be keyed by schema; skip them
            if '.' not in name:
                continue
            table_parts = _get_schema_table_name_map(name)
            name = f"{table_parts['schema']}.{table_parts['table_name']}"
            if table_parts['schema'] in skip_schema:
                continue
            if pgify:
                pg_title = _format_title_for_pg(table_parts['table_name'])
                schema_name = table_parts['schema']
                #: translate to the published name; tables unknown to the
                #: meta map are left out of the cache entirely
                if schema_name in name_map and pg_title in name_map[schema_name]:
                    table, _ = name_map[schema_name][pg_title].values()
                else:
                    continue
                name = f"{schema_name}.{table}"
            LOG.verbose(f'found layer: {name}')
            CONNECTION_TABLE_CACHE[connection_string].append(name)
        del qualified_layer
    #: rebinding to None releases the OGR data source handle
    connection = None
def _check_if_exists(connection_string, schema_name, table, agol_meta_map):
    '''returns true or false if a table exists in the connection_string db
    connection_string: string of db to check
    schema_name: string schema name
    table: string table name
    agol_meta_map: nested dict from _get_table_meta used to translate the
        source table name to its published name
    returns: bool
    '''
    LOG.debug('checking cache')
    #: translate to the published table name when the meta table knows it
    if schema_name in agol_meta_map and table in agol_meta_map[schema_name]:
        table, _ = agol_meta_map[schema_name][table].values()
    if connection_string in CONNECTION_TABLE_CACHE and len(CONNECTION_TABLE_CACHE[connection_string]) > 0:
        LOG.verbose('cache hit')
        return f'{schema_name}.{table}' in CONNECTION_TABLE_CACHE[connection_string]
    LOG.verbose('cache miss')
    _populate_table_cache(connection_string)
    #: FIX: this lookup previously interpolated the imported `schema` module
    #: (f'{schema}.{table}') instead of the schema_name argument, so after a
    #: cache miss the table was always reported as missing
    return f'{schema_name}.{table}' in CONNECTION_TABLE_CACHE[connection_string]
def _replace_data(schema_name, layer, fields, agol_meta_map, dry_run):
    '''the insert logic for writing to the destination
    schema_name: string schema of the source table
    layer: string source table name
    fields: list of field names to copy (empty list selects objectid only)
    agol_meta_map: nested dict from _get_table_meta (published title and
        geometry type per table)
    dry_run: bool; when True only log what would happen
    '''
    cloud_db = config.format_ogr_connection(config.DBO_CONNECTION)
    internal_sgid = config.get_source_connection()
    internal_name = f'{schema_name}.{layer}'
    sql = f'SELECT objectid FROM "{schema_name}.{layer}"'
    if len(fields) > 0:
        #: escape reserved words?
        fields = [f'"{field}"' for field in fields]
        sql = f"SELECT {','.join(fields)} FROM \"{schema_name}.{layer}\""
    options = [
        '-f',
        'PostgreSQL',
        '-dialect',
        'OGRSQL',
        '-sql',
        sql,
        '-lco',
        'FID=xid',
        '-lco',
        f'SCHEMA={schema_name}',
        '-lco',
        'OVERWRITE=YES',
        '-lco',
        'GEOMETRY_NAME=shape',
        '-lco',
        'PRECISION=YES',
        '-a_srs',
        config.UTM,
    ]
    #: FIX: default geometry_type so the progress log below cannot raise
    #: NameError when the layer is missing from the meta map entirely
    #: (previously it was only assigned inside the branch below)
    geometry_type = None
    if schema_name in agol_meta_map and layer in agol_meta_map[schema_name]:
        new_name, geometry_type = agol_meta_map[schema_name][layer].values()
        if new_name:
            layer = new_name
            #: promote to multi-part types so mixed single/multi sources load
            if geometry_type == 'POLYGON':
                options.append('-nlt')
                options.append('MULTIPOLYGON')
            elif geometry_type == 'POLYLINE':
                options.append('-nlt')
                options.append('MULTILINESTRING')
            elif geometry_type == 'STAND ALONE':
                options.append('-nlt')
                options.append('NONE')
            else:
                options.append('-nlt')
                options.append(geometry_type)
        else:
            LOG.info(f'- skipping {Fore.MAGENTA}{layer}{Fore.RESET} since it is no longer in the meta table{Fore.RESET}')
            return
    options.append('-nln')
    options.append(f'{layer}')
    pg_options = None
    try:
        pg_options = gdal.VectorTranslateOptions(options=options)
    except Exception:
        LOG.fatal(f'- {Fore.RED}invalid options{Fore.RESET} for {Fore.BLUE}{layer}{Fore.RESET}')
        return
    LOG.info(f'- inserting {Fore.MAGENTA}{layer}{Fore.RESET} into {Fore.BLUE}{schema_name}{Fore.RESET} as {Fore.CYAN}{geometry_type}{Fore.RESET}')
    LOG.debug(f'with {Fore.CYAN}{sql}{Fore.RESET}')
    if not dry_run:
        start_seconds = perf_counter()
        result = gdal.VectorTranslate(cloud_db, internal_sgid, options=pg_options)
        LOG.debug(f'- {Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')
        del result
        LOG.debug(f'- {Fore.CYAN}make valid{Fore.RESET}')
        qualified_layer = f'{schema_name}.{layer}'
        #: make_valid and create_index are defined elsewhere in this module
        make_valid(qualified_layer)
        schema.update_schema_for(internal_name, qualified_layer)
        create_index(qualified_layer)
def import_data(if_not_exists, missing_only, dry_run):
    '''imports data from sql to postgis
    if_not_exists: bool, skip tables that already exist in the destination
    missing_only: bool, only import tables missing from the destination
    dry_run: bool, do not modify the destination
    '''
    cloud_db = config.format_ogr_connection(config.DBO_CONNECTION)
    internal_sgid = config.get_source_connection()
    tables = []
    if missing_only:
        #: NOTE(review): per _get_table_sets, `source` is built from the
        #: cloud (destination) connection and `destination` from the
        #: internal SGID (source), so this difference is "in SGID but not
        #: yet in the cloud db" — the log message below is correct
        source, destination = _get_table_sets()
        tables = destination - source
        table_count = len(tables)
        verb = 'are'
        noun = 'tables'
        if table_count == 1:
            verb = 'is'
            noun = 'table'
        LOG.info(f'there {verb} {Fore.CYAN}{table_count}{Fore.RESET} {noun} in the source not in the destination')
        LOG.verbose(','.join(tables))
        if table_count == 0:
            return
    agol_meta_map = _get_table_meta()
    if missing_only:
        origin_table_name = []
        #: reverse lookup the table names
        for table in tables:
            schema_name, table_name = table.split('.')
            schema_name = schema_name.lower()
            table_name = table_name.lower()
            #: NOTE(review): raises KeyError if the schema is absent from
            #: the meta map — confirm every SGID schema appears there
            schema_items = agol_meta_map[schema_name]
            for origin_name in schema_items:
                if schema_items[origin_name]['title'] == table_name:
                    origin_table_name.append(f'{schema_name}.{origin_name}')
                    break
        if len(origin_table_name) > 0:
            tables = origin_table_name
    layer_schema_map = _get_tables_with_fields(internal_sgid, tables)
    for schema_name, layer, fields in layer_schema_map:
        if if_not_exists and _check_if_exists(cloud_db, schema_name, layer, agol_meta_map):
            LOG.info(f'- skipping {Fore.MAGENTA}{schema_name}.{layer} {Fore.CYAN}already exists{Fore.RESET}')
            continue
        _replace_data(schema_name, layer, fields, agol_meta_map, dry_run)
def _get_table_sets():
    '''gets a set of each schema.tablename from the source and destination database to help figure out what is different between them

    Returns a (source, destination) pair of sets.

    NOTE(review): the local names look swapped — `source` is built from the
    cloud (destination) connection and `destination` from the internal SGID
    (source) connection. Callers (import_data, trim) depend on this exact
    return order, so it is documented here rather than renamed.
    '''
    cloud_db = config.format_ogr_connection(config.DBO_CONNECTION)
    internal_sgid = config.get_source_connection()
    if cloud_db not in CONNECTION_TABLE_CACHE:
        _populate_table_cache(cloud_db)
    if internal_sgid not in CONNECTION_TABLE_CACHE:
        #: pgify so SGID names are comparable with the postgres names
        _populate_table_cache(internal_sgid, pgify=True, name_map=_get_table_meta())
    source = set(CONNECTION_TABLE_CACHE[cloud_db])
    destination = set(CONNECTION_TABLE_CACHE[internal_sgid])
    return source, destination
def trim(dry_run):
    '''drop tables from the destination that no longer exist in the source

    Diffs the two table sets from _get_table_sets and issues a single
    DROP TABLE for everything only present in the destination.
    dry_run: bool; when True only log what would be dropped
    '''
    source, destination = _get_table_sets()

    items_to_trim = source - destination
    trim_count = len(items_to_trim)

    verb, noun = ('is', 'table') if trim_count == 1 else ('are', 'tables')

    LOG.info(f'there {verb} {Fore.CYAN}{trim_count}{Fore.RESET} {noun} in the destination not in the source')
    LOG.verbose(','.join(items_to_trim))

    if trim_count == 0:
        return

    #: quote the table part; renamed from `schema` to avoid shadowing the
    #: imported schema module
    clean_items = []
    for item in items_to_trim:
        schema_part, table_part = item.split('.')
        clean_items.append(f'{schema_part}."{table_part}"')

    sql = f'DROP TABLE {",".join(clean_items)}'
    LOG.info(f'dropping {clean_items}')

    if not dry_run:
        execute_sql(sql, config.DBO_CONNECTION)

    LOG.info(f'{Fore.GREEN}finished{Fore.RESET}')
def update(specific_tables, dry_run):
    '''update specific tables in the destination
    specific_tables: a list of tables from the source without the schema
    dry_run: bool if insertion should actually happen
    '''
    internal_sgid = config.get_source_connection()
    if not specific_tables or len(specific_tables) == 0:
        LOG.info(f'{Fore.YELLOW} no tables to import!{Fore.RESET}')
        return
    layer_schema_map = _get_tables_with_fields(internal_sgid, specific_tables)
    if len(layer_schema_map) == 0:
        LOG.info(f'{Fore.YELLOW} no matching table found!{Fore.RESET}')
        return
    agol_meta_map = _get_table_meta()
    #: warn when some requested names did not resolve to discovered tables
    if len(specific_tables) != len(layer_schema_map):
        #: NOTE(review): `warn` is deprecated on stdlib loggers in favor of
        #: `warning` — confirm what logger type LOG is before changing
        LOG.warn((
            f'{Back.YELLOW}{Fore.BLACK}input {len(specific_tables)} tables but only {len(layer_schema_map)} found.{Fore.RESET}{Back.RESET} '
            'check your spelling'
        ))
    for schema_name, layer, fields in layer_schema_map:
        _replace_data(schema_name, layer, fields, agol_meta_map, dry_run)
def read_last_check_date():
    '''return the first line of ./.last_checked, or None when it is empty

    Creates the marker file if it does not exist yet.
    '''
    marker = Path('./.last_checked')
    if not marker.exists():
        marker.touch()

    with marker.open('r') as log_file:
        first_line = log_file.readline().strip()

    return first_line if first_line else None
def update_last_check_date():
    '''record today's date (YYYY-MM-DD) in ./.last_checked

    Creates the marker file if missing, then overwrites its contents.
    '''
    marker = Path('./.last_checked')
    if not marker.exists():
        marker.touch()

    with marker.open('w') as log_file:
        log_file.write(datetime.today().strftime('%Y-%m-%d'))
def get_tables_from_change_detection():
last_checked = read_last_check_date()
if last_checked is None:
last_checked = datetime.today()
else:
last_checked = datetime.strptime(last_checked, '%Y-%m-%d')
LOG.info(f'Checking for changes since {Fore.MAGENTA}{last_checked}{Fore.RESET}')
updated_tables = []
with pyodbc.connect(config.get_source_connection()[6:]) as connection:
cursor = connection.cursor()
cursor.execute("SELECT [TABLE_NAME] FROM | |
import sys
from twisted.internet import defer, task
from twisted.internet.endpoints import clientFromString
from twisted.python.failure import Failure
from twisted.python import usage, log
import click
import txtorcon
from . import carml_readme
from . import carml_check_pypi
from . import carml_stream
from . import carml_events
from . import carml_circ
from . import carml_cmd
from . import carml_monitor
from . import carml_newid
from . import carml_pastebin
from . import carml_copybin
from . import carml_relay
from . import carml_tbb
from . import carml_onion
from . import carml_tmux
from . import carml_xplanet
from . import carml_graph
# log levels accepted by the monitor command's --log-level option
LOG_LEVELS = ["DEBUG", "INFO", "NOTICE", "WARN", "ERR"]
class LogObserver(object):
    """
    A Twisted log observer that forwards only carml's own print()-ed
    messages (events carrying a truthy 'printed' key) to the real
    stdout/stderr, ignoring other Twisted/txtorcon log noise.
    """

    def __init__(self, timestamp=False, flush=True):
        # timestamp: prefix each message with the current time
        # flush: flush the output stream after every message
        self.timestamp = timestamp
        self.flush = flush
        # we keep our own copies of these, because Twisted's
        # startLoggingWithObserver overwrites them with its own
        # monitoring machinery
        self.stdout = sys.stdout
        self.stderr = sys.stderr

    def __call__(self, arg):
        # we don't want to print out every little thing logged by
        # Twisted or txtorcon, just what we output (which is always
        # print()-ed)
        try:
            if not arg['printed']:
                return
        except KeyError:
            return
        msg = ' '.join(arg['message'])
        # possibly add timestamps
        if self.timestamp:
            # NOTE(review): neither `util` nor `time` appears in this
            # module's visible imports — this line looks like it would raise
            # NameError when timestamps are enabled; confirm against the
            # full file
            msg = util.colors.cyan(time.asctime()) + ': ' + msg
        # figure out if we want stdout or stderr
        out = self.stdout
        if 'isError' in arg and arg['isError']:
            out = self.stderr
        if not msg and 'failure' in arg:
            # NOTE(review): `util` is unresolved here as well — verify
            msg = util.colors.red('Error: ') + arg['failure'].getErrorMessage()
        # actually print message
        print(msg, file=out)
        if self.flush:
            out.flush()
class Config(object):
    '''
    Passed as the Click object (@pass_obj) to all CLI methods.

    Attributes (timestamps, no_color, info, quiet, debug, password,
    connect, color, debug_protocol, json) are assigned dynamically by
    the top-level `carml` group.
    '''
@click.group()
@click.option('--timestamps', '-t', help='Prepend timestamps to each line.', is_flag=True)
@click.option('--no-color', '-n', help='Same as --color=no.', is_flag=True, default=None)
@click.option('--info', '-i', help='Show version of Tor we connect to (on stderr).', is_flag=True)
@click.option('--quiet', '-q', help='Some commands show less information with this option.', is_flag=True)
@click.option('--debug', '-d', help='Debug; print stack traces on error.', is_flag=True)
@click.option(
    '--debug-protocol',
    help=('Low-level protocol debugging. Wraps Twisted methods and dumps all bytes '
          'read/written in different colours'),
    is_flag=True,
)
@click.option(
    '--password', '-p',
    help=('Password to authenticate to Tor with. Using cookie-based authentication'
          'is much easier if you are on the same machine.'),
    default=None,
    required=False,
)
@click.option(
    '--connect', '-c',
    default=None,
    help=('Where to connect to Tor. This accepts any Twisted client endpoint '
          'string, or an ip:port pair. Examples: "tcp:localhost:9151" or '
          '"unix:/var/run/tor/control".'),
    metavar='ENDPOINT',
)
@click.option(
    '--color', '-C',
    type=click.Choice(['auto', 'no', 'always']),
    default='auto',
    help='Colourize output using ANSI commands.',
)
@click.option(
    '--json',
    default=None,
    help="Provide JSON output. Not compatible with all commands.",
    is_flag=True,
)
@click.pass_context
def carml(ctx, timestamps, no_color, info, quiet, debug, debug_protocol, password, connect, color, json):
    """
    Top-level command group: validates the colour flags, copies every CLI
    option onto a Config instance (available to subcommands via @pass_obj)
    and installs the custom log observer.
    """
    # reject contradictory/redundant combinations of --no-color and --color
    if (color == 'always' and no_color) or \
       (color == 'no' and no_color is True):
        raise click.UsageError(
            "--no-color={} but --color={}".format(no_color, color)
        )
    cfg = Config()
    ctx.obj = cfg
    cfg.timestamps = timestamps
    cfg.no_color = no_color
    cfg.info = info
    cfg.quiet = quiet
    cfg.debug = debug
    cfg.password = password
    cfg.connect = connect
    cfg.color = color
    cfg.debug_protocol = debug_protocol
    # normalize the tri-state flag (None/True) to a plain bool
    cfg.json = True if json else False

    # start logging
    _log_observer = LogObserver()
    log.startLoggingWithObserver(_log_observer, setStdout=False)
def _run_command(cmd, cfg, *args, **kwargs):
    """
    Connect to Tor (per cfg.connect), optionally install the low-level
    protocol debug wrappers, run the async `cmd`, then stop the reactor
    and sys.exit(0) on success or sys.exit(1) on failure.
    """

    async def _startup(reactor):
        # turn cfg.connect into a Twisted client endpoint; None lets
        # txtorcon pick its own default
        if cfg.connect is None:
            ep = None
        elif cfg.connect.startswith('tcp:') or cfg.connect.startswith('unix:'):
            ep = clientFromString(reactor, cfg.connect)
        else:
            if ':' in cfg.connect:
                ep = clientFromString(reactor, 'tcp:{}'.format(cfg.connect))
            else:
                ep = clientFromString(reactor, 'tcp:localhost:{}'.format(cfg.connect))
        tor = await txtorcon.connect(reactor, ep)
        if ep is None:
            print("Connected via {}".format(str(tor.protocol.transport.addr, "utf8")))

        if cfg.debug_protocol:
            click.echo("Low-level protocol debugging: ", nl=False)
            click.echo(click.style("data we write to Tor, ", fg='blue'), nl=False)
            click.echo(click.style("data from Tor", fg='yellow') + ".")

            # dump every CRLF-terminated line we send to Tor, then forward
            # it to the original transport.write
            def write_wrapper(data):
                tail = data
                while len(tail):
                    head, tail = tail.split('\r\n', 1)
                    click.echo(">>> " + click.style(head, fg='blue'), nl=False)
                    click.echo()
                return orig_write(data)
            orig_write = tor.protocol.transport.write
            tor.protocol.transport.write = write_wrapper

            # dump every complete line received from Tor; a partial trailing
            # line is printed without newline and completed on the next call
            # (tracked via the write_prefix attribute)
            def read_wrapper(data):
                tail = data
                while '\r\n' in tail:
                    head, tail = tail.split('\r\n', 1)
                    if not read_wrapper.write_prefix:
                        click.echo(click.style(head, fg='yellow'))
                        read_wrapper.write_prefix = True
                    else:
                        click.echo("<<< " + click.style(head, fg='yellow'))
                if len(tail):
                    click.echo("<<< " + click.style(tail, fg='yellow'), nl=False)
                    read_wrapper.write_prefix = False
                else:
                    click.echo()
                return orig_read(data)
            read_wrapper.write_prefix = True
            orig_read = tor.protocol.dataReceived
            tor.protocol.dataReceived = read_wrapper

        if cfg.info:
            info = await tor.protocol.get_info('version', 'status/version/current', 'dormant')
            click.echo(
                'Connected to a Tor version "{version}" (status: '
                '{status/version/current}).\n'.format(**info)
            )
        await cmd(reactor, cfg, tor, *args, **kwargs)

    from twisted.internet import reactor
    # single-element list so the errback closure below can mutate the
    # process exit code
    codes = [0]

    def _the_bad_stuff(f):
        print("Error: {}".format(f.value))
        if cfg.debug:
            print(f.getTraceback())
        codes[0] = 1
        return None

    def _go():
        d = defer.ensureDeferred(_startup(reactor))
        d.addErrback(_the_bad_stuff)
        d.addBoth(lambda _: reactor.stop())

    # XXX can't we replace this with react() instead?
    reactor.callWhenRunning(_go)
    reactor.run()
    sys.exit(codes[0])
def _no_json(cfg):
"""
Internal helper, marking this command as not allowing --json
"""
if cfg.json:
raise click.Abort("Doesn't accept --json flag")
@carml.command()
@click.pass_obj
def readme(cfg):
    """
    Show the README.rst
    """
    # plain document output; --json makes no sense here
    _no_json(cfg)
    return _run_command(
        carml_readme.run,
        cfg,
    )
@carml.command()
@click.option(
    '--package', '-p',
    help='Name of the package to check (unfortunately, case matters)',
    required=True,
)
@click.option(
    '--revision', '-r',
    help='Specific version to check (default: latest)',
    default=None,
)
@click.pass_obj
def check_pypi(cfg, package, revision):
    """
    Check a PyPI package hash across multiple circuits.
    """
    # plain-text output only
    _no_json(cfg)
    return _run_command(
        carml_check_pypi.run,
        cfg, package, revision,
    )
@carml.command()
@click.option(
    '--if-unused', '-u',
    help='When deleting, pass the IfUnused flag to Tor.',
    is_flag=True,
)
@click.option(
    '--verbose',
    help='More information per circuit.',
    is_flag=True,
)
@click.option(
    '--list', '-L',
    help='List existing circuits.',
    is_flag=True,
    default=None,
)
@click.option(
    '--build', '-b',
    help=('Build a new circuit, given a comma-separated list of router names or'
          ' IDs. Use "auto" to let Tor select the route.'),
    default=None,
)
@click.option(
    '--delete',
    help='Delete a circuit by its ID.',
    default=None,
    multiple=True,
    type=int,
)
@click.pass_obj
def circ(cfg, if_unused, verbose, list, build, delete):
    """
    Manipulate Tor circuits.

    Exactly one of --list, --build or --delete is required.
    """
    _no_json(cfg)
    action_count = sum(1 for option in (list, build, delete) if option)
    if action_count != 1:
        raise click.UsageError(
            "Specify just one of --list, --build or --delete"
        )
    return _run_command(
        carml_circ.run,
        cfg, if_unused, verbose, list, build, delete,
    )
@carml.command()
@click.argument(
    "command_args",
    nargs=-1,
)
@click.pass_obj
def cmd(cfg, command_args):
    """
    Run the rest of the args as a Tor control command. For example
    "GETCONF SocksPort" or "GETINFO net/listeners/socks".
    """
    # output is the raw controller response; JSON mode is unsupported
    _no_json(cfg)
    return _run_command(
        carml_cmd.run,
        cfg, command_args,
    )
@carml.command()
@click.option(
    '--list', '-L',
    help='Show available events.',
    is_flag=True,
)
@click.option(
    '--once',
    help='Output exactly one and quit (same as -n 1 or --count=1).',
    is_flag=True,
)
@click.option(
    '--show-event', '-s',
    help='Prefix each line with the event it is from.',
    is_flag=True,
)
@click.option(
    '--count', '-n',
    help='Output this many events, and quit (default is unlimited).',
    type=int,
)
@click.argument(
    "events",
    nargs=-1,
)
@click.pass_obj
def events(cfg, list, once, show_event, count, events):
    """
    Follow any Tor events, listed as positional arguments.

    At least one event name is required unless --list is given.
    """
    _no_json(cfg)
    if not (events or list):
        raise click.UsageError(
            "Must specify at least one event"
        )
    return _run_command(
        carml_events.run,
        cfg, list, once, show_event, count, events,
    )
@carml.command()
@click.option(
    '--list', '-L',
    help='List existing streams.',
    is_flag=True,
)
@click.option(
    '--follow', '-f',
    help='Follow stream creation.',
    is_flag=True,
)
@click.option(
    '--attach', '-a',
    help='Attach all new streams to a particular circuit-id.',
    type=int,
    default=None,
)
@click.option(
    '--close', '-d',
    help='Delete/close a stream by its ID.',
    type=int,
    default=None,
)
@click.option(
    '--verbose', '-v',
    help='Show more details.',
    is_flag=True,
)
@click.pass_context
def stream(ctx, list, follow, attach, close, verbose):
    """
    Manipulate Tor streams.

    Exactly one action option is required.
    """
    cfg = ctx.obj
    _no_json(cfg)
    chosen = sum(map(bool, (list, follow, attach, close)))
    if chosen != 1:
        click.echo(ctx.get_help())
        raise click.UsageError(
            "Must specify one of --list, --follow, --attach or --close"
        )
    return _run_command(
        carml_stream.run,
        cfg, list, follow, attach, close, verbose,
    )
@carml.command()
@click.option(
    '--once', '-o',
    help='Exit after printing the current state.',
    is_flag=True,
)
@click.option(
    '--no-streams', '-s',
    help='Without this, list Tor streams.',
    is_flag=True,
)
@click.option(
    '--no-circuits', '-c',
    help='Without this, list Tor circuits.',
    is_flag=True,
)
@click.option(
    '--no-addr', '-a',
    help='Without this, list address mappings (and expirations, with -f).',
    is_flag=True,
)
@click.option(
    '--no-guards', '-g',
    help='Without this, Information about your current Guards.',
    is_flag=True,
)
@click.option(
    '--verbose', '-v',
    help='Additional information. Circuits: ip, location, asn, country-code.',
    is_flag=True,
)
@click.option(
    '--log-level', '-l',
    default=[],
    type=click.Choice(LOG_LEVELS),
    multiple=True,
)
@click.pass_context
def monitor(ctx, verbose, no_guards, no_addr, no_circuits, no_streams, once, log_level):
    """
    General information about a running Tor; streams, circuits,
    address-maps and event monitoring.

    Everything is shown by default; each --no-* flag suppresses one section.
    """
    cfg = ctx.obj
    # human-readable status output only
    _no_json(cfg)
    return _run_command(
        carml_monitor.run,
        cfg, verbose, no_guards, no_addr, no_circuits, no_streams, once, log_level,
    )
@carml.command()
@click.pass_context
def newid(ctx):
    """
    Ask Tor for a new identity via NEWNYM, and listen for the response
    acknowledgement.
    """
    cfg = ctx.obj
    # textual acknowledgement only; --json unsupported
    _no_json(cfg)
    return _run_command(
        carml_newid.run,
        cfg,
    )
@carml.command()
@click.option(
    '--dry-run', '-d',
    help='Test locally; no Tor launch.',
    is_flag=True,
)
@click.option(
    '--once',
    help='Same as --count=1.',
    is_flag=True,
)
@click.option(
    '--file', '-f',
    default=sys.stdin,
    type=click.File('r'),
    help='Filename to use as input (instead of stdin)',
)
@click.option(
    '--count', '-n',
    default=None,
    help='Number of requests to serve.',
    type=int,
)
@click.option(
    '--keys', '-k',
    default=0,
    help='Number of authentication keys to create.',
    type=int,
)
@click.pass_context
def pastebin(ctx, dry_run, once, file, count, keys):
    """
    Put stdin (or a file) on a fresh onion-service easily.
    """
    # validate the count/once combination before doing any work
    if count is not None:
        if count < 0:
            raise click.UsageError(
                "--count must be positive"
            )
        if once:
            raise click.UsageError(
                "Only specify one of --count or --once"
            )
    cfg = ctx.obj
    _no_json(cfg)
    return _run_command(
        carml_pastebin.run,
        cfg, dry_run, once, file, count, keys,
    )
@carml.command()
@click.option(
'--list',
help='List all relays by hex ID.',
is_flag=True,
)
@click.option(
'--info',
default='',
help='Look up by fingerprint | |
['upstream'],
#'server': ['upstream'],
'upstream': ['http'],
# REF: http://wiki.nginx.org/HttpUseridModule
'userid': ['http', 'server', 'location'],
'userid_domain': ['http', 'server', 'location'],
'userid_expires': ['http', 'server', 'location'],
'userid_name': ['http', 'server', 'location'],
'userid_p3p': ['http', 'server', 'location'],
'userid_path': ['http', 'server', 'location'],
'userid_service': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpUwsgiModule
'uwsgi_bind': ['http', 'server', 'location'],
'uwsgi_buffer_size': ['http', 'server', 'location'],
'uwsgi_buffering': ['http', 'server', 'location'],
'uwsgi_buffers': ['http', 'server', 'location'],
'uwsgi_busy_buffers_size': ['http', 'server', 'location', 'if'],
'uwsgi_cache': ['http', 'server', 'location'],
'uwsgi_cache_bypass': ['http', 'server', 'location'],
'uwsgi_cache_key': ['http', 'server', 'location'],
'uwsgi_cache_lock': ['http', 'server', 'location'],
'uwsgi_cache_lock_timeout': ['http', 'server', 'location'],
'uwsgi_cache_methods': ['http', 'server', 'location'],
'uwsgi_cache_min_uses': ['http', 'server', 'location'],
'uwsgi_cache_path': ['http', 'server', 'location'],
'uwsgi_cache_use_stale': ['http', 'server', 'location'],
'uwsgi_cache_valid': ['http', 'server', 'location'],
'uwsgi_connect_timeout': ['http', 'server', 'location'],
'uwsgi_hide_header': ['http', 'server', 'location'],
'uwsgi_ignore_client_abort': ['http', 'server', 'location'],
'uwsgi_ignore_headers': ['http', 'server', 'location'],
'uwsgi_intercept_errors': ['http', 'server', 'location'],
'uwsgi_max_temp_file_size': ['http', 'server', 'location'], #?
'uwsgi_modifier1': ['server', 'location'],
'uwsgi_modifier2': ['server', 'location'],
'uwsgi_next_upstream': ['http', 'server', 'location'],
'uwsgi_no_cache': ['http', 'server', 'location'],
'uwsgi_param': ['http', 'server', 'location'],
'uwsgi_pass': ['location', 'if in location', 'if'], #+if
'uwsgi_pass_header': ['http', 'server', 'location'],
'uwsgi_pass_request_body': ['http', 'server', 'location'],
'uwsgi_pass_request_headers': ['http', 'server', 'location'],
'uwsgi_read_timeout': ['http', 'server', 'location'],
'uwsgi_send_timeout': ['http', 'server', 'location'],
'uwsgi_store': ['http', 'server', 'location'],
'uwsgi_store_access': ['http', 'server', 'location'],
'uwsgi_string': ['server', 'location'],
'uwsgi_temp_file_write_size': ['http', 'server', 'location', 'if'],
'uwsgi_temp_path': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpSslModule
'ssl': ['http', 'server'],
'ssl_certificate': ['http', 'server'],
'ssl_certificate_key': ['http', 'server'],
'ssl_ciphers': ['http', 'server'],
'ssl_client_certificate': ['http', 'server'],
'ssl_crl': ['http', 'server'],
'ssl_dhparam': ['http', 'server'],
'ssl_prefer_server_ciphers': ['http', 'server'],
'ssl_protocols': ['http', 'server'],
'ssl_verify_client': ['http', 'server'],
'ssl_verify_depth': ['http', 'server'],
'ssl_session_cache': ['http', 'server'],
'ssl_session_timeout': ['http', 'server'],
'ssl_engine': ['http', 'server'],
# REF: http://wiki.nginx.org/X-accel
# no directive
}
# module name : can have a param if it acts as a context
# if False then a module name with a param would be considered a directive
MODULES = {
    'events': False,
    'http': False,
    'server': False,
    'upstream': True,
    'location': True,
    'types': False,
    'if': True,
    'limit_except': True,
}

# default values reported for these directives when not set explicitly
DEFAULTVALS = {
    'client_max_body_size': '1m',
    'keepalive_timeout': '75s',
}

# main nginx config path and the per-server config directory
NGINXCONF = '/etc/nginx/nginx.conf'
SERVERCONF = '/etc/nginx/conf.d/'
# prefix marking a directive as disabled while keeping it in the file
COMMENTFLAG = '#v#'
# comment marker identifying lines generated by this panel
GENBY='GENDBYINPANEL'
def loadconfig(conf=None, getlineinfo=False):
    """Load an nginx config file into a dict.

    Falls back to NGINXCONF when ``conf`` is not given. Returns False
    when the file does not exist, otherwise the parsed config dict.
    """
    path = conf or NGINXCONF
    if not os.path.exists(path):
        return False
    return _loadconfig(path, getlineinfo)
def _loadconfig(conf, getlineinfo, config=None, context_stack=None):
    """Recursively load nginx config and return a dict.

    Parses ``conf`` line by line, tracking the open contexts ('http',
    'server', 'location', ...) on ``context_stack`` and storing each
    directive in the innermost context dict. ``include`` directives are
    expanded by recursing into the matched files with the same shared
    ``config``/``context_stack`` state.

    When ``getlineinfo`` is true, each stored value is a dict carrying
    the file index, line span and raw value (so callers can edit files
    in place); otherwise values are plain strings.
    """
    if not config:
        # Top-level call: create the shared state. '_' is the root context.
        file_i = 0
        context = '_'
        config = {'_files': [conf], '_': [{}], '_isdirty': False}
        context_stack = [context]
        cconfig = config[context][-1]
    else:
        # Recursive call (include expansion): register this file and walk
        # down to the dict of the currently open context.
        if getlineinfo:
            if conf not in config['_files']:
                file_i = len(config['_files'])
                config['_files'].append(conf)
            else:
                file_i = config['_files'].index(conf)
        cconfig = config
        for c in context_stack: cconfig = cconfig[c][-1]
        context = context_stack[-1]
    line_buffer = []
    with open(conf) as f:
        for line_i, line in enumerate(f):
            line = line.strip()
            if DEBUG:
                print('----------' + line)
            # deal with our special comment string (a directive disabled
            # by the panel, e.g. '#v#listen 80;')
            line_disabled = False
            if line.startswith(COMMENTFLAG):
                # NOTE(review): hard-coded 3 assumes len(COMMENTFLAG) == 3
                while line.startswith(COMMENTFLAG): line = line[3:]
                line = line.strip()
                line_disabled = True
            if not line or line.startswith('#'): continue
            # deal with trailing comment and detect the inpanel flag in it
            fields = line.split('#', 1)
            line = fields[0].strip()
            gen_by_inpanel = False
            if len(fields) > 1 and fields[1].strip() == GENBY:
                gen_by_inpanel = True
            # context up: '}' closes the innermost context
            if line == '}':
                if getlineinfo:
                    cconfig['_range']['end'] = {'file': file_i, 'line': [line_i, 1]}
                if DEBUG: print context_stack, '-', context_stack[-1],
                context_stack.pop()
                context = context_stack[-1]
                if DEBUG: print '=', context_stack
                cconfig = config
                for c in context_stack: cconfig = cconfig[c][-1]
                continue
            # this line may not be complete: buffer it and combine with the
            # following lines until a terminator (';', '{', '}') shows up
            if line[-1] not in (';', '{', '}'):
                line_buffer.append(line)
                continue
            elif len(line_buffer)>0:
                line_buffer.append(line)
                line = ''.join(line_buffer)
                line_count = len(line_buffer)
                line_buffer = []
            else:
                line_count = 1
            # only support one directive at a line
            ## one line may contain several directives
            #parts = re.split('[;{}]', line)
            #for part_i, part in enumerate(parts):
            #    if not part or part.startswith('#'): continue
            #    fields = part.split()
            fields = line.split()
            key = fields[0].strip(';') # some directives have no value, like ip_hash
            #value = ' '.join(fields[1:]).strip()
            value = ' '.join(fields[1:]).strip(';')
            if key in DIRECTIVES and context in DIRECTIVES[key]:
                if (not key in MODULES      # not in module name list
                    or key in MODULES       # or in module name list
                    and not MODULES[key]    # and this module can't have a param
                    and value != '{'):      # but actually it has one
                    # plain directive: append its value under the context
                    if getlineinfo:
                        v = {'file': file_i, 'line': [line_i-line_count+1, line_count], 'value': value}
                    else:
                        v = value
                    if key in cconfig:
                        cconfig[key].append(v)
                    else:
                        cconfig[key] = [v]
                    if key == 'include': # expand include files
                        includepath = value
                        if not includepath.startswith('/'):
                            # relative includes resolve against the root config's dir
                            includepath = os.path.join(os.path.dirname(config['_files'][0]), includepath)
                        confs = glob(includepath)
                        # order by domain name, excluding tld
                        getdm = lambda x: x.split('/')[-1].split('.')[-3::-1]
                        confs = sorted(confs, lambda x,y: cmp(getdm(x), getdm(y)))
                        for subconf in confs:
                            if os.path.exists(subconf):
                                if DEBUG: print '\n**********', subconf, '\n'
                                _loadconfig(subconf, getlineinfo, config, context_stack)
                else:
                    # module keyword opening a new context, e.g. 'server {'
                    context = key
                    if DEBUG: print context_stack, '+', context,
                    context_stack.append(context)
                    if DEBUG: print '=', context_stack
                    if context in cconfig:
                        cconfig[context].append({})
                    else:
                        cconfig[context] = [{}]
                    cconfig = cconfig[context][-1]
                    value = value.strip('{').strip()
                    if getlineinfo:
                        cconfig['_param'] = {'file': file_i, 'line': [line_i-line_count+1, line_count], 'disabled': line_disabled, 'value': value}
                        # record the context range ('end' is filled in at '}')
                        cconfig['_range'] = {'begin': {'file': file_i, 'line': [line_i-line_count+1, line_count]}}
                        cconfig['_inpanel'] = gen_by_inpanel
                    else:
                        cconfig['_param'] = value
                        cconfig['_disabled'] = line_disabled
                        cconfig['_inpanel'] = gen_by_inpanel
    return config
def _context_gethttp(config=None):
"""Get http context config.
"""
if not config or config['_isdirty']:
config = loadconfig(NGINXCONF, True)
return config['_'][0]['http'][0]
def _context_getservers(disabled=None, config=None, getlineinfo=True):
"""Get server context configs.
"""
if not config or config['_isdirty']:
config = loadconfig(NGINXCONF, getlineinfo)
http = config['_'][0]['http'][0]
if not 'server' in http:
return []
servers = http['server']
if disabled == None or not getlineinfo:
return servers
else:
return [server for server in servers
if server['_param']['disabled']==disabled]
def _context_getserver(ip, port, server_name, config=None, disabled=None, getlineinfo=True):
    """Get a server context config by server name.

    If disabled is set to None, all servers are considered.
    If disabled is set to True, only disabled servers are considered.
    If disabled is set to False, only normal servers are considered.
    Returns the matching server dict, or False when none matches.
    """
    if not config or config['_isdirty']:
        config = loadconfig(NGINXCONF, getlineinfo)
    candidates = _context_getservers(disabled=disabled, config=config, getlineinfo=getlineinfo)
    # Normalize wildcard addresses to 'any'.
    if not ip or ip in ('*', '0.0.0.0'):
        ip = ''
    # Bare IPv6 addresses appear bracketed in listen directives.
    if is_valid_ipv6(ip) and not is_valid_ipv4(ip):
        ip = '[' + ip + ']'
    # The listen values that would match the requested endpoint.
    if ip:
        wanted_listens = ['%s:%s' % (ip, port)]
    else:
        wanted_listens = [port, '*:%s' % port, '0.0.0.0:%s' % port]
    for server in candidates:
        if getlineinfo:
            names = ' '.join(entry['value'] for entry in server['server_name']).split()
            listens = [entry['value'].split()[0] for entry in server['listen']]
        else:
            names = ' '.join(server['server_name']).split()
            listens = [entry.split()[0] for entry in server['listen']]
        if server_name in names and any(w in listens for w in wanted_listens):
            return server
    return False
def _context_getupstreams(server_name, config=None, disabled=None, getlineinfo=True):
    """Return the upstream contexts backing ``server_name``.

    Upstreams are matched by the naming convention
    'backend_of_<server_name>_...'. Returns False when the config has
    no upstream contexts at all.
    """
    if not config or config['_isdirty']:
        config = loadconfig(NGINXCONF, getlineinfo)
    upstreams = http_get('upstream', config)
    if not upstreams:
        return False
    prefix = 'backend_of_%s_' % server_name
    if getlineinfo:
        upstreams = [u for u in upstreams if u['_param']['value'].startswith(prefix)]
    else:
        upstreams = [u for u in upstreams if u['_param'].startswith(prefix)]
    if disabled is None or not getlineinfo:
        return upstreams
    return [u for u in upstreams if u['_param']['disabled'] == disabled]
def _comment(filepath, start, end):
    """Comment out lines [start, end] (0-based, inclusive) by prefixing
    them with COMMENTFLAG; already-flagged lines are left untouched.
    Returns False if the file does not exist, else True.
    """
    if not os.path.exists(filepath):
        return False
    out = []
    with open(filepath) as f:
        for i, line in enumerate(f):
            if start <= i <= end and not line.startswith(COMMENTFLAG):
                out.append(COMMENTFLAG)
            out.append(line)
    with open(filepath, 'w') as f:
        f.write(''.join(out))
    return True
def _uncomment(filepath, start, end):
    """Uncomment lines [start, end] (0-based, inclusive) by stripping
    any stacked leading COMMENTFLAG markers.

    Returns False if the file does not exist, else True.
    """
    if not os.path.exists(filepath): return False
    data = []
    with open(filepath) as f:
        for i, line in enumerate(f):
            if i >= start and i <= end:
                # Fix: strip len(COMMENTFLAG) characters instead of a
                # hard-coded 3, so the flag constant can change safely.
                while line.startswith(COMMENTFLAG):
                    line = line[len(COMMENTFLAG):]
            data.append(line)
    with open(filepath, 'w') as f: f.write(''.join(data))
    return True
def _delete(filepath, start, end, delete_emptyfile=True):
"""Delete some lines in the file.
If delete_emptyfile is set to True, then the empty file will
be deleted from file system.
"""
if not os.path.exists(filepath): return False
data = []
with open(filepath) as f:
for i, line in enumerate(f):
if i>=start and i<=end: continue
data.append(line)
with open(filepath, 'w') as f: f.write(''.join(data))
if delete_emptyfile:
if ''.join(data).strip() == '': os.unlink(filepath)
return True
def _getcontextrange(context, config):
"""Return the range of the input context, including the file path.
Return format:
[filepath, line_start, line_end]
"""
file_i = context['_range']['begin']['file']
filepath = config['_files'][file_i]
line_start | |
# Source repository: ldfaiztt/redis-in-action
import bisect
from collections import defaultdict, deque
import json
import math
import os
import time
import unittest
import uuid
import zlib
import redis
# Shutdown flag checked by the worker loops below; set True to stop them.
QUIT = False
# Module-level placeholders referenced by the abridged book listings
# (list_item / purchase_item); real callers would bind these locally.
pipe = inv = item = buyer = seller = inventory = None
# <start id="_1314_14473_8380"/>
def add_update_contact(conn, user, contact):
    """Record ``contact`` as ``user``'s most recent contact.

    Any prior occurrence is removed, the contact is pushed to the front
    of the list, and the list is capped at 100 entries. All four
    commands run inside one transactional pipeline.
    """
    recent_key = 'recent:' + user
    trans = conn.pipeline(True)        # queue everything atomically
    trans.lrem(recent_key, contact)    # drop a prior occurrence, if any
    trans.lpush(recent_key, contact)   # re-insert at the head
    trans.ltrim(recent_key, 0, 99)     # keep only the 100 newest
    trans.execute()
# <end id="_1314_14473_8380"/>
#END
# <start id="_1314_14473_8383"/>
def remove_contact(conn, user, contact):
    """Delete every occurrence of ``contact`` from ``user``'s recent list."""
    conn.lrem('recent:%s' % user, contact)
# <end id="_1314_14473_8383"/>
#END
# <start id="_1314_14473_8386"/>
def fetch_autocomplete_list(conn, user, prefix):
    """Return the user's recent contacts whose names start with ``prefix``.

    Each candidate is lower-cased before matching (``prefix`` itself is
    used as given), mirroring the original listing's behavior.
    """
    recent = conn.lrange('recent:' + user, 0, -1)
    return [name for name in recent if name.lower().startswith(prefix)]
# <end id="_1314_14473_8386"/>
#END
# <start id="_1314_14473_8396"/>
# Characters bracketing the lower-case alphabet: '`' sorts immediately
# before 'a' and '{' immediately after 'z'.
valid_characters = '`abcdefghijklmnopqrstuvwxyz{'

def find_prefix_range(prefix):
    """Return (start, end) guard members that lexicographically bracket
    every member beginning with ``prefix``."""
    posn = bisect.bisect_left(valid_characters, prefix[-1:])
    predecessor = valid_characters[(posn or 1) - 1]
    start = prefix[:-1] + predecessor + '{'
    end = prefix + '{'
    return start, end
# <end id="_1314_14473_8396"/>
#END
# <start id="_1314_14473_8399"/>
def autocomplete_on_prefix(conn, guild, prefix):
    """Autocomplete ``prefix`` against the guild's member ZSET.

    Two unique guard members are inserted around the prefix range, up to
    10 members between them are read, and the guards are removed in the
    same MULTI block. The WATCH loop retries when another client touches
    the ZSET mid-operation; concurrent guards (containing '{') are
    filtered from the result.
    """
    start, end = find_prefix_range(prefix)                 #A
    identifier = str(uuid.uuid4())                         #A
    start += identifier                                    #A
    end += identifier                                      #A
    zset_name = 'members:' + guild
    conn.zadd(zset_name, start, 0, end, 0)                 #B
    pipeline = conn.pipeline(True)
    while 1:
        try:
            pipeline.watch(zset_name)
            sindex = pipeline.zrank(zset_name, start)      #C
            eindex = pipeline.zrank(zset_name, end)        #C
            erange = min(sindex + 9, eindex - 2)           #C
            pipeline.multi()
            pipeline.zrem(zset_name, start, end)           #D
            pipeline.zrange(zset_name, sindex, erange)     #D
            items = pipeline.execute()[-1]                 #D
            break
        except redis.exceptions.WatchError:                #E
            continue                                       #E
    return [item for item in items if '{' not in item]     #F
# <end id="_1314_14473_8399"/>
#A Find the start/end range for the prefix
#B Add the start/end range items to the ZSET
#C Find the ranks of our end points
#D Get the values inside our range, and clean up
#E Retry if someone modified our autocomplete zset
#F Remove start/end entries if an autocomplete was in progress
#END
# <start id="_1314_14473_8403"/>
def join_guild(conn, guild, user):
    """Add ``user`` to the guild member ZSET (score 0: lexicographic order)."""
    conn.zadd('members:%s' % guild, user, 0)

def leave_guild(conn, guild, user):
    """Remove ``user`` from the guild member ZSET."""
    conn.zrem('members:%s' % guild, user)
# <end id="_1314_14473_8403"/>
#END
# <start id="_1314_14473_8431"/>
def list_item(conn, itemid, sellerid, price):
    """List ``itemid`` on the market ZSET at ``price``.

    NOTE(review): abridged book listing -- it reads the module-level
    placeholders ``pipe``, ``inv`` and ``item`` (bound to None above),
    so it is illustrative rather than directly runnable.
    """
    #...
    pipe.watch(inv)                             #A
    if not pipe.sismember(inv, itemid):         #B
        pipe.unwatch()                          #B
        return None
    pipe.multi()                                #C
    pipe.zadd("market:", item, price)           #C
    pipe.srem(inv, itemid)                      #C
    pipe.execute()                              #C
    return True
    #...
# <end id="_1314_14473_8431"/>
#A Watch for changes to the users's inventory
#B Verify that the user still has the item to be listed
#C Actually list the item
#END
# <start id="_1314_14473_8435"/>
def purchase_item(conn, buyerid, itemid, sellerid, lprice):
    """Buy ``itemid`` if it is still listed at ``lprice`` and affordable.

    NOTE(review): abridged book listing relying on module-level
    placeholders (``pipe``, ``buyer``, ``seller``, ``item``,
    ``inventory``); illustrative only. The ``buyerid`` argument on the
    second hincrby (vs ``buyer`` elsewhere) looks inconsistent -- confirm
    against the book's full listing.
    """
    #...
    pipe.watch("market:", buyer)                #A
    price = pipe.zscore("market:", item)        #B
    funds = int(pipe.hget(buyer, 'funds'))      #B
    if price != lprice or price > funds:        #B
        pipe.unwatch()                          #B
        return None
    pipe.multi()                                #C
    pipe.hincrby(seller, 'funds', int(price))   #C
    pipe.hincrby(buyerid, 'funds', int(-price)) #C
    pipe.sadd(inventory, itemid)                #C
    pipe.zrem("market:", item)                  #C
    pipe.execute()                              #C
    return True
    #...
# <end id="_1314_14473_8435"/>
#A Watch for changes to the market and the buyer's account information
#B Check for a sold/repriced item or insufficient funds
#C Transfer funds from the buyer to the seller, and transfer the item to the buyer
#END
# <start id="_1314_14473_8641"/>
def acquire_lock(conn, lockname, acquire_timeout=10):
    """Try for up to ``acquire_timeout`` seconds to take a simple lock.

    Returns the lock's unique identifier on success, False on timeout.
    """
    identifier = str(uuid.uuid4())      # random token naming this holder
    deadline = time.time() + acquire_timeout
    while time.time() < deadline:
        if conn.setnx('lock:' + lockname, identifier):
            return identifier
        time.sleep(.001)                # brief back-off before retrying
    return False
# <end id="_1314_14473_8641"/>
#END
# <start id="_1314_14473_8645"/>
def purchase_item_with_lock(conn, buyerid, itemid, sellerid):
    """Purchase ``itemid`` from ``sellerid`` for ``buyerid``, serialized
    by the coarse 'market:' lock.

    Returns True on success, None when the item is gone or unaffordable,
    and False when the lock cannot be acquired.
    """
    buyer_key = "users:%s" % buyerid
    seller_key = "users:%s" % sellerid
    market_member = "%s.%s" % (itemid, sellerid)
    inventory_key = "inventory:%s" % buyerid
    lock_id = acquire_lock(conn, 'market:')     # serialize market access
    if not lock_id:
        return False
    pipe = conn.pipeline(True)
    try:
        # Check that the item is still listed and the buyer can pay.
        pipe.zscore("market:", market_member)
        pipe.hget(buyer_key, 'funds')
        price, funds = pipe.execute()
        if price is None or price > funds:
            return None
        # Move the money and the item.
        pipe.hincrby(seller_key, 'funds', int(price))
        pipe.hincrby(buyer_key, 'funds', int(-price))
        pipe.sadd(inventory_key, itemid)
        pipe.zrem("market:", market_member)
        pipe.execute()
        return True
    finally:
        release_lock(conn, 'market:', lock_id)  # always give the lock back
# <end id="_1314_14473_8645"/>
#END
# <start id="_1314_14473_8650"/>
def release_lock(conn, lockname, identifier):
    """Release lock ``lockname`` if ``identifier`` still owns it.

    Returns True when the lock was deleted, False when it was already
    lost (stolen by another client or expired).
    """
    pipe = conn.pipeline(True)
    key = 'lock:' + lockname
    while True:
        try:
            pipe.watch(key)                     # notice concurrent changes
            if pipe.get(key) == identifier:     # still ours?
                pipe.multi()
                pipe.delete(key)
                pipe.execute()
                return True
            pipe.unwatch()
            break
        except redis.exceptions.WatchError:     # lock touched concurrently; retry
            pass
    return False                                # we no longer held the lock
# <end id="_1314_14473_8650"/>
#END
# <start id="_1314_14473_8790"/>
def acquire_lock_with_timeout(
    conn, lockname, acquire_timeout=10, lock_timeout=10):
    """Take a lock that auto-expires after ``lock_timeout`` seconds.

    Retries for up to ``acquire_timeout`` seconds; returns the holder
    identifier on success, False on timeout.
    """
    identifier = str(uuid.uuid4())              # random holder token
    key = 'lock:' + lockname
    ttl = int(math.ceil(lock_timeout))          # EXPIRE wants whole seconds
    deadline = time.time() + acquire_timeout
    while time.time() < deadline:
        if conn.setnx(key, identifier):
            conn.expire(key, ttl)               # got it: arm the expiration
            return identifier
        elif not conn.ttl(key):
            conn.expire(key, ttl)               # repair a lock left with no TTL
        time.sleep(.001)
    return False
# <end id="_1314_14473_8790"/>
#END
# <start id="_1314_14473_8986"/>
def acquire_semaphore(conn, semname, limit, timeout=10):
    """Try to take one of ``limit`` slots on an (unfair) semaphore.

    Returns a holder identifier on success, None when the semaphore is
    full. Stale holders older than ``timeout`` seconds are purged first.
    """
    identifier = str(uuid.uuid4())              # token for this holder
    now = time.time()
    pipeline = conn.pipeline(True)
    pipeline.zremrangebyscore(semname, '-inf', now - timeout)  # purge stale
    pipeline.zadd(semname, identifier, now)     # optimistically register
    pipeline.zrank(semname, identifier)         # where did we land?
    if pipeline.execute()[-1] < limit:
        return identifier
    conn.zrem(semname, identifier)              # full: withdraw our entry
    return None
# <end id="_1314_14473_8986"/>
#END
# <start id="_1314_14473_8990"/>
def release_semaphore(conn, semname, identifier):
    """Release the semaphore slot held under ``identifier``."""
    return conn.zrem(semname, identifier)                      #A
# <end id="_1314_14473_8990"/>
#A Returns True if the semaphore was properly released, False if it had timed out
#END
# <start id="_1314_14473_9004"/>
def acquire_fair_semaphore(conn, semname, limit, timeout=10):
    """Take a slot on a fair semaphore backed by a shared counter.

    Ranking is done on the counter ZSET (semname + ':owner'), so slots
    go to whoever incremented the counter first, independent of clock
    skew between clients. Returns the holder identifier or None.
    """
    identifier = str(uuid.uuid4())                  # token for this holder
    owner_zset = semname + ':owner'
    counter_key = semname + ':counter'
    now = time.time()
    pipeline = conn.pipeline(True)
    # Drop timed-out holders from both ZSETs, then draw a counter value.
    pipeline.zremrangebyscore(semname, '-inf', now - timeout)
    pipeline.zinterstore(owner_zset, {owner_zset: 1, semname: 0})
    pipeline.incr(counter_key)
    counter = pipeline.execute()[-1]
    # Register ourselves and check our rank among owners.
    pipeline.zadd(semname, identifier, now)
    pipeline.zadd(owner_zset, identifier, counter)
    pipeline.zrank(owner_zset, identifier)
    if pipeline.execute()[-1] < limit:
        return identifier                           # we got a slot
    # Semaphore full: withdraw the entries we just added.
    pipeline.zrem(semname, identifier)
    pipeline.zrem(owner_zset, identifier)
    pipeline.execute()
    return None
# <end id="_1314_14473_9004"/>
#END
# <start id="_1314_14473_9014"/>
def release_fair_semaphore(conn, semname, identifier):
    """Release a fair-semaphore slot, cleaning both the timeout ZSET and
    the owner ZSET. A truthy result means the slot was still held."""
    pipeline = conn.pipeline(True)
    pipeline.zrem(semname, identifier)
    pipeline.zrem(semname + ':owner', identifier)
    removed_from_main = pipeline.execute()[0]
    return removed_from_main
# <end id="_1314_14473_9014"/>
#END
# <start id="_1314_14473_9022"/>
def refresh_fair_semaphore(conn, semname, identifier):
    """Refresh our slot's timestamp on a fair semaphore.

    If zadd reports the member as newly added, our slot had already
    timed out -- undo the add and report the loss with False; otherwise
    return True (we still hold the semaphore).
    """
    lost = conn.zadd(semname, identifier, time.time())
    if lost:
        release_fair_semaphore(conn, semname, identifier)
        return False
    return True
# <end id="_1314_14473_9022"/>
#END
# <start id="_1314_14473_9031"/>
def acquire_semaphore_with_lock(conn, semname, limit, timeout=10):
    """Fair-semaphore acquisition guarded by a short-lived lock, which
    removes the counter race between competing clients.

    Returns the semaphore identifier, or None when either the brief
    lock or the semaphore itself could not be acquired.
    """
    lock_id = acquire_lock(conn, semname, acquire_timeout=.01)
    if not lock_id:
        return None
    try:
        return acquire_fair_semaphore(conn, semname, limit, timeout)
    finally:
        release_lock(conn, semname, lock_id)
# <end id="_1314_14473_9031"/>
#END
# <start id="_1314_14473_9056"/>
def send_sold_email_via_queue(conn, seller, item, price, buyer):
    """Queue a "sold" notification as a JSON payload on 'queue:email'."""
    payload = {
        'seller_id': seller,
        'item_id': item,
        'price': price,
        'buyer_id': buyer,
        'time': time.time(),    # timestamp of the sale
    }
    conn.rpush('queue:email', json.dumps(payload))
# <end id="_1314_14473_9056"/>
#END
# <start id="_1314_14473_9060"/>
def process_sold_email_queue(conn):
    """Worker loop: pop sold-item messages off 'queue:email' and send the
    corresponding emails until the module-level QUIT flag is set.

    NOTE(review): fetch_data_and_send_sold_email, EmailSendError,
    log_error and log_success are assumed to be defined elsewhere.
    """
    while not QUIT:
        packed = conn.blpop(['queue:email'], 30)           #A
        if not packed:                                     #B
            continue                                       #B
        to_send = json.loads(packed[1])                    #C
        try:
            fetch_data_and_send_sold_email(to_send)        #D
        except EmailSendError as err:
            log_error("Failed to send sold email", err, to_send)
        else:
            log_success("Sent sold email", to_send)
# <end id="_1314_14473_9060"/>
#A Try to get a message to send
#B No message to send, try again
#C Load the packed email information
#D Send the email using our pre-written emailing function
#END
# <start id="_1314_14473_9066"/>
def worker_watch_queue(conn, queue, callbacks):
    """Generic worker: pop [name, args] messages from ``queue`` and
    dispatch each to the matching entry in ``callbacks`` until the
    module-level QUIT flag is set. Unknown names are logged and skipped.
    """
    while not QUIT:
        packed = conn.blpop([queue], 30)                   #A
        if not packed:                                     #B
            continue                                       #B
        name, args = json.loads(packed[1])                 #C
        if name not in callbacks:                          #D
            log_error("Unknown callback %s"%name)          #D
            continue                                       #D
        callbacks[name](*args)                             #E
# <end id="_1314_14473_9066"/>
#A Try to get an item from the queue
#B There is nothing to work on, try again
#C | |
"Security Definer",
p.proleakproof as "Leak Proof",
p.proisstrict as "Is Strict",
(case p.provolatile when 'i' then 'Immutable' when 's' then 'Stable' when 'v' then 'Volatile' end) as "Volatile",
(case p.proparallel when 's' then 'Safe' when 'r' then 'Restricted' when 'u' then 'Unsafe' end) as "Parallel",
p.pronargs as "Number of Arguments",
p.pronargdefaults as "Number of Default Arguments",
p.probin as "Invoke",
p.proconfig as "Configuration",
p.proacl as "ACL"
from pg_proc p
join pg_namespace n
on p.pronamespace = n.oid
inner join pg_roles r
on r.oid = p.proowner
inner join pg_language l
on l.oid = p.prolang
where quote_ident(n.nspname) || '.' || quote_ident(p.proname) || '(' || oidvectortypes(p.proargtypes) || ')' = '{0}'
and p.prokind = 'p'
'''.format(p_object))
def GetPropertiesTrigger(self, p_schema, p_table, p_object):
    """Return a one-row result describing trigger ``p_object`` on table
    ``p_schema``.``p_table`` (timing, events, orientation, condition,
    statement, plus catalog data such as OID and trigger function).

    NOTE(review): the identifiers are spliced into the SQL with
    str.format; callers are assumed to pass quote_ident()-style values.
    """
    # Joins information_schema.triggers (standard view, per-event rows
    # aggregated with 'OR') against pg_trigger for OID/enabled/function.
    return self.v_connection.Query('''
select current_database() as "Database",
y.schema_name as "Schema",
y.table_name as "Table",
y.trigger_name as "Trigger",
y.oid as "OID",
y.trigger_enabled as "Enabled",
y.trigger_function_name as "Trigger Function",
x.action_timing as "Action Timing",
x.event_manipulation as "Action Manipulation",
x.action_orientation as "Action Orientation",
x.action_condition as "Action Condition",
x.action_statement as "Action Statement"
from (
select distinct quote_ident(t.event_object_schema) as schema_name,
quote_ident(t.event_object_table) as table_name,
quote_ident(t.trigger_name) as trigger_name,
t.action_timing,
array_to_string(array(
select t2.event_manipulation::text
from information_schema.triggers t2
where t2.event_object_schema = t.event_object_schema
and t2.event_object_table = t.event_object_table
and t2.trigger_name = t.trigger_name
), ' OR ') as event_manipulation,
t.action_orientation,
t.action_condition,
t.action_statement
from information_schema.triggers t
where quote_ident(t.event_object_schema) = '{0}'
and quote_ident(t.event_object_table) = '{1}'
and quote_ident(t.trigger_name) = '{2}'
) x
inner join (
select t.oid,
quote_ident(n.nspname) as schema_name,
quote_ident(c.relname) as table_name,
quote_ident(t.tgname) as trigger_name,
t.tgenabled as trigger_enabled,
quote_ident(np.nspname) || '.' || quote_ident(p.proname) as trigger_function_name,
quote_ident(np.nspname) || '.' || quote_ident(p.proname) || '()' as trigger_function_id
from pg_trigger t
inner join pg_class c
on c.oid = t.tgrelid
inner join pg_namespace n
on n.oid = c.relnamespace
inner join pg_proc p
on p.oid = t.tgfoid
inner join pg_namespace np
on np.oid = p.pronamespace
where not t.tgisinternal
and quote_ident(n.nspname) = '{0}'
and quote_ident(c.relname) = '{1}'
and quote_ident(t.tgname) = '{2}'
) y
on y.schema_name = x.schema_name
and y.table_name = x.table_name
and y.trigger_name = x.trigger_name
'''.format(p_schema, p_table, p_object))
def GetPropertiesEventTrigger(self, p_object):
    """Return a one-row result describing event trigger ``p_object``
    (event, tags, OID, enabled state, owner and trigger function).

    NOTE(review): the identifier is spliced into the SQL with
    str.format; callers are assumed to pass a quote_ident()-style value.
    """
    return self.v_connection.Query('''
select current_database() as "Database",
quote_ident(t.evtname) as "Event Trigger Name",
t.evtevent as "Event",
array_to_string(t.evttags, ', ') as "Tags",
t.oid as "OID",
t.evtenabled as "Enabled",
r.rolname as "Owner",
quote_ident(np.nspname) || '.' || quote_ident(p.proname) as "Event Trigger Function"
from pg_event_trigger t
inner join pg_proc p
on p.oid = t.evtfoid
inner join pg_namespace np
on np.oid = p.pronamespace
inner join pg_roles r
on r.oid = t.evtowner
where quote_ident(t.evtname) = '{0}'
'''.format(p_object))
def GetPropertiesPK(self, p_schema, p_table, p_object):
    """Return a one-row result describing primary-key constraint
    ``p_object`` on ``p_schema``.``p_table`` (constrained columns,
    backing index, deferrability flags, inheritance counters).

    The query first (re)creates a session-temporary helper function
    (pg_temp.fnc_omnidb_constraint_attrs) that resolves conkey attnums
    into a comma-separated column list.

    NOTE(review): identifiers are spliced into the SQL with str.format;
    callers are assumed to pass quote_ident()-style values.
    """
    return self.v_connection.Query('''
create or replace function pg_temp.fnc_omnidb_constraint_attrs(text, text, text)
returns text as $$
select array_to_string(array(
select a.attname
from (
select unnest(c.conkey) as conkey
from pg_constraint c
join pg_class t
on t.oid = c.conrelid
join pg_namespace n
on t.relnamespace = n.oid
where contype = 'p'
and quote_ident(n.nspname) = $1
and quote_ident(t.relname) = $2
and quote_ident(c.conname) = $3
) x
inner join pg_attribute a
on a.attnum = x.conkey
inner join pg_class r
on r.oid = a.attrelid
inner join pg_namespace n
on n.oid = r.relnamespace
where quote_ident(n.nspname) = $1
and quote_ident(r.relname) = $2
), ',')
$$ language sql;
select current_database() as "Database",
quote_ident(n.nspname) as "Schema",
quote_ident(t.relname) as "Table",
quote_ident(c.conname) as "Constraint Name",
c.oid as "OID",
(case c.contype when 'c' then 'Check' when 'f' then 'Foreign Key' when 'p' then 'Primary Key' when 'u' then 'Unique' when 'x' then 'Exclusion' end) as "Constraint Type",
pg_temp.fnc_omnidb_constraint_attrs(
quote_ident(n.nspname),
quote_ident(t.relname),
quote_ident(c.conname)
) as "Constrained Columns",
quote_ident(i.relname) as "Index",
c.condeferrable as "Deferrable",
c.condeferred as "Deferred by Default",
c.convalidated as "Validated",
c.conislocal as "Is Local",
c.coninhcount as "Number of Ancestors",
c.connoinherit as "Non-Inheritable"
from pg_constraint c
join pg_class t
on t.oid = c.conrelid
join pg_namespace n
on t.relnamespace = n.oid
join pg_class i
on i.oid = c.conindid
where contype = 'p'
and quote_ident(n.nspname) = '{0}'
and quote_ident(t.relname) = '{1}'
and quote_ident(c.conname) = '{2}'
'''.format(p_schema, p_table, p_object))
def GetPropertiesFK(self, p_schema, p_table, p_object):
return self.v_connection.Query('''
create or replace function pg_temp.fnc_omnidb_constraint_attrs(text, text, text)
returns text as $$
select array_to_string(array(
select a.attname
from (
select unnest(c.conkey) as conkey
from pg_constraint c
join pg_class t
on t.oid = c.conrelid
join pg_namespace n
on t.relnamespace = n.oid
where contype = 'f'
and quote_ident(n.nspname) = $1
and quote_ident(t.relname) = $2
and quote_ident(c.conname) = $3
) x
inner join pg_attribute a
on a.attnum = x.conkey
inner join pg_class r
on r.oid = a.attrelid
inner join pg_namespace n
on n.oid = r.relnamespace
where quote_ident(n.nspname) = $1
and quote_ident(r.relname) = $2
), ',')
$$ language sql;
create or replace function pg_temp.fnc_omnidb_rconstraint_attrs(text, text, text)
returns text as $$
select array_to_string(array(
select a.attname
from (
select unnest(c.confkey) as confkey
from pg_constraint c
join pg_class t
on t.oid = c.conrelid
join pg_namespace n
on t.relnamespace = n.oid
where contype = 'f'
and quote_ident(n.nspname) = $1
and quote_ident(t.relname) = $2
and quote_ident(c.conname) = $3
) x
inner join pg_attribute a
on a.attnum = x.confkey
inner join pg_class r
on r.oid = a.attrelid
inner join pg_namespace n
on n.oid = r.relnamespace
where quote_ident(n.nspname) = $1
and quote_ident(r.relname) = $2
), ',')
$$ language sql;
create or replace function pg_temp.fnc_omnidb_pfconstraint_ops(text, text, text)
returns text as $$
select array_to_string(array(
select oprname
from (
select o.oprname
from (
select unnest(c.conpfeqop) as conpfeqop
from pg_constraint c
join pg_class t
on t.oid = c.conrelid
join pg_namespace n
on t.relnamespace = n.oid
where contype = 'x'
and quote_ident(n.nspname) = $1
and quote_ident(t.relname) = $2
and quote_ident(c.conname) = $3
) x
inner join pg_operator o
on o.oid = x.conpfeqop
) t
), ',')
$$ language sql;
create or replace function pg_temp.fnc_omnidb_ppconstraint_ops(text, text, text)
returns text as $$
select array_to_string(array(
select oprname
from (
select o.oprname
from (
select unnest(c.conppeqop) as conppeqop
from pg_constraint c
join pg_class t
on t.oid = c.conrelid
join pg_namespace n
on t.relnamespace = n.oid
where contype = 'x'
and quote_ident(n.nspname) = $1
and quote_ident(t.relname) = $2
and quote_ident(c.conname) = $3
) x
inner join pg_operator o
on o.oid = x.conppeqop
) t
), ',')
$$ language sql;
create or replace function pg_temp.fnc_omnidb_ffconstraint_ops(text, text, text)
returns text as $$
select array_to_string(array(
select oprname
from (
select o.oprname
from (
select unnest(c.conffeqop) as conffeqop
from pg_constraint c
join pg_class t
on t.oid = c.conrelid
join pg_namespace n
on t.relnamespace = n.oid
where contype = 'x'
and quote_ident(n.nspname) = $1
and quote_ident(t.relname) = $2
and quote_ident(c.conname) = $3
) x
inner join pg_operator o
on o.oid = x.conffeqop
) t
), ',')
$$ language sql;
select current_database() as "Database",
quote_ident(n.nspname) as "Schema",
quote_ident(t.relname) as "Table",
quote_ident(c.conname) as "Constraint Name",
c.oid as "OID",
(case c.contype when 'c' then 'Check' when 'f' then 'Foreign Key' when 'p' then 'Primary Key' when 'u' then 'Unique' when 'x' then 'Exclusion' end) as "Constraint Type",
pg_temp.fnc_omnidb_constraint_attrs(
quote_ident(n.nspname),
quote_ident(t.relname),
quote_ident(c.conname)
) as "Constrained Columns",
quote_ident(i.relname) as "Index",
quote_ident(nr.nspname) as "Referenced Schema",
quote_ident(tr.relname) as "Referenced Table",
pg_temp.fnc_omnidb_rconstraint_attrs(
quote_ident(n.nspname),
quote_ident(t.relname),
quote_ident(c.conname)
) as "Referenced Columns",
(case c.confupdtype when 'a' then 'No Action' when 'r' then 'Restrict' when 'c' then 'Cascade' when 'n' then 'Set Null' when 'd' then 'Set Default' end) as "Update Action",
(case c.confdeltype when 'a' then 'No Action' when 'r' then 'Restrict' when 'c' then 'Cascade' when 'n' then 'Set Null' when 'd' then 'Set Default' end) as "Delete Action",
(case c.confmatchtype when 'f' then 'Full' when 'p' then 'Partial' when 's' then 'Simple' end) as "Match Type",
c.condeferrable as "Deferrable",
c.condeferred as "Deferred by Default",
c.convalidated as "Validated",
c.conislocal as "Is Local",
c.coninhcount as "Number of Ancestors",
c.connoinherit as "Non-Inheritable",
pg_temp.fnc_omnidb_pfconstraint_ops(
quote_ident(n.nspname),
quote_ident(t.relname),
quote_ident(c.conname)
) as "PK=FK Equality Operators",
pg_temp.fnc_omnidb_ppconstraint_ops(
quote_ident(n.nspname),
quote_ident(t.relname),
quote_ident(c.conname)
) as "PK=PK Equality Operators",
pg_temp.fnc_omnidb_ffconstraint_ops(
quote_ident(n.nspname),
quote_ident(t.relname),
quote_ident(c.conname)
) as "FK=FK Equality Operators"
from pg_constraint c
join pg_class t
on t.oid = c.conrelid
join pg_namespace n
on t.relnamespace = n.oid
join pg_class i
on i.oid = c.conindid
join pg_class tr
on tr.oid = c.confrelid
join pg_namespace nr
on tr.relnamespace = nr.oid
where contype = 'f'
and quote_ident(n.nspname) = '{0}'
and quote_ident(t.relname) = '{1}'
and quote_ident(c.conname) = | |
'M', structure='topological')
sage: c_xy.<x,y> = M.chart()
sage: f = M.scalar_field(x+2*y)
sage: f.domain()
2-dimensional topological manifold M
sage: U = M.open_subset('U', coord_def={c_xy: x<0})
sage: g = f.restrict(U)
sage: g.domain()
Open subset U of the 2-dimensional topological manifold M
"""
return self._domain
def copy(self, name=None, latex_name=None):
    r"""
    Return an exact copy of the scalar field.

    INPUT:

    - ``name`` -- (default: ``None``) name given to the copy
    - ``latex_name`` -- (default: ``None``) LaTeX symbol to denote the
      copy; if none is provided, the LaTeX symbol is set to ``name``

    EXAMPLES:

    Copy on a 2-dimensional manifold::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: c_xy.<x,y> = M.chart()
        sage: f = M.scalar_field(x*y^2)
        sage: g = f.copy()
        sage: type(g)
        <class 'sage.manifolds.scalarfield_algebra.ScalarFieldAlgebra_with_category.element_class'>
        sage: g.expr()
        x*y^2
        sage: g == f
        True
        sage: g is f
        False

    """
    # Build a fresh scalar field in the same parent algebra.
    result = type(self)(self.parent(), name=name, latex_name=latex_name)
    # Copy each known coordinate expression chart-by-chart so the copy
    # does not share mutable chart functions with the original.
    for chart, funct in self._express.items():
        result._express[chart] = funct.copy()
    # Preserve the cached is-zero flag.
    result._is_zero = self._is_zero
    return result
def coord_function(self, chart=None, from_chart=None):
    r"""
    Return the function of the coordinates representing the scalar field
    in a given chart.

    INPUT:

    - ``chart`` -- (default: ``None``) chart with respect to which the
      coordinate expression is to be returned; if ``None``, the
      default chart of the scalar field's domain will be used
    - ``from_chart`` -- (default: ``None``) chart from which the
      required expression is computed if it is not known already in the
      chart ``chart``; if ``None``, a chart is picked in the known
      expressions

    OUTPUT:

    - instance of :class:`~sage.manifolds.chart_func.ChartFunction`
      representing the coordinate function of the scalar field in the
      given chart

    EXAMPLES:

    Coordinate function on a 2-dimensional manifold::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: c_xy.<x,y> = M.chart()
        sage: f = M.scalar_field(x*y^2)
        sage: f.coord_function()
        x*y^2
        sage: f.coord_function(c_xy)  # equivalent form (since c_xy is the default chart)
        x*y^2
        sage: type(f.coord_function())
        <class 'sage.manifolds.chart_func.ChartFunctionRing_with_category.element_class'>

    Expression via a change of coordinates::

        sage: c_uv.<u,v> = M.chart()
        sage: c_uv.transition_map(c_xy, [u+v, u-v])
        Change of coordinates from Chart (M, (u, v)) to Chart (M, (x, y))
        sage: f._express # at this stage, f is expressed only in terms of (x,y) coordinates
        {Chart (M, (x, y)): x*y^2}
        sage: f.coord_function(c_uv) # forces the computation of the expression of f in terms of (u,v) coordinates
        u^3 - u^2*v - u*v^2 + v^3
        sage: f.coord_function(c_uv) == (u+v)*(u-v)^2  # check
        True
        sage: f._express  # random (dict. output); f has now 2 coordinate expressions:
        {Chart (M, (x, y)): x*y^2, Chart (M, (u, v)): u^3 - u^2*v - u*v^2 + v^3}
    """
    if chart is None:
        # Fall back to the default chart of the field's domain.
        chart = self._domain._def_chart
    else:
        if chart not in self._domain._atlas:
            raise ValueError("the {} is not a chart ".format(chart) +
                             "defined on the {}".format(self._domain))
    if chart not in self._express:
        # Check whether chart corresponds to a subchart of a chart
        # where the expression of self is known:
        for known_chart in self._express:
            if chart in known_chart._subcharts:
                # Same coordinate symbols on a smaller domain: the known
                # expression can be reused directly.
                new_expr = self._express[known_chart].expr()
                self._express[chart] = chart.function(new_expr)
                return self._express[chart]
        # If this point is reached, the expression must be computed
        # from that in the chart from_chart, by means of a
        # change-of-coordinates formula:
        if from_chart is None:
            # from_chart is searched among the charts of known expressions
            # and subcharts of them
            known_express = self._express.copy()  # copy: self._express may grow in the loop
            found = False
            for kchart in known_express:
                for skchart in kchart._subcharts:
                    if (chart, skchart) in self._domain._coord_changes:
                        from_chart = skchart
                        found = True
                        if skchart not in self._express:
                            # cache the restriction to the subchart as well
                            self._express[skchart] = skchart.function(
                                              self._express[kchart].expr())
                        break
                if found:
                    break
            if not found:
                raise ValueError("no starting chart could be found to " +
                                 "compute the expression in the {}".format(chart))
        change = self._domain._coord_changes[(chart, from_chart)]
        # old coordinates expressed in terms of the new ones:
        coords = [ change._transf._functions[i].expr()
                   for i in range(self._manifold.dim()) ]
        # substitute them into the known expression to get the new one
        new_expr = self._express[from_chart](*coords)
        self._express[chart] = chart.function(new_expr)
        # a new expression was computed: invalidate cached derived data
        self._del_derived()
    return self._express[chart]
def expr(self, chart=None, from_chart=None):
    r"""
    Return the coordinate expression of the scalar field in a given
    chart.

    INPUT:

    - ``chart`` -- (default: ``None``) chart with respect to which the
      coordinate expression is required; if ``None``, the default
      chart of the scalar field's domain will be used
    - ``from_chart`` -- (default: ``None``) chart from which the
      required expression is computed if it is not known already in the
      chart ``chart``; if ``None``, a chart is picked in ``self._express``

    OUTPUT:

    - the coordinate expression of the scalar field in the given chart,
      either as a Sage symbolic expression or as a SymPy object,
      depending on the symbolic calculus method used on the chart

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: c_xy.<x,y> = M.chart()
        sage: f = M.scalar_field(x*y^2)
        sage: f.expr()
        x*y^2
        sage: f.expr(c_xy)  # equivalent form (since c_xy is the default chart)
        x*y^2
    """
    # coord_function() performs the chart lookup and any required
    # change-of-coordinates computation; expr() extracts the underlying
    # symbolic object (Sage expression or SymPy object).
    coordinate_function = self.coord_function(chart, from_chart)
    return coordinate_function.expr()
def set_expr(self, coord_expression, chart=None):
    r"""
    Set the coordinate expression of the scalar field.

    The expressions with respect to other charts are deleted, in order to
    avoid any inconsistency. To keep them, use :meth:`add_expr` instead.

    INPUT:

    - ``coord_expression`` -- coordinate expression of the scalar field
    - ``chart`` -- (default: ``None``) chart in which ``coord_expression``
      is defined; if ``None``, the default chart of the scalar field's
      domain is assumed

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: c_xy.<x,y> = M.chart()
        sage: f = M.scalar_field(x^2 + 2*x*y +1)
        sage: f.set_expr(3*y)
        sage: f._express  # the (x,y) expression has been changed:
        {Chart (M, (x, y)): 3*y}

    The distinguished elements zero and one of the algebra cannot be
    modified::

        sage: z = M.zero_scalar_field()
        sage: z.set_expr(3*y)
        Traceback (most recent call last):
        ...
        AssertionError: the expressions of the element zero cannot be changed
    """
    # Refuse to mutate the algebra's special elements 0 and 1.
    algebra = self.parent()
    if self is algebra.one() or self is algebra.zero():
        raise AssertionError("the expressions of the element "
                             "{} cannot be changed".format(self._name))
    target_chart = self._domain._def_chart if chart is None else chart
    # Discard every previously known expression before installing the
    # new one, so no stale expression can contradict it.
    self._express.clear()
    self._express[target_chart] = target_chart.function(coord_expression)
    self._is_zero = False  # a priori
    self._del_derived()
def add_expr(self, coord_expression, chart=None):
r"""
Add some coordinate expression to the scalar field.
The previous expressions with respect to other charts are kept. To
clear them, use :meth:`set_expr` instead.
INPUT:
- ``coord_expression`` -- coordinate expression of the scalar field
- ``chart`` -- (default: ``None``) chart in which ``coord_expression``
is defined; if ``None``, the default chart of the scalar field's
domain | |
False)
#Run through a series of different finds to ensure the various parameter filters are working.
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_org_negotiations(org2_id,negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
self.assertEqual(len(negotiations),1)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, headers=actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, proposal_type=OT.RequestRoleProposal, headers=actor_header)
self.assertEqual(len(negotiations),1)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
self.assertEqual(len(negotiations),1)
#Manager rejects the initial role proposal
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.RequestRoleProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.REJECTED, ProposalOriginatorEnum.PROVIDER)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_org_negotiations(org2_id,negotiation_status=NegotiationStatusEnum.REJECTED, headers=self.system_actor_header)
self.assertEqual(len(negotiations),1)
self.assertEqual(negotiations[0].negotiation_status, NegotiationStatusEnum.REJECTED)
#Make sure the user still does not have the requested role
ret = self.org_client.has_role(org2_id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, False)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.RequestRoleNegotiationStatusEvent)
self.assertEquals(len(events_r), 2)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.REJECTED])
#Create a second proposal to add a role to a user
sap = IonObject(OT.RequestRoleProposal,consumer=actor_id, provider=org2_id, role_name=OPERATOR_ROLE )
sap_response = self.org_client.negotiate(sap, headers=actor_header )
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),3)
closed_negotiations = self.org_client.find_org_closed_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(closed_negotiations),2)
#Create an instrument resource
ia_list,_ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
self.assertEqual(len(ia_list),0)
ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The first Instrument Agent')
#Instruments should not be able to be created by anonymous users
with self.assertRaises(Unauthorized) as cm:
self.ims_client.create_instrument_agent(ia_obj, headers=self.anonymous_actor_headers)
self.assertIn('instrument_management(create_instrument_agent) has been denied',cm.exception.message)
#Instruments should not be able to be created by users that are not Instrument Operators
with self.assertRaises(Unauthorized) as cm:
self.ims_client.create_instrument_agent(ia_obj, headers=actor_header)
self.assertIn('instrument_management(create_instrument_agent) has been denied',cm.exception.message)
#Manager approves proposal for role request
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.RequestRoleProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.ACCEPTED, ProposalOriginatorEnum.PROVIDER)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
#Make sure there are no more open negotiations
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
self.assertEqual(len(negotiations),0)
#Verify the user has been assigned the requested role in the second Org
ret = self.org_client.has_role(org2_id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, True)
#Verify the user has only been assigned the requested role in the second Org and not in the first Org
ret = self.org_client.has_role(self.ion_org._id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, False)
#Refresh headers with new role
actor_header = get_actor_header(actor_id)
#now try to request the same role for the same user - should be denied
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.RequestRoleProposal,consumer=actor_id, provider=org2_id, role_name=OPERATOR_ROLE )
sap_response = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: not has_role',cm.exception.message)
#Now the user with the proper role should be able to create an instrument.
self.ims_client.create_instrument_agent(ia_obj, headers=actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.RequestRoleNegotiationStatusEvent)
self.assertEquals(len(events_r), 4)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.GRANTED])
self.assertEqual(events_r[-1][2].role_name, sap_response2.role_name)
events_c = self.event_repo.find_events(origin=org2_id, event_type=OT.UserRoleGrantedEvent)
self.assertEquals(len(events_c), 2)
events_i = self.event_repo.find_events(origin=org2_id, event_type=OT.OrgNegotiationInitiatedEvent)
self.assertEquals(len(events_i), 3)
def test_org_acquire_resource_negotiation(self):
#Make sure that the system policies have been loaded
policy_list,_ = self.rr_client.find_resources(restype=RT.Policy)
self.assertNotEqual(len(policy_list),0,"The system policies have not been loaded into the Resource Registry")
with self.assertRaises(BadRequest) as cm:
myorg = self.org_client.read_org()
self.assertTrue(cm.exception.message == 'The org_id parameter is missing')
log.debug('Begin testing with policies')
#Create a new user - should be denied for anonymous access
with self.assertRaises(Unauthorized) as cm:
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.anonymous_actor_headers)
self.assertIn( 'identity_management(signon) has been denied',cm.exception.message)
#Now create user with proper credentials
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.apache_actor_header)
log.info( "actor id=" + actor_id)
#Create a second Org
org2 = IonObject(RT.Org, name=ORG2, description='A second Org')
org2_id = self.org_client.create_org(org2, headers=self.system_actor_header)
org2 = self.org_client.find_org(ORG2)
self.assertEqual(org2_id, org2._id)
roles = self.org_client.list_org_roles(org2_id)
self.assertEqual(len(roles),2)
self.assertItemsEqual([r.governance_name for r in roles], [MODERATOR_ROLE, MEMBER_ROLE])
#Create the Instrument Operator Role
operator_role = IonObject(RT.UserRole, governance_name=OPERATOR_ROLE,name='Instrument Operator', description='Instrument Operator')
#And add it to all Orgs
self.org_client.add_org_role(self.ion_org._id, operator_role, headers=self.system_actor_header)
self.org_client.add_org_role(org2_id, operator_role, headers=self.system_actor_header)
#Add the OPERATOR_ROLE to the User for the ION Org
self.org_client.grant_role(self.ion_org._id, actor_id, OPERATOR_ROLE, headers=self.system_actor_header)
#Enroll the user in the second Org - do without Negotiation for test
self.org_client.enroll_member(org2_id, actor_id,headers=self.system_actor_header )
#Build the message headers used with this user
actor_header = get_actor_header(actor_id)
#Test the invitation process
#Create a invitation proposal to add a role to a user
sap = IonObject(OT.RequestRoleProposal,consumer=actor_id, provider=org2_id, role_name=OPERATOR_ROLE,
originator=ProposalOriginatorEnum.PROVIDER )
sap_response = self.org_client.negotiate(sap, headers=self.system_actor_header )
ret = self.org_client.has_role(org2_id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, False)
#User creates proposal to approve
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, proposal_type=OT.RequestRoleProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.ACCEPTED)
sap_response2 = self.org_client.negotiate(sap_response, headers=actor_header )
#Verify the user has been assigned the requested role in the second Org
ret = self.org_client.has_role(org2_id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, True)
#Build the message headers used with this user
actor_header = get_actor_header(actor_id)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.RequestRoleNegotiationStatusEvent)
self.assertEquals(len(events_r), 4)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.GRANTED])
#Create the instrument agent with the user that has the proper role
ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The Instrument Agent')
self.ims_client.create_instrument_agent(ia_obj, headers=actor_header)
#Ensure the instrument agent has been created
ia_list,_ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
self.assertEqual(len(ia_list),1)
self.assertEquals(ia_list[0].lcstate, LCS.DRAFT)
self.assertEquals(ia_list[0].availability, AS.PRIVATE)
#Advance the Life cycle to planned. Must be OPERATOR so anonymous user should fail
with self.assertRaises(Unauthorized) as cm:
self.ims_client.execute_instrument_agent_lifecycle(ia_list[0]._id, LCE.PLAN, headers=self.anonymous_actor_headers)
self.assertIn( 'instrument_management(execute_instrument_agent_lifecycle) has been denied',cm.exception.message)
#Advance the Life cycle to planned. Must be OPERATOR
self.ims_client.execute_instrument_agent_lifecycle(ia_list[0]._id, LCE.PLAN, headers=actor_header)
ia = self.rr_client.read(ia_list[0]._id)
self.assertEquals(ia.lcstate, LCS.PLANNED)
#First make a acquire resource request with an non-enrolled user.
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.AcquireResourceProposal,consumer=self.system_actor._id, provider=org2_id, resource_id=ia_list[0]._id )
sap_response = self.org_client.negotiate(sap, headers=self.system_actor_header )
self.assertIn('A precondition for this request has not been satisfied: is_enrolled',cm.exception.message)
#Make a proposal to acquire a resource with an enrolled user that has the right role but the resource is not shared the Org
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.AcquireResourceProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id)
sap_response = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: is_resource_shared',cm.exception.message)
#So share the resource
self.org_client.share_resource(org_id=org2_id, resource_id=ia_list[0]._id, headers=self.system_actor_header )
#Verify the resource is shared
res_list,_ = self.rr_client.find_objects(org2,PRED.hasResource)
self.assertEqual(len(res_list), 1)
self.assertEqual(res_list[0]._id, ia_list[0]._id)
#First try to acquire the resource exclusively but it should fail since the user cannot do this without first
#having had acquired the resource
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.AcquireResourceExclusiveProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id)
sap_response = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: is_resource_acquired',cm.exception.message)
#Make a proposal to acquire a resource with an enrolled user that has the right role and is now shared
sap = IonObject(OT.AcquireResourceProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id)
sap_response = self.org_client.negotiate(sap, headers=actor_header )
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, headers=actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, proposal_type=OT.AcquireResourceProposal, headers=actor_header)
self.assertEqual(len(negotiations),1)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
self.assertEqual(len(negotiations),1)
self.assertEqual(negotiations[0]._id, sap_response.negotiation_id)
#Manager Creates a counter proposal
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.AcquireResourceProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
#Counter proposals for demonstration only
#Calculate one week from now in milliseconds
cur_time = int(get_ion_ts())
week_expiration = cur_time + ( 7 * 24 * 60 * 60 * 1000 )
sap_response = Negotiation.create_counter_proposal(negotiations[0], originator=ProposalOriginatorEnum.PROVIDER)
sap_response.expiration = str(week_expiration)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
#User Creates a counter proposal
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, proposal_type=OT.AcquireResourceProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
cur_time = int(get_ion_ts())
month_expiration = cur_time + ( 30 * 24 * 60 * 60 * 1000 )
sap_response = Negotiation.create_counter_proposal(negotiations[0])
sap_response.expiration = str(month_expiration)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
gevent.sleep(self.SLEEP_TIME+1) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.AcquireResourceNegotiationStatusEvent)
self.assertEquals(len(events_r), 3)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.COUNTER])
self.assertEqual(events_r[-1][2].resource_id, ia_list[0]._id)
#Manager approves Instrument resource proposal
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.AcquireResourceProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.ACCEPTED, ProposalOriginatorEnum.PROVIDER)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
self.assertEqual(len(negotiations),0) #Should be no more open negotiations for a user because auto-accept is enabled
#The following are no longer needed with auto-accept enabled for acquiring a resource
'''
self.assertEqual(len(negotiations),1)
#User accepts proposal in return
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, proposal_type=OT.AcquireResourceProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.ACCEPTED)
sap_response2 = self.org_client.negotiate(sap_response, headers=actor_header )
'''
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
self.assertEqual(len(negotiations),0)
#Check commitment to be active
commitments, _ = self.rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.Commitment)
self.assertEqual(len(commitments),1)
resource_commitment, _ = self.rr_client.find_objects(actor_id,PRED.hasCommitment, RT.Commitment)
self.assertEqual(len(resource_commitment),1)
self.assertNotEqual(resource_commitment[0].lcstate, LCS.DELETED)
subjects, _ = self.rr_client.find_subjects(None,PRED.hasCommitment, commitments[0]._id)
self.assertEqual(len(subjects),3)
contracts, _ = self.rr_client.find_subjects(RT.Negotiation,PRED.hasContract, commitments[0]._id)
self.assertEqual(len(contracts),1)
cur_time = int(get_ion_ts())
invalid_expiration = cur_time + ( 13 * 60 * 60 * 1000 ) # 12 hours from now
#Now try to acquire the resource exclusively for longer than 12 hours
sap = IonObject(OT.AcquireResourceExclusiveProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id,
expiration=str(invalid_expiration))
sap_response = self.org_client.negotiate(sap, headers=actor_header )
#make sure the negotiation was | |
# scripts/turtlebot.py
from math import pi, sqrt, atan2, cos, sin
import numpy as np
import matplotlib.pyplot as plt
import rospy
import tf
from geometry_msgs.msg import Twist, Pose2D
from nav_msgs.msg import Odometry
from obstacle_detector.msg import Obstacles, CircleObstacle, SegmentObstacle
from vector2d import Vector2D
class TurtleBot:
def __init__(self, hexmap, turning_radius):
    """
    Set up motion-planning parameters, command publishing, odometry
    feedback and obstacle perception for the TurtleBot.

    :param hexmap: hexagonal map helper; used for cell-center lookups
        (``cube_to_cat``) and for its ``radius`` attribute
    :param turning_radius: radius [m] of the turning circles used by the
        trajectory planners
    """
    self.hexmap = hexmap
    self.turning_radius = turning_radius
    # motion planning parameters
    self.sample_period = 0.1  # reference sampling period [s]
    self.rate = rospy.Rate(1/self.sample_period)
    self.linear_vel = 1.0
    # angular speed consistent with linear_vel on the turning circle
    self.angular_vel = self.linear_vel / self.turning_radius
    self.remaining_time = 0 # within [0, sample_period]; carried over between path segments
    self.smooth_start_update_interval = 0.03 # second
    # current turning direction; the buffer is set by find_tangent_points
    self.turning_clockwise = False
    self.turning_clockwise_buffer = False
    # send velocity commands
    self.handpoint_offset = 0.2 # meter
    self.controller_tune_K = 0.3  # proportional gain of tracking_controller
    self.vel = Twist()
    self.vel_pub = rospy.Publisher('cmd_vel_mux/input/navi', Twist, queue_size=10)
    # odometry feedback
    self.logging_counter = 0
    self.trajectory = list()      # robot poses (presumably filled by odom_callback — confirm)
    self.trajectory_hp = list()   # handpoint poses (presumably filled by odom_callback — confirm)
    self.trajectory_cmd = list()  # commanded reference points (filled by tracking_controller)
    self.pose = Pose2D()
    self.pose_hp = Pose2D()  # handpoint pose tracked by the controller
    self.odom_sub = rospy.Subscriber("odom", Odometry, self.odom_callback)
    # perception # TODO: probabilistic update
    self.obstacle_list_candidate = dict()
    self.obstacle_threshold = rospy.get_param("/hdcp_planner/obstacle_threshold", 100)
    self.valid_sensing_range = self.hexmap.radius * 6
    self.obstacle_sub = rospy.Subscriber("raw_obstacles", Obstacles, self.obstacle_callback)
def straight_line_trajectory_planning(self, start_point, end_point):
    """
    Follow the straight segment from ``start_point`` to ``end_point``
    by streaming sampled reference states to the tracking controller
    at constant linear velocity.

    ``self.remaining_time`` is consumed at the start and refreshed with
    the final overshoot so that sampling stays continuous across
    consecutive path segments.
    """
    direction_vector = end_point - start_point
    angle = direction_vector.angle
    total_distance = abs(direction_vector)
    local_time = self.remaining_time # remaining time is within [0, self.sample_period]
    while not rospy.is_shutdown():
        # reference point at local_time along the segment, plus the
        # (constant) reference velocity
        current_distance = self.linear_vel * local_time
        x_ref = start_point[0] + current_distance * cos(angle)
        y_ref = start_point[1] + current_distance * sin(angle)
        vx_ref = self.linear_vel * cos(angle)
        vy_ref = self.linear_vel * sin(angle)
        self.tracking_controller(x_ref, y_ref, vx_ref, vy_ref)
        local_time += self.sample_period
        remaining_distance = total_distance - self.linear_vel * local_time
        if remaining_distance < 0:
            # overshot the end point: convert the overshoot back into
            # time, to be consumed by the next segment
            self.remaining_time = - remaining_distance/self.linear_vel
            break
    if self.remaining_time > self.sample_period or self.remaining_time < 0:
        # should not happen: remaining_time is expected in [0, sample_period]
        rospy.logwarn("line: remaining_time = " + str(self.remaining_time))
        rospy.loginfo("local_time = " + str(local_time))
        rospy.loginfo("start_point = " + str(start_point) + "; end_point = " + str(end_point))
        rospy.loginfo("total_distance = " + str(total_distance) + "; remaining_distance" + str(remaining_distance))
def circle_trajectory_planning(self, start_point, end_point, center):
    """
    Follow the arc of the turning circle centered at ``center`` from
    ``start_point`` to ``end_point``, in the direction selected by
    ``self.turning_clockwise``, streaming sampled reference states to
    the tracking controller.

    ``self.remaining_time`` is consumed at the start and refreshed with
    the final overshoot, as in straight_line_trajectory_planning.
    """
    start_angle = atan2(start_point[1]-center[1], start_point[0]-center[0])
    angle_difference = self.get_angle_difference(start_point, end_point, center)
    local_time = self.remaining_time # remaining time is within [0, self.sample_period]
    while not rospy.is_shutdown():
        if self.turning_clockwise:
            current_angle = start_angle - self.angular_vel * local_time # from x to x_dot: take derivative wrt local_time
            x_ref = center[0] + self.turning_radius * cos(current_angle) # x = cx + r*cos(a-vt)
            y_ref = center[1] + self.turning_radius * sin(current_angle) # y = cy + r*sin(a-vt)
            vx_ref = self.linear_vel * sin(current_angle) # x_dot = rv*sin(a-vt) # lin_vel = r*ang_vel
            vy_ref = - self.linear_vel * cos(current_angle) # y_dot = -rv*cos(a-vt)
        else:
            current_angle = start_angle + self.angular_vel * local_time # from x to x_dot: take derivative wrt local_time
            x_ref = center[0] + self.turning_radius * cos(current_angle) # x = cx + r*cos(a+vt)
            y_ref = center[1] + self.turning_radius * sin(current_angle) # y = cy + r*sin(a+vt)
            vx_ref = - self.linear_vel * sin(current_angle) # x_dot = -rv*sin(a+vt) # lin_vel = r*ang_vel
            vy_ref = self.linear_vel * cos(current_angle) # y_dot = rv*cos(a+vt)
        self.tracking_controller(x_ref, y_ref, vx_ref, vy_ref)
        local_time += self.sample_period
        remaining_angle = angle_difference - self.angular_vel * local_time
        if remaining_angle < 0:
            # overshot the end point: convert the overshoot back into
            # time, to be consumed by the next segment
            self.remaining_time = - remaining_angle/self.angular_vel
            break
    if self.remaining_time > self.sample_period or self.remaining_time < 0:
        # should not happen: remaining_time is expected in [0, sample_period]
        rospy.logwarn("circle: remaining_time = " + str(self.remaining_time))
        rospy.loginfo("local_time = " + str(local_time) + "; center = " + str(center))
        rospy.loginfo("start_point = " + str(start_point) + "; end_point = " + str(end_point))
        rospy.loginfo("angle_difference = " + str(angle_difference) + "; remaining_angle" + str(remaining_angle))
def initial_circle_trajectory_planning(self):
    """
    Perform one full revolution (2*pi) on the circle of radius
    ``self.turning_radius`` centered at the origin, ramping the
    commanded velocities up from zero (the ramp factor grows by
    ``smooth_start_update_interval`` each sample, capped at 1) so the
    robot starts smoothly.
    """
    angle_difference = 2*pi
    factor = 0
    current_angle = 0
    while not rospy.is_shutdown():
        # ramp factor towards 1, then hold nominal speeds
        factor = factor + self.smooth_start_update_interval
        linear_vel = self.linear_vel * min(factor, 1)
        angular_vel = self.angular_vel * min(factor, 1)
        # integrate the (possibly reduced) angular speed over one sample
        current_angle = current_angle + angular_vel * self.sample_period
        x_ref = self.turning_radius * cos(current_angle)
        y_ref = self.turning_radius * sin(current_angle)
        vx_ref = - linear_vel * sin(current_angle)
        vy_ref = linear_vel * cos(current_angle)
        self.tracking_controller(x_ref, y_ref, vx_ref, vy_ref)
        remaining_angle = angle_difference - current_angle
        if remaining_angle < 0:
            # NOTE(review): the overshoot is converted to time with the
            # nominal angular_vel even though the ramp phase ran slower;
            # presumably a negligible approximation — confirm intended
            self.remaining_time = - remaining_angle/self.angular_vel
            break
def get_angle_difference(self, start_point, end_point, center):
# compute CCW angle difference between two points on circumference
start_angle = atan2(start_point[1]-center[1], start_point[0]-center[0])
end_angle = atan2(end_point[1]-center[1], end_point[0]-center[0])
angle_difference = end_angle - start_angle
if angle_difference <= 0: # make sure value is within (0, 2pi]
angle_difference += 2*pi
# switch to CW angle difference if needed
if self.turning_clockwise and angle_difference != 2*pi: # 2*pi means we will turn full circle
angle_difference = 2*pi - angle_difference
return angle_difference
def find_tangent_points(self, current_hex, next_hex, init_point):
    """
    Choose between the inner and outer tangent pair linking the turning
    circles of ``current_hex`` and ``next_hex``: pick whichever tangent
    start is reached first when sweeping from ``init_point`` around the
    current circle.

    Also records the turning direction for the next circle in
    ``self.turning_clockwise_buffer`` (the inner tangent reverses it,
    the outer tangent keeps it).
    """
    circle_center = self.hexmap.cube_to_cat(current_hex)
    outer_pair = self.outer_tangent_points(current_hex, next_hex)
    inner_pair = self.inner_tangent_points(current_hex, next_hex)
    sweep_to_outer = self.get_angle_difference(init_point, outer_pair[0], circle_center)
    sweep_to_inner = self.get_angle_difference(init_point, inner_pair[0], circle_center)
    if sweep_to_inner < sweep_to_outer:
        # crossing tangent: the next circle is traversed the other way
        self.turning_clockwise_buffer = not self.turning_clockwise
        return inner_pair
    self.turning_clockwise_buffer = self.turning_clockwise
    return outer_pair
    def inner_tangent_points(self, current_hex, target_hex):
        """Inner (crossing) tangent points between the two equal-radius turning
        circles centred on the given hexes.

        Returns (start_point, end_point): the tangent point on the current
        circle and on the target circle, respectively.  The sign of the
        sqrt term selects the CW or CCW tangent line.
        NOTE(review): sqrt(w - 4*rt2) assumes the circle centres are at least
        2*turning_radius apart -- verify this holds for adjacent hexes.
        """
        current_center = self.hexmap.cube_to_cat(current_hex)
        target_center = self.hexmap.cube_to_cat(target_hex)
        ai = current_center[0]
        bi = current_center[1]
        aj = target_center[0]
        bj = target_center[1]
        w = (aj-ai)**2 + (bj-bi)**2  # squared distance between circle centres
        rt = self.turning_radius
        rt2 = rt**2
        if self.turning_clockwise:
            xi = ai + (2*rt2*(aj-ai) - rt*(bj-bi)*sqrt(w-4*rt2))/w # minus sign for CW
            yi = bi + (2*rt2*(bj-bi) - rt*(ai-aj)*sqrt(w-4*rt2))/w
            xj = aj + (2*rt2*(ai-aj) - rt*(bi-bj)*sqrt(w-4*rt2))/w
            yj = bj + (2*rt2*(bi-bj) - rt*(aj-ai)*sqrt(w-4*rt2))/w
        else:
            xi = ai + (2*rt2*(aj-ai) + rt*(bj-bi)*sqrt(w-4*rt2))/w # plus sign for CCW
            yi = bi + (2*rt2*(bj-bi) + rt*(ai-aj)*sqrt(w-4*rt2))/w
            xj = aj + (2*rt2*(ai-aj) + rt*(bi-bj)*sqrt(w-4*rt2))/w
            yj = bj + (2*rt2*(bi-bj) + rt*(aj-ai)*sqrt(w-4*rt2))/w
        return Vector2D(xi, yi), Vector2D(xj, yj) # start_point, end_point
    def outer_tangent_points(self, current_hex, target_hex):
        """Outer (non-crossing) tangent points between the two equal-radius
        turning circles centred on the given hexes.

        For equal radii the outer tangent runs parallel to the line between
        centres, so each tangent point is its circle's centre shifted
        perpendicular to that line by the turning radius; the sign of the
        shift selects the CW or CCW side.  Returns (start_point, end_point).
        """
        current_center = self.hexmap.cube_to_cat(current_hex)
        target_center = self.hexmap.cube_to_cat(target_hex)
        ai = current_center[0]
        bi = current_center[1]
        aj = target_center[0]
        bj = target_center[1]
        w = (aj-ai)**2 + (bj-bi)**2  # squared distance between circle centres
        rt = self.turning_radius
        if self.turning_clockwise:
            xi = ai + rt*(bi-bj)/sqrt(w) # plus sign for CW
            yi = bi + rt*(aj-ai)/sqrt(w)
            xj = aj + rt*(bi-bj)/sqrt(w)
            yj = bj + rt*(aj-ai)/sqrt(w)
        else:
            xi = ai - rt*(bi-bj)/sqrt(w) # minus sign for CCW
            yi = bi - rt*(aj-ai)/sqrt(w)
            xj = aj - rt*(bi-bj)/sqrt(w)
            yj = bj - rt*(aj-ai)/sqrt(w)
        return Vector2D(xi, yi), Vector2D(xj, yj) # start_point, end_point
    def tracking_controller(self, x_ref, y_ref, vx_ref, vy_ref):
        '''
        Proportional trajectory-tracking controller for the hand point.
        vx = xh_d_dot - K * (xh - xh_d) => xh --> xh_d
        vy = yh_d_dot - K * (yh - yh_d) => yh --> yh_d
        Feed-forward reference velocity plus P-feedback on position error;
        the resulting hand-point velocity is published via pub_vel_hp().
        '''
        self.trajectory_cmd.append([x_ref, y_ref])  # log the commanded point
        K = self.controller_tune_K # controller parameter (proportional gain)
        ux = vx_ref - K * (self.pose_hp.x - x_ref)
        uy = vy_ref - K * (self.pose_hp.y - y_ref)
        vel_hp = [ux, uy]
        self.pub_vel_hp(vel_hp)
    def pub_vel_hp(self, vel_hp):
        '''
        Convert a desired hand-point velocity [x, y] into unicycle commands
        (v, w) and publish them.
        matrix transform
        [ v ]    1    [ L*cos0    L*sin0 ]   [ x ]
        [   ] = --- * [                  ] * [   ]
        [ w ]    L    [ -sin0      cos0  ]   [ y ]
        where L is the hand-point offset and 0 the robot heading theta.
        '''
        x = vel_hp[0]
        y = vel_hp[1]
        theta = self.pose_hp.theta
        v = x*cos(theta) + y*sin(theta)
        w = (x*(-sin(theta)) + y*cos(theta))/self.handpoint_offset
        rospy.logdebug("vel: theta=" + str(theta) + "; x=" + str(x) +\
            "; y=" + str(y) + "; v=" + str(v) + "; w=" + str(w))
        self.vel.linear.x = v
        self.vel.angular.z = w
        self.vel_pub.publish(self.vel)
        # Sleep to maintain the control-loop rate (blocks the caller).
        self.rate.sleep()
    def odom_callback(self, msg):
        """Odometry subscriber: update base pose and hand-point pose, and log
        every 10th sample into the trajectory histories."""
        # get (x, y, theta) specification from odometry topic
        quarternion = [msg.pose.pose.orientation.x,msg.pose.pose.orientation.y,\
            msg.pose.pose.orientation.z, msg.pose.pose.orientation.w]
        (_, _, yaw) = tf.transformations.euler_from_quaternion(quarternion)
        self.pose.theta = yaw
        self.pose.x = msg.pose.pose.position.x
        self.pose.y = msg.pose.pose.position.y
        # Hand point: reference point offset ahead of the base along its heading.
        self.pose_hp.theta = yaw
        self.pose_hp.x = self.pose.x + self.handpoint_offset * cos(yaw)
        self.pose_hp.y = self.pose.y + self.handpoint_offset * sin(yaw)
        # reduce the number of saved messages to 1/10
        self.logging_counter += 1
        if self.logging_counter == 10:
            self.logging_counter = 0
            self.trajectory.append([self.pose.x, self.pose.y, self.pose.theta])
            self.trajectory_hp.append([self.pose_hp.x, self.pose_hp.y, self.pose_hp.theta])
            rospy.logdebug("odom: x=" + str(self.pose.x) +\
                "; y=" + str(self.pose.y) + "; theta=" + str(yaw))
            rospy.logdebug("odom_hp: x_hp=" + str(self.pose_hp.x) +\
                "; y_hp=" + str(self.pose_hp.y) + "; theta=" + str(yaw))
    def valid_sensing(self, point):
        # Trust a sensed point only when it lies within valid_sensing_range of
        # the robot base.  NOTE(review): abs() of a Vector2D difference is
        # presumably its Euclidean norm -- confirm in the Vector2D class.
        return abs(point - Vector2D(self.pose.x, self.pose.y)) < self.valid_sensing_range
def obstacle_callback(self, msg):
# sampling points on the obstacles
points = list()
for circle in msg.circles:
center = Vector2D(circle.center.x, circle.center.y)
points.append(center)
r = circle.true_radius
if r > 0:
for theta in np.arange(0, 2*pi, 0.3):
radius = Vector2D(r*cos(theta), r*sin(theta))
points.append(center + radius)
for segment in msg.segments:
line = Vector2D(segment.last_point.x - segment.first_point.x, \
segment.last_point.y - segment.first_point.y)
interval = 0
while abs(line) - interval > 0:
cx = segment.first_point.x + interval * cos(line.angle)
cy = segment.first_point.y + interval * sin(line.angle)
points.append(Vector2D(cx, cy))
interval += 0.2
for p in points:
if self.valid_sensing(p):
p_hex = self.hexmap.cat_to_cube(p)
if p_hex not in self.obstacle_list_candidate:
self.obstacle_list_candidate[p_hex] = 1
else:
self.obstacle_list_candidate[p_hex] += 1
for candidate, times in self.obstacle_list_candidate.items(): #TODO: probabilistic update
if times > | |
# repo: mizutanilab/schizo-nn
from tensorflow.keras import layers
from tensorflow.keras import backend as K
import math
import numpy as np
#https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/keras/layers/core.py
from tensorflow.python.keras.layers.ops import core as core_ops
from tensorflow.python.keras import activations
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import constraints
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
class SzDense(layers.Layer):
  """Densely-connected layer with a fixed, non-trainable sparsifying mask.

  A "window" tensor of the same shape as the kernel is built once in
  `build()` and multiplied element-wise with the kernel on every forward
  pass, emulating a partially-connected layer.  Surviving weights are
  up-scaled by num_weights/num_ones (or the Gaussian sum) so the expected
  output magnitude is preserved.

  Args:
    units: dimensionality of the output space.
    halfbandwidth: half-width of the band of retained weights around the
      kernel "diagonal"; 0 means derive it from `param_reduction`.
    param_reduction: target fraction of weights to remove, in [0, 1).
    form: mask shape -- 'diagonal' (hard band), 'gaussian' (soft band) or
      'random' (Bernoulli mask).
    Remaining arguments mirror `tf.keras.layers.Dense`.
  """
  def __init__(self,
               units,
               halfbandwidth=0,
               param_reduction=0.5,
               form='diagonal',
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(SzDense, self).__init__(
        activity_regularizer=activity_regularizer, **kwargs)
    self.units = int(units) if not isinstance(units, int) else units
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
    self.halfbandwidth = halfbandwidth
    self.form = form
    self.reduction_sv = param_reduction  # requested reduction fraction
    self.num_ones = 0       # retained mask elements; filled in build()
    self.reduced_ratio = 0  # achieved reduction ratio; filled in build()
    self.num_weights = 0    # total kernel elements; filled in build()
    self.reduced_ratio = 0  # NOTE(review): duplicate of the assignment above
  def build(self, input_shape):
    """Create kernel/bias and the fixed sparsification window."""
    dtype = dtypes.as_dtype(self.dtype or K.floatx())
    if not (dtype.is_floating or dtype.is_complex):
      raise TypeError('Unable to build `SzDense` layer with non-floating point '
                      'dtype %s' % (dtype,))
    input_shape = tensor_shape.TensorShape(input_shape)
    last_dim = tensor_shape.dimension_value(input_shape[-1])
    if last_dim is None:
      raise ValueError('The last dimension of the inputs to `SzDense` '
                       'should be defined. Found `None`.')
    self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
    self.kernel = self.add_weight(
        'kernel',
        shape=[last_dim, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        dtype=self.dtype,
        trainable=True)
    # Non-trainable mask, multiplied with the kernel on every call().
    self.window = self.add_weight(name='window',
                                  shape=[last_dim, self.units],
                                  initializer='ones',
                                  trainable=False)
    if self.use_bias:
      self.bias = self.add_weight(
          'bias',
          shape=[self.units,],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    #window init
    self.num_ones = 0
    self.reduced_ratio = 0
    nx = last_dim
    ny = self.units
    self.num_weights = nx * ny
    if self.halfbandwidth == 0:
      # Derive the band half-width from the requested parameter reduction.
      self.halfbandwidth = (nx*ny / math.sqrt(nx*nx + ny*ny)) * (1. - math.sqrt(self.reduction_sv))
      if self.form == 'gaussian':
        self.halfbandwidth *= 1.5
      #endif
    wnd = np.zeros((nx,ny))
    w_corr = 1.  # up-scale factor so retained weights keep overall magnitude
    if self.form == 'diagonal':
      if ny > 1:
        rxy = (nx-1) / (ny-1)  # slope of the kernel "diagonal" in index space
        hwdiv = self.halfbandwidth * math.sqrt(rxy * rxy + 1)
        for iy in range(ny):
          # Keep input indices inside the band [rxy*iy - hwdiv, rxy*iy + hwdiv].
          ix1 = rxy * iy - hwdiv
          ix1 = int(ix1) + 1 if ix1 >= 0 else 0
          if ix1 > nx-1:
            continue
          ix2 = rxy * iy + hwdiv
          ix2 = math.ceil(ix2) if ix2 < nx else nx
          wnd[ix1:ix2, iy:iy+1] = 1
          self.num_ones += (ix2-ix1)
        #for ixiy
      else:
        # Single output unit: keep every weight.
        wnd[:,:] = 1
        self.num_ones += nx
      #endif ny>1
      self.reduced_ratio = (self.num_weights - self.num_ones) / self.num_weights
      if self.num_ones > 0:
        w_corr = self.num_weights / self.num_ones
        self.kernel.assign(self.kernel * (wnd * w_corr))
    elif self.form == 'gaussian':
      if (self.halfbandwidth > 0) and (ny > 1):
        sgm2 = 1. / (2. * self.halfbandwidth * self.halfbandwidth)
        gsum = 0
        rxy = (nx-1) / (ny-1)
        for ix in range(nx):
          for iy in range(ny):
            # Soft mask: Gaussian fall-off with distance from the diagonal.
            gauss = math.exp(-(ix-rxy*iy)*(ix-rxy*iy)*sgm2)
            wnd[ix][iy] = gauss
            gsum += gauss
        #for ixiy
        self.reduced_ratio = 1. - gsum / self.num_weights
        # NOTE(review): num_ones stays 0 in this branch, so get_num_zeros()
        # reports every weight as zero for the gaussian form -- confirm intended.
        if gsum > 0:
          w_corr = self.num_weights / gsum
          wnd = wnd * w_corr
      else:
        wnd[:,:] = 1
        self.num_ones = nx * ny
      #endif halfbandwidth
    elif self.form == 'random':
      # Bernoulli mask: drop each weight independently with prob reduction_sv.
      wnd = np.random.rand(nx,ny)
      wnd = np.where(wnd < self.reduction_sv, 0, 1)
      self.num_ones = np.sum(wnd)
      self.reduced_ratio = (self.num_weights - self.num_ones) / self.num_weights
      if self.num_ones > 0:
        w_corr = self.num_weights / self.num_ones
        self.kernel.assign(self.kernel * (wnd * w_corr))
    #endif form_function
    self.window.assign(wnd)
    self.built = True
  def call(self, inputs):
    # Standard dense op on the masked kernel.
    return core_ops.dense(
        inputs,
        self.kernel * self.window,
        self.bias,
        self.activation,
        dtype=self._compute_dtype_object)
  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if tensor_shape.dimension_value(input_shape[-1]) is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)
  def get_config(self):
    # NOTE(review): halfbandwidth/param_reduction/form are not serialized,
    # so a layer restored from config falls back to their defaults.
    config = super(SzDense, self).get_config()
    config.update({
        'units':
            self.units,
        'activation':
            activations.serialize(self.activation),
        'use_bias':
            self.use_bias,
        'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint)
    })
    return config
  def get_num_zeros(self):
    # Number of masked-out weights (valid after build()).
    return(self.num_weights - self.num_ones)
  def get_num_weights(self):
    return(self.num_weights)
  def get_reduced_ratio(self):
    # Achieved (not requested) reduction ratio.
    return(self.reduced_ratio)
  def get_halfbandwidth(self):
    return(self.halfbandwidth)
#class SzDense
#https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/keras/layers/convolutional.py
import functools
import random
import six
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
class SzConv(layers.Layer):
  """Abstract N-D convolution layer with a fixed sparsifying kernel mask.

  Mirrors `tf.keras.layers.Conv` but builds a non-trainable "window" mask of
  the kernel's shape in `build()` and multiplies it with the kernel on every
  forward pass.  Mask forms:
    'individual' -- Bernoulli mask over every kernel element;
    'kernel'     -- keep or drop whole kernels (all taps for one
                    (in-channel, filter) pair);
    'diagonal'   -- keep a band around the (in-channel, filter) diagonal.
  Retained weights are up-scaled by num_weights/num_ones at build time.

  Args:
    rank: spatial rank of the convolution (1, 2 or 3).
    param_reduction: target fraction of weights to remove, in [0, 1).
    form: mask shape, see above.
    Remaining arguments mirror `tf.keras.layers.Conv`.
  """
  def __init__(self,
               rank,
               filters,
               kernel_size,
               param_reduction=0.5,
               form='diagonal',
               strides=1,
               padding='valid',
               data_format=None,
               dilation_rate=1,
               groups=1,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               conv_op=None,
               **kwargs):
    super(SzConv, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=regularizers.get(activity_regularizer),
        **kwargs)
    self.rank = rank
    if isinstance(filters, float):
      filters = int(filters)
    self.filters = filters
    self.groups = groups or 1
    self.kernel_size = conv_utils.normalize_tuple(
        kernel_size, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(
        dilation_rate, rank, 'dilation_rate')
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(min_ndim=self.rank + 2)
    self._validate_init()
    self._is_causal = self.padding == 'causal'
    self._channels_first = self.data_format == 'channels_first'
    self._tf_data_format = conv_utils.convert_data_format(
        self.data_format, self.rank + 2)
    # Sparsification state; the actual mask is created in build().
    self.reduction_sv = param_reduction
    self.form = form
    self.num_ones = 0
    self.num_weights = 0
    self.reduced_ratio = 0
    self.halfbandwidth = 0
  def _validate_init(self):
    """Sanity-check constructor arguments (mirrors Keras Conv checks)."""
    if self.filters is not None and self.filters % self.groups != 0:
      raise ValueError(
          'The number of filters must be evenly divisible by the number of '
          'groups. Received: groups={}, filters={}'.format(
              self.groups, self.filters))
    if not all(self.kernel_size):
      raise ValueError('The argument `kernel_size` cannot contain 0(s). '
                       'Received: %s' % (self.kernel_size,))
    if (self.padding == 'causal' and not isinstance(self,
                                                    (SzConv1D, SzSeparableConv1D))):
      raise ValueError('Causal padding is only supported for `SzConv1D`'
                       'and `SzSeparableConv1D`.')
  def build(self, input_shape):
    """Create kernel/bias, the convolution op, and the fixed window mask."""
    input_shape = tensor_shape.TensorShape(input_shape)
    input_channel = self._get_input_channel(input_shape)
    if input_channel % self.groups != 0:
      raise ValueError(
          'The number of input channels must be evenly divisible by the number '
          'of groups. Received groups={}, but the input has {} channels '
          '(full input shape is {}).'.format(self.groups, input_channel,
                                             input_shape))
    kernel_shape = self.kernel_size + (input_channel // self.groups,
                                       self.filters)
    self.kernel = self.add_weight(
        name='kernel',
        shape=kernel_shape,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        trainable=True,
        dtype=self.dtype)
    # Non-trainable mask, multiplied with the kernel on every call().
    self.window = self.add_weight(name='window',
                                  shape=kernel_shape,
                                  initializer='ones',
                                  trainable=False)
    if self.use_bias:
      self.bias = self.add_weight(
          name='bias',
          shape=(self.filters,),
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          trainable=True,
          dtype=self.dtype)
    else:
      self.bias = None
    channel_axis = self._get_channel_axis()
    self.input_spec = InputSpec(min_ndim=self.rank + 2,
                                axes={channel_axis: input_channel})
    # Convert Keras formats to TF native formats.
    if self.padding == 'causal':
      tf_padding = 'VALID'  # Causal padding handled in `call`.
    elif isinstance(self.padding, six.string_types):
      tf_padding = self.padding.upper()
    else:
      tf_padding = self.padding
    tf_dilations = list(self.dilation_rate)
    tf_strides = list(self.strides)
    tf_op_name = self.__class__.__name__
    if tf_op_name == 'SzConv1D':
      tf_op_name = 'conv1d'  # Backwards compat.
    self._convolution_op = functools.partial(
        nn_ops.convolution_v2,
        strides=tf_strides,
        padding=tf_padding,
        dilations=tf_dilations,
        data_format=self._tf_data_format,
        name=tf_op_name)
    #window initialization
    wnd = np.zeros(kernel_shape)
    w_corr = 1.  # up-scale factor so retained weights keep overall magnitude
    nx = input_channel // self.groups
    ny = self.filters
    if self.form == 'individual':
      # Independent Bernoulli mask over every kernel element.
      wnd = np.random.random_sample(kernel_shape)
      wnd = np.where(wnd < self.reduction_sv, 0, 1)
    elif self.form == 'kernel':
      # Keep or drop entire kernels per (input-channel, filter) pair.
      for ix in range(nx):
        for iy in range(ny):
          if random.random() > self.reduction_sv:
            wnd[..., ix, iy] = 1
    elif self.form == 'diagonal':
      # Band mask over the (input-channel, filter) plane, all spatial taps.
      self.halfbandwidth = (nx*ny / math.sqrt(nx*nx + ny*ny)) * (1. - math.sqrt(self.reduction_sv))
      if ny > 1:
        rxy = (nx-1) / (ny-1)  # slope of the channel/filter "diagonal"
        hwdiv = self.halfbandwidth * math.sqrt(rxy * rxy + 1)
        for iy in range(ny):
          ix1 = rxy * iy - hwdiv
          ix1 = int(ix1) + 1 if ix1 >= 0 else 0
          if ix1 > nx-1:
            continue
          ix2 = rxy * iy + hwdiv
          ix2 = math.ceil(ix2) if ix2 < nx else nx
          wnd[..., ix1:ix2, iy:iy+1] = 1
        #for ixiy
      else:
        wnd = np.ones(kernel_shape)
      #endif ny>1
    #endif self.form
    self.num_ones = np.sum(wnd)
    self.num_weights = wnd.size
    self.reduced_ratio = (self.num_weights - self.num_ones) / self.num_weights
    if self.num_ones > 0:
      w_corr = self.num_weights / self.num_ones
      self.kernel.assign(self.kernel * (wnd * w_corr))
    self.window.assign(wnd)
    self.built = True
  def call(self, inputs):
    if self._is_causal:  # Apply causal padding to inputs for Conv1D.
      inputs = array_ops.pad(inputs, self._compute_causal_padding(inputs))
    # Convolve with the masked kernel.
    outputs = self._convolution_op(inputs, self.kernel * self.window)
    if self.use_bias:
      output_rank = outputs.shape.rank
      if self.rank == 1 and self._channels_first:
        # nn.bias_add does not accept a 1D input tensor.
        bias = array_ops.reshape(self.bias, (1, self.filters, 1))
        outputs += bias
      else:
        # Handle multiple batch dimensions.
        if output_rank is not None and output_rank > 2 + self.rank:
          def _apply_fn(o):
            return nn.bias_add(o, self.bias, data_format=self._tf_data_format)
          outputs = nn_ops.squeeze_batch_dims(
              outputs, _apply_fn, inner_rank=self.rank + 1)
        else:
          outputs = nn.bias_add(
              outputs, self.bias, data_format=self._tf_data_format)
    if self.activation is not None:
      return self.activation(outputs)
    return outputs
  def _spatial_output_shape(self, spatial_input_shape):
    # Per-dimension output length given padding/stride/dilation.
    return [
        conv_utils.conv_output_length(
            length,
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        for i, length in enumerate(spatial_input_shape)
    ]
  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    batch_rank = len(input_shape) - self.rank - 1
    if self.data_format == 'channels_last':
      return tensor_shape.TensorShape(
          input_shape[:batch_rank]
          + self._spatial_output_shape(input_shape[batch_rank:-1])
          + [self.filters])
    else:
      return tensor_shape.TensorShape(
          input_shape[:batch_rank] + [self.filters] +
          self._spatial_output_shape(input_shape[batch_rank + 1:]))
| |
distance + 0.001
# internal nodes have entries
# have one-to-one relationship between nodes and entries
class RTreeNode:
    """Node of an R-tree.

    Internal nodes hold entries; there is a one-to-one relationship between
    child nodes and entries, maintained as a child -> entry dictionary so
    entry lookup by child is O(1).

    Attributes:
        parent: the parent RTreeNode, or None for the root.
        is_leaf: caller-maintained leaf flag (see also isLeafNode()).
        m / M: minimum / maximum number of entries per node.
    """

    def __init__(self, parent, entries, is_leaf):
        """Create a node holding `entries` (RTreeEntry-like objects)."""
        self.parent = parent
        self.is_leaf = is_leaf
        self.m = 8   # minimum entries before the node is underfull
        self.M = 16  # maximum entries before the node is full
        # One-to-one map from each entry's child node to the entry itself.
        self.child_to_entry_dict = {entry.getChild(): entry for entry in entries}

    def getParent(self):
        return self.parent

    def getEntries(self):
        """Return a view of this node's entries."""
        return self.child_to_entry_dict.values()

    def getChildren(self):
        """Return a view of this node's child nodes."""
        return self.child_to_entry_dict.keys()

    def getNumEntries(self):
        return len(self.child_to_entry_dict)

    def getNumChildren(self):
        # Entries and children are in one-to-one correspondence.
        return self.getNumEntries()

    def setParent(self, node):
        self.parent = node

    def isNonTraditionalLeafNode(self):
        """True for an empty root, or when every child is itself empty."""
        return (self.getParent() is None and self.getNumChildren() == 0) or \
            (self.getNumChildren() != 0 and
             all(x.getChild().getNumEntries() == 0 for x in self.getEntries()))

    def isLeafNode(self):
        """A leaf has no children."""
        return self.getNumChildren() == 0

    def setIsLeafNode(self, is_leaf):
        self.is_leaf = is_leaf

    def addEntry(self, entry):
        self.child_to_entry_dict[entry.getChild()] = entry

    def removeEntry(self, entry):
        # The entry must match exactly (keyed by its child node).
        self.child_to_entry_dict.pop(entry.getChild())

    def getMinimumNumEntriesPerNode(self):
        return self.m

    def getMaximumNumEntriesPerNode(self):
        return self.M

    def isFull(self):
        return self.getNumEntries() >= self.getMaximumNumEntriesPerNode()

    def isUnderfull(self):
        return self.getNumEntries() < self.getMinimumNumEntriesPerNode()

    def retrieveEntryForChild(self, node):
        """Return the entry whose child is `node` (KeyError if absent)."""
        return self.child_to_entry_dict[node]

    def toString(self):
        return str(self.getEntries())
# an entry is effectively an (mbr, child) pair
# mbr may be composite or raw
class RTreeEntry:
    """An (mbr, child) pair: a bounding rectangle plus the subtree it bounds.

    `mbr` may be composite or raw.  The static draw/drawHelper methods render
    the tree recursively onto a PythonMagick image (debugging aid).
    NOTE(review): drawing presumes `PythonMagick` and `Point` are imported
    elsewhere in this module.
    """
    def __init__(self, mbr, child):
        self.mbr = mbr
        self.child = child
    def getMBR(self):
        return self.mbr
    def setMBR(self, mbr):
        self.mbr = mbr
    def getChild(self):
        return self.child
    def setChild(self, node):
        self.child = node
    @staticmethod
    def draw(tree, entries, image, depth):
        # Render each entry (and, recursively, its subtree) onto `image`.
        for entry in entries:
            RTreeEntry.drawHelper(tree, entry, image, depth)
    @staticmethod
    # def draw(self, tree, draw, depth):
    def drawHelper(tree, entry, image, depth):
        # Draw this entry's MBR -- a small circle for a degenerate (point)
        # MBR, a rectangle otherwise -- colour-coded by depth, then recurse.
        node = entry.getChild()
        entries = node.getEntries()
        mbr_list = [entry.getMBR()]
        for mbr in mbr_list:
            upper_left = mbr.getUpperLeft()
            lower_right = mbr.getLowerRight()
            x1, y1 = upper_left
            x2, y2 = lower_right
            # World-to-image scale/offset; constants match the canvas size.
            # multiplier = 3 * 0.8
            # multiplier = 1 / (1.0 * 1302) * 0.8
            multiplier = 1 / (1.0 * 6.5) * 0.8
            # offset = (768 * 0.2) / 2
            offset = (1536 * 0.2) / 2
            next_x1, next_y1 = (multiplier * x1 + offset, multiplier * y1 + offset)
            next_x2, next_y2 = (multiplier * x2 + offset, multiplier * y2 + offset)
            # if depth != 0 and depth != 1:
            if depth != 0:
                # continue
                pass
            """
            matching_entries = [x for x in entries if x.getMBR() == mbr]
            matching_entry = matching_entries[0]
            child = matching_entry.getChild()
            next_entries = child.getEntries()
            # print "num. of children:", len(next_entries)
            """
            """
            for next_entry in next_entries:
                mbr = next_entry.getMBR()
                print "mbr:", mbr.toString()
            """
            # Cycle through red/blue/green by depth.
            color_choice = depth % 3
            # print upper_left, lower_right
            color = None
            if color_choice == 0:
                # color = "rgb(255, 0, 0)"
                color = PythonMagick.Color(65535, 0, 0, 32767)
            elif color_choice == 1:
                # color = "rgb(0, 0, 255)"
                color = PythonMagick.Color(0, 0, 65535, 32767)
            elif color_choice == 2:
                # color = "rgb(0, 255, 0)"
                color = PythonMagick.Color(0, 65535, 0, 32767)
            if upper_left == lower_right:
                # Degenerate MBR (a point): draw a filled circle.
                # draw.ellipse([(next_x1 - 4, next_y1 - 4), (next_x2 + 4, next_y2 + 4)], fill = color)
                # print "drew an ellipse"
                image.strokeColor("none")
                image.fillColor(color)
                center_x = next_x1
                center_y = next_y1
                radius = 4
                perimeter_x = next_x1
                perimeter_y = next_y1 + radius
                image.draw(PythonMagick.DrawableCircle(center_x, center_y, perimeter_x, perimeter_y))
            else:
                # Proper rectangle: draw its outline only.
                # draw.rectangle([next_x1, next_y1, next_x2, next_y2], outline = color)
                image.strokeColor(color)
                image.fillColor("none")
                image.strokeWidth(4)
                image.draw(PythonMagick.DrawableRectangle(next_x1, next_y1, next_x2, next_y2))
        # mbr_list = [x.getMBR() for x in entries]
        # if len(entries) == 0 and tree.getRootEntry().getChild() != self:
        if len(entries) == 0:
            # Childless node: draw its location as a small black point.
            parent = entry.getChild().getParent()
            # entry = parent.retrieveEntryForChild(self)
            # entry = self
            mbr = entry.getMBR()
            location = Point.toPoint(mbr)
            x, y = location
            # multiplier = 3 * 0.8
            # multiplier = 1 / (1.0 * 1302) * 0.8
            multiplier = 1 / (1.0 * 6.5) * 0.8
            # offset = (768 * 0.2) / 2
            offset = (1536 * 0.2) / 2
            next_x = multiplier * x
            next_y = multiplier * y
            """
            draw.ellipse([(next_x - 2 + offset, next_y - 2 + offset), \
                (next_x + 2 + offset, next_y + 2 + offset)], fill = "rgb(0, 0, 0)")
            """
            image.strokeColor("none")
            image.fillColor("black")
            center_x = next_x + offset
            center_y = next_y + offset
            radius = 2
            perimeter_x = next_x + offset
            perimeter_y = next_y + offset + radius
            image.draw(PythonMagick.DrawableCircle(center_x, center_y, perimeter_x, perimeter_y))
        children = [x.getChild() for x in entries]
        # print
        """
        for child in children:
            # child.draw(tree, draw, depth + 1)
            child.draw(tree, image, depth + 1)
        """
        # Recurse one level deeper (draw is a staticmethod, so this works
        # even though it is invoked through an instance).
        entry.draw(tree, entries, image, depth + 1)
        # del draw
# x goes from left (negative) to right (positive)
# y goes from top (negative) to bottom (positive)
class MBR:
    def __init__(self, upper_left, lower_right):
        # Corners as (x, y) pairs; x grows left-to-right, y grows top-to-bottom.
        self.upper_left = upper_left
        self.lower_right = lower_right
    def isRaw(self):
        # Base MBR is neither raw nor composite; presumably overridden by
        # a RawMBR-style subclass -- confirm elsewhere in the module.
        return False
    def isComposite(self):
        # Overridden by CompositeMBR (see getEnlargedMBR), which returns True.
        return False
    def getUpperLeft(self):
        # (x, y) of the top-left corner.
        return self.upper_left
    def getLowerRight(self):
        # (x, y) of the bottom-right corner.
        return self.lower_right
def getArea(self):
upper_left = self.getUpperLeft()
lower_right = self.getLowerRight()
x1, y1 = upper_left
x2, y2 = lower_right
side1_length = x2 - x1
side2_length = y2 - y1
area = side1_length * side2_length
return area
# require that base_mbr is composite and mbr is raw or composite
# return a composite MBR object
    @staticmethod
    def getEnlargedMBR(base_mbr, mbr):
        """Return a CompositeMBR covering both base_mbr and mbr.

        Requires base_mbr to be composite (it must provide getMBRList());
        mbr may be raw or composite and is appended to the member list.
        """
        mbr_list = [base_mbr, mbr]
        upper_left_points = [x.getUpperLeft() for x in mbr_list]
        lower_right_points = [x.getLowerRight() for x in mbr_list]
        points = upper_left_points + lower_right_points
        # Bounding box of all four corner points.
        x_values = [x[0] for x in points]
        y_values = [x[1] for x in points]
        min_x_value = min(x_values)
        max_x_value = max(x_values)
        min_y_value = min(y_values)
        max_y_value = max(y_values)
        upper_left_point = (min_x_value, min_y_value)
        lower_right_point = (max_x_value, max_y_value)
        result_mbr_list = base_mbr.getMBRList() + [mbr]
        mbr = CompositeMBR(upper_left_point, lower_right_point, result_mbr_list)
        return mbr
@staticmethod
def getAreaEnlargement(base_mbr, mbr):
base_mbr_area = base_mbr.getArea()
enlarged_mbr = MBR.getEnlargedMBR(base_mbr, mbr)
enlarged_mbr_area = enlarged_mbr.getArea()
area_change = enlarged_mbr_area - base_mbr_area
return area_change
@staticmethod
def doOverlap(mbr_a, mbr_b):
upper_left_a = mbr_a.getUpperLeft()
lower_right_a = mbr_a.getLowerRight()
upper_left_b = mbr_b.getUpperLeft()
lower_right_b = mbr_b.getLowerRight()
x_a1, y_a1 = upper_left_a
x_a2, y_a2 = lower_right_a
x_b1, y_b1 = upper_left_b
x_b2, y_b2 = lower_right_b
do_overlap = x_a1 <= x_b2 and x_a2 >= x_b1 and y_a1 <= y_b2 and y_a2 >= y_b1
# print mbr_a.toString(), mbr_b.toString(), do_overlap
return do_overlap
@staticmethod
def findOverlapArea(mbr_a, mbr_b):
if MBR.doOverlap(mbr_a, mbr_b) == False:
return 0
else:
upper_left_a = mbr_a.getUpperLeft()
x_a1, y_a1 = upper_left_a
lower_right_a = mbr_a.getLowerRight()
x_a2, y_a2 = lower_right_a
upper_left_b = mbr_b.getUpperLeft()
x_b1, y_b1 = upper_left_b
lower_right_b = mbr_b.getLowerRight()
x_b2, y_b2 = lower_right_b
"""
print x_a1, y_a1
print x_a2, y_a2
print x_b1, y_b1
print x_b2, y_b2
"""
side1 = max(0, min(x_a2, x_b2) - max(x_a1, x_b1))
side2 = max(0, min(y_a2, y_b2) - max(y_a1, y_b1))
intersection_area = side1 * side2
return intersection_area
def getMarginValue(self):
upper_left = self.getUpperLeft()
lower_right = self.getLowerRight()
x1, y1 = upper_left
x2, y2 = lower_right
margin = 2 * (x2 - x1) + 2 * (y2 - y1)
return margin
def getCenter(self):
upper_left = self.getUpperLeft()
lower_right = self.getLowerRight()
x1, y1 = upper_left
x2, y2 = lower_right
x_center = | |
= dst_bounds.left + (xres * dst_width)
y1 = dst_bounds.top - (yres * dst_height)
dst_bounds = rio.coords.BoundingBox(top=dst_bounds.top, left=dst_bounds.left, bottom=y1, right=x1)
# Set output shape (Note: dst_size is (ncol, nrow))
if dst_size is not None:
dst_shape = (self.count, dst_size[1], dst_size[0])
dst_data = np.ones(dst_shape, dtype=dtype)
reproj_kwargs.update({"destination": dst_data})
else:
dst_shape = (self.count, self.height, self.width)
# If dst_bounds is set, will enforce dst_bounds
if dst_bounds is not None:
if dst_size is None:
# Calculate new raster size which ensures that pixels resolution is as close as possible to original
# Raster size is increased by up to one pixel if needed
yres, xres = self.res
dst_width = int(np.ceil((dst_bounds.right - dst_bounds.left) / xres))
dst_height = int(np.ceil(np.abs(dst_bounds.bottom - dst_bounds.top) / yres))
dst_size = (dst_width, dst_height)
# Calculate associated transform
dst_transform = rio.transform.from_bounds(*dst_bounds, width=dst_size[0], height=dst_size[1])
# Specify the output bounds and shape, let rasterio handle the rest
reproj_kwargs.update({"dst_transform": dst_transform})
dst_data = np.ones((dst_size[1], dst_size[0]), dtype=dtype)
reproj_kwargs.update({"destination": dst_data})
# Check that reprojection is actually needed
# Caution, dst_size is (width, height) while shape is (height, width)
if all(
[
(dst_transform == self.transform) or (dst_transform is None),
(dst_crs == self.crs) or (dst_crs is None),
(dst_size == self.shape[::-1]) or (dst_size is None),
(dst_res == self.res) or (dst_res == self.res[0] == self.res[1]) or (dst_res is None),
]
):
if (dst_nodata == self.nodata) or (dst_nodata is None):
if not silent:
warnings.warn("Output projection, bounds and size are identical -> return self (not a copy!)")
return self
elif dst_nodata is not None:
if not silent:
warnings.warn(
"Only nodata is different, consider using the 'set_ndv()' method instead'\
' -> return self (not a copy!)"
)
return self
# Set the performance keywords
if n_threads == 0:
# Default to cpu count minus one. If the cpu count is undefined, num_threads will be 1
cpu_count = os.cpu_count() or 2
num_threads = cpu_count - 1
else:
num_threads = n_threads
reproj_kwargs.update({"num_threads": num_threads, "warp_mem_limit": memory_limit})
# Currently reprojects all in-memory bands at once.
# This may need to be improved to allow reprojecting from-disk.
# See rio.warp.reproject docstring for more info.
dst_data, dst_transformed = rio.warp.reproject(self.data, **reproj_kwargs)
# Enforce output type
dst_data = dst_data.astype(dtype)
# Check for funny business.
if dst_transform is not None:
assert dst_transform == dst_transformed
# Write results to a new Raster.
dst_r = self.from_array(dst_data, dst_transformed, dst_crs, dst_nodata)
return dst_r
    def shift(self, xoff: float, yoff: float) -> None:
        """
        Translate the Raster by a given x,y offset.
        Only the geotransform origin is moved; pixel values are untouched.
        :param xoff: Translation x offset (in georeferenced units).
        :param yoff: Translation y offset (in georeferenced units).
        """
        # Check that data is loaded, as it is necessary for this method
        assert self.is_loaded, "Data must be loaded, use self.load"
        meta = self.ds.meta
        # First two rows of the affine transform: (dx, b, xmin, d, dy, ymax).
        dx, b, xmin, d, dy, ymax = list(self.transform)[:6]
        # Rebuild the transform with the origin translated by (xoff, yoff).
        meta.update({"transform": rio.transform.Affine(dx, b, xmin + xoff, d, dy, ymax + yoff)})
        self._update(metadata=meta)
def set_ndv(self, ndv: abc.Sequence[int | float] | int | float, update_array: bool = False) -> None:
    """
    Set new nodata values for bands (and possibly update arrays).

    A scalar ndv is broadcast to every band of a multi-band raster; a sequence
    must provide one value per band. Each value is checked for castability
    against the corresponding band dtype before the metadata is updated.

    :param ndv: nodata values
    :param update_array: change the existing nodata in array
    :raises ValueError: if ndv has an unsupported type, the wrong length,
        or a value that cannot be cast to the band dtype.
    """
    if not isinstance(ndv, (abc.Sequence, int, float, np.integer, np.floating)):
        raise ValueError("Type of ndv not understood, must be list or float or int")
    elif (isinstance(ndv, (int, float, np.integer, np.floating))) and self.count > 1:
        # Scalar given for a multi-band raster: broadcast it to all bands
        print("Several raster band: using nodata value for all bands")
        ndv = [ndv] * self.count
    elif isinstance(ndv, abc.Sequence) and self.count == 1:
        # Sequence given for a single-band raster: keep only the first value
        print("Only one raster band: using first nodata value provided")
        ndv = list(ndv)[0]
    # Check that ndv has same length as number of bands in self
    if isinstance(ndv, abc.Sequence):
        if len(ndv) != self.count:
            raise ValueError(f"Length of ndv ({len(ndv)}) incompatible with number of bands ({self.count})")
        # Check that ndv value is compatible with dtype
        for k in range(len(ndv)):
            if not rio.dtypes.can_cast_dtype(ndv[k], self.dtypes[k]):
                raise ValueError(f"ndv value {ndv[k]} incompatible with self.dtype {self.dtypes[k]}")
    else:
        if not rio.dtypes.can_cast_dtype(ndv, self.dtypes[0]):
            raise ValueError(f"ndv value {ndv} incompatible with self.dtype {self.dtypes[0]}")
    meta = self.ds.meta
    imgdata = self.data
    # Remember the previous nodata so existing nodata pixels can be rewritten
    pre_ndv = self.nodata
    meta.update({"nodata": ndv})
    if update_array and pre_ndv is not None:
        # nodata values are specific to each band
        # let's do a loop then
        if self.count == 1:
            if np.ma.isMaskedArray(imgdata):
                # Masked array: write the new fill value under the mask
                imgdata.data[imgdata.mask] = ndv  # type: ignore
            else:
                # Plain array: replace pixels equal to the previous nodata
                ind = imgdata[:] == pre_ndv
                imgdata[ind] = ndv
        else:
            # At this point, ndv is definitely iterable, but mypy doesn't understand that.
            for i in range(self.count):
                if np.ma.isMaskedArray(imgdata):
                    imgdata.data[i, imgdata.mask[i, :]] = ndv[i]  # type: ignore
                else:
                    ind = imgdata[i, :] == pre_ndv[i]  # type: ignore
                    imgdata[i, ind] = ndv[i]  # type: ignore
    else:
        # No array rewrite requested (or no previous nodata): only metadata changes
        imgdata = None
    self._update(metadata=meta, imgdata=imgdata)
def save(
    self,
    filename: str | IO[bytes],
    driver: str = "GTiff",
    dtype: np.dtype | None = None,
    compress: str = "deflate",
    tiled: bool = False,
    blank_value: None | int | float = None,
    co_opts: dict[str, str] | None = None,
    metadata: dict[str, Any] | None = None,
    gcps: list[tuple[float, ...]] | None = None,
    gcps_crs: CRS | None = None,
) -> None:
    """Write the Raster to a geo-referenced file.

    Given a filename to save the Raster to, create a geo-referenced file
    on disk which contains the contents of self.data.

    If blank_value is set to an integer or float, then instead of writing
    the contents of self.data to disk, write this provided value to every
    pixel instead.

    :param filename: Filename to write the file to.
    :param driver: the 'GDAL' driver to use to write the file as.
    :param dtype: Data Type to write the image as (defaults to dtype of image data)
    :param compress: Compression type. Defaults to 'deflate' (equal to GDALs: COMPRESS=DEFLATE)
    :param tiled: Whether to write blocks in tiles instead of strips. Improves read performance on large files,
        but increases file size.
    :param blank_value: Use to write an image out with every pixel's value
        corresponding to this value, instead of writing the image data to disk.
    :param co_opts: GDAL creation options provided as a dictionary,
        e.g. {'TILED':'YES', 'COMPRESS':'LZW'}
    :param metadata: pairs of metadata key, value
    :param gcps: list of gcps, each gcp being [row, col, x, y, (z)]
    :param gcps_crs: the CRS of the GCPS (Default is None)

    :returns: None.

    :raises AttributeError: if no data is loaded and no blank_value is given.
    :raises ValueError: if blank_value is not numeric or gcps is not a list.
    """
    if co_opts is None:
        co_opts = {}
    if metadata is None:
        metadata = {}
    if gcps is None:
        gcps = []

    # Decide what to write: the loaded data, or a constant-filled array.
    if (self.data is None) and (blank_value is None):
        raise AttributeError("No data loaded, and alternative blank_value not set.")
    elif blank_value is not None:
        if isinstance(blank_value, (int, float)):
            save_data = np.zeros((self.ds.count, self.ds.height, self.ds.width))
            save_data[:, :, :] = blank_value
        else:
            raise ValueError("blank_values must be one of int, float (or None).")
    else:
        save_data = self.data

    # BUG FIX: previously the dtype argument was resolved but never applied,
    # so the file was always written with save_data's native dtype; resolving
    # it from self.data also crashed when only blank_value was provided.
    # Resolve from save_data instead and cast explicitly.
    dtype = save_data.dtype if dtype is None else dtype
    save_data = save_data.astype(dtype)

    with rio.open(
        filename,
        "w",
        driver=driver,
        height=self.ds.height,
        width=self.ds.width,
        count=self.ds.count,
        dtype=save_data.dtype,
        crs=self.ds.crs,
        transform=self.ds.transform,
        nodata=self.ds.nodata,
        compress=compress,
        tiled=tiled,
        **co_opts,
    ) as dst:
        dst.write(save_data)

        # Add metadata (tags in rio)
        dst.update_tags(**metadata)

        # Save GCPs
        if not isinstance(gcps, list):
            raise ValueError("gcps must be a list")

        if len(gcps) > 0:
            rio_gcps = []
            for gcp in gcps:
                rio_gcps.append(rio.control.GroundControlPoint(*gcp))

            # Warning: this will overwrite the transform
            if dst.transform != rio.transform.Affine(1, 0, 0, 0, 1, 0):
                warnings.warn(
                    "A geotransform previously set is going \
                    to be cleared due to the setting of GCPs."
                )

            dst.gcps = (rio_gcps, gcps_crs)
def to_xarray(self, name: str | None = None) -> rioxarray.DataArray:
    """Convert this Raster into an xarray DataArray using rioxarray.

    This method uses rioxarray to generate a DataArray with associated
    geo-referencing information.

    See the documentation of rioxarray and xarray for more information on
    the methods and attributes of the resulting DataArray.

    :param name: Set the name of the DataArray.

    :returns: xarray DataArray
    """
    # rioxarray is an optional dependency; fail with an explicit message
    if not _has_rioxarray:
        raise ImportError("rioxarray is required for this functionality.")

    data_array = rioxarray.open_rasterio(self.ds)
    if name is not None:
        data_array.name = name
    return data_array
def get_bounds_projected(self, out_crs: CRS, densify_pts_max: int = 5000) -> rio.coords.BoundingBox:
"""
Return self's bounds in the given CRS.
:param out_crs: Output CRS
:param densify_pts_max: Maximum points to be added between image corners to account for non linear edges.
Reduce if time computation is really critical (ms) or increase if extent is | |
= self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
    # Populate this headers model from a plain dict (inverse of to_map);
    # returns self so calls can be chained.
    m = m or dict()
    if m.get('commonHeaders') is not None:
        self.common_headers = m.get('commonHeaders')
    if m.get('x-acs-dingtalk-access-token') is not None:
        self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
    return self
class GetOssTempUrlRequest(TeaModel):
    """Request model for fetching a temporary OSS access URL."""

    def __init__(
        self,
        open_team_id: str = None,
        key: str = None,
        file_name: str = None,
        fetch_mode: str = None,
    ):
        # Open team ID
        self.open_team_id = open_team_id
        # OSS object key
        self.key = key
        # File name
        self.file_name = file_name
        # Fetch mode: AUTO (view inline in the browser when possible, e.g. images
        # or PDFs, otherwise fall back to download) or DOWNLOAD (always download)
        self.fetch_mode = fetch_mode

    def validate(self):
        # No required-field constraints for this model.
        pass

    def to_map(self):
        # Serialize to a camelCase dict for the Tea runtime.
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        if self.open_team_id is not None:
            result['openTeamId'] = self.open_team_id
        if self.key is not None:
            result['key'] = self.key
        if self.file_name is not None:
            result['fileName'] = self.file_name
        if self.fetch_mode is not None:
            result['fetchMode'] = self.fetch_mode
        return result

    def from_map(self, m: dict = None):
        # Populate fields from a camelCase dict (inverse of to_map).
        m = m or dict()
        if m.get('openTeamId') is not None:
            self.open_team_id = m.get('openTeamId')
        if m.get('key') is not None:
            self.key = m.get('key')
        if m.get('fileName') is not None:
            self.file_name = m.get('fileName')
        if m.get('fetchMode') is not None:
            self.fetch_mode = m.get('fetchMode')
        return self
class GetOssTempUrlResponseBody(TeaModel):
    """Response payload carrying the temporary OSS access URL."""

    def __init__(
        self,
        url: str = None,
    ):
        # Temporary access URL for the requested OSS object.
        # (The original generated comment "Id of the request" was a codegen
        # placeholder and did not describe this field.)
        self.url = url

    def validate(self):
        # No required-field constraints for this model.
        pass

    def to_map(self):
        # Serialize to a dict for the Tea runtime.
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        if self.url is not None:
            result['url'] = self.url
        return result

    def from_map(self, m: dict = None):
        # Populate fields from a dict (inverse of to_map).
        m = m or dict()
        if m.get('url') is not None:
            self.url = m.get('url')
        return self
class GetOssTempUrlResponse(TeaModel):
    """Full response wrapper: HTTP headers plus the parsed body model."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: GetOssTempUrlResponseBody = None,
    ):
        # Response HTTP headers
        self.headers = headers
        # Parsed response payload
        self.body = body

    def validate(self):
        # Both headers and body are mandatory; validate the body recursively.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        # Serialize to a plain dict for the Tea runtime.
        base = super().to_map()
        if base is not None:
            return base

        result = {}
        if self.headers is not None:
            result['headers'] = self.headers
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        # Populate fields from a plain dict (inverse of to_map).
        m = m or {}
        headers = m.get('headers')
        if headers is not None:
            self.headers = headers
        if m.get('body') is not None:
            self.body = GetOssTempUrlResponseBody().from_map(m['body'])
        return self
class TakeTicketHeaders(TeaModel):
    """Request headers for the TakeTicket API."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        # Common HTTP headers applied to the request
        self.common_headers = common_headers
        # DingTalk access token header
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        # No required-field constraints for this model.
        pass

    def to_map(self):
        # Serialize to a plain dict for the Tea runtime.
        base = super().to_map()
        if base is not None:
            return base

        result = {}
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result

    def from_map(self, m: dict = None):
        # Populate fields from a plain dict (inverse of to_map).
        m = m or {}
        common = m.get('commonHeaders')
        if common is not None:
            self.common_headers = common
        token = m.get('x-acs-dingtalk-access-token')
        if token is not None:
            self.x_acs_dingtalk_access_token = token
        return self
class TakeTicketRequest(TeaModel):
    """Request model for claiming (taking) a service ticket."""

    def __init__(
        self,
        ding_isv_org_id: int = None,
        ding_org_id: int = None,
        ding_suite_key: str = None,
        ding_token_grant_type: int = None,
        open_team_id: str = None,
        taker_union_id: str = None,
        open_ticket_id: str = None,
    ):
        # ISV organization ID
        self.ding_isv_org_id = ding_isv_org_id
        # Organization ID
        self.ding_org_id = ding_org_id
        # Suite key
        self.ding_suite_key = ding_suite_key
        # Token grant type
        self.ding_token_grant_type = ding_token_grant_type
        # Open team ID
        self.open_team_id = open_team_id
        # unionId of the user taking the ticket
        self.taker_union_id = taker_union_id
        # Open ticket ID
        self.open_ticket_id = open_ticket_id

    def validate(self):
        # No required-field constraints for this model.
        pass

    def to_map(self):
        # Serialize to a camelCase dict for the Tea runtime.
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        if self.ding_isv_org_id is not None:
            result['dingIsvOrgId'] = self.ding_isv_org_id
        if self.ding_org_id is not None:
            result['dingOrgId'] = self.ding_org_id
        if self.ding_suite_key is not None:
            result['dingSuiteKey'] = self.ding_suite_key
        if self.ding_token_grant_type is not None:
            result['dingTokenGrantType'] = self.ding_token_grant_type
        if self.open_team_id is not None:
            result['openTeamId'] = self.open_team_id
        if self.taker_union_id is not None:
            result['takerUnionId'] = self.taker_union_id
        if self.open_ticket_id is not None:
            result['openTicketId'] = self.open_ticket_id
        return result

    def from_map(self, m: dict = None):
        # Populate fields from a camelCase dict (inverse of to_map).
        m = m or dict()
        if m.get('dingIsvOrgId') is not None:
            self.ding_isv_org_id = m.get('dingIsvOrgId')
        if m.get('dingOrgId') is not None:
            self.ding_org_id = m.get('dingOrgId')
        if m.get('dingSuiteKey') is not None:
            self.ding_suite_key = m.get('dingSuiteKey')
        if m.get('dingTokenGrantType') is not None:
            self.ding_token_grant_type = m.get('dingTokenGrantType')
        if m.get('openTeamId') is not None:
            self.open_team_id = m.get('openTeamId')
        if m.get('takerUnionId') is not None:
            self.taker_union_id = m.get('takerUnionId')
        if m.get('openTicketId') is not None:
            self.open_ticket_id = m.get('openTicketId')
        return self
class TakeTicketResponse(TeaModel):
    """Response wrapper for TakeTicket: headers only, no body payload."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
    ):
        # Response HTTP headers
        self.headers = headers

    def validate(self):
        # Headers are mandatory on a response.
        self.validate_required(self.headers, 'headers')

    def to_map(self):
        # Serialize to a plain dict for the Tea runtime.
        base = super().to_map()
        if base is not None:
            return base

        result = {}
        if self.headers is not None:
            result['headers'] = self.headers
        return result

    def from_map(self, m: dict = None):
        # Populate fields from a plain dict (inverse of to_map).
        m = m or {}
        headers = m.get('headers')
        if headers is not None:
            self.headers = headers
        return self
class SendServiceGroupMessageHeaders(TeaModel):
    """Request headers for the SendServiceGroupMessage API."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        # Common HTTP headers applied to the request
        self.common_headers = common_headers
        # DingTalk access token header
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        # No required-field constraints for this model.
        pass

    def to_map(self):
        # Serialize to a plain dict for the Tea runtime.
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result

    def from_map(self, m: dict = None):
        # Populate fields from a plain dict (inverse of to_map).
        m = m or dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class SendServiceGroupMessageRequestBtns(TeaModel):
    """A single ActionCard button: a title plus the URL it opens."""

    def __init__(
        self,
        action_url: str = None,
        title: str = None,
    ):
        # URL opened when the button is tapped
        self.action_url = action_url
        # Button title
        self.title = title

    def validate(self):
        # No required-field constraints for this model.
        pass

    def to_map(self):
        # Serialize to a camelCase dict for the Tea runtime.
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        if self.action_url is not None:
            result['actionURL'] = self.action_url
        if self.title is not None:
            result['title'] = self.title
        return result

    def from_map(self, m: dict = None):
        # Populate fields from a camelCase dict (inverse of to_map).
        m = m or dict()
        if m.get('actionURL') is not None:
            self.action_url = m.get('actionURL')
        if m.get('title') is not None:
            self.title = m.get('title')
        return self
class SendServiceGroupMessageRequest(TeaModel):
def __init__(
    self,
    ding_isv_org_id: int = None,
    ding_org_id: int = None,
    ding_token_grant_type: int = None,
    ding_suite_key: str = None,
    target_open_conversation_id: str = None,
    title: str = None,
    content: str = None,
    is_at_all: bool = None,
    at_mobiles: List[str] = None,
    at_dingtalk_ids: List[str] = None,
    at_union_ids: List[str] = None,
    receiver_mobiles: List[str] = None,
    receiver_dingtalk_ids: List[str] = None,
    receiver_union_ids: List[str] = None,
    message_type: str = None,
    btn_orientation: str = None,
    btns: List[SendServiceGroupMessageRequestBtns] = None,
):
    # ISV organization ID
    self.ding_isv_org_id = ding_isv_org_id
    # Organization ID
    self.ding_org_id = ding_org_id
    # Token grant type
    self.ding_token_grant_type = ding_token_grant_type
    # Suite key
    self.ding_suite_key = ding_suite_key
    # Open conversation ID of the target group
    self.target_open_conversation_id = target_open_conversation_id
    # Message title
    self.title = title
    # Message content
    self.content = content
    # Whether to @-mention everyone in the group
    self.is_at_all = is_at_all
    # Mobile numbers to @-mention
    self.at_mobiles = at_mobiles
    # DingTalk IDs to @-mention
    self.at_dingtalk_ids = at_dingtalk_ids
    # unionIds to @-mention
    self.at_union_ids = at_union_ids
    # Receivers addressed by mobile number
    self.receiver_mobiles = receiver_mobiles
    # Receivers addressed by DingTalk ID
    self.receiver_dingtalk_ids = receiver_dingtalk_ids
    # Receivers addressed by unionId
    self.receiver_union_ids = receiver_union_ids
    # Message type: MARKDOWN or ACTIONCARD
    self.message_type = message_type
    # Button layout: 0 = vertical, 1 = horizontal
    self.btn_orientation = btn_orientation
    # ActionCard buttons
    self.btns = btns
def validate(self):
    """Recursively validate each nested ActionCard button model, if any."""
    for btn in self.btns or []:
        if btn:
            btn.validate()
def to_map(self):
    # Serialize this request to a camelCase dict for the Tea runtime.
    _map = super().to_map()
    if _map is not None:
        return _map

    result = dict()
    if self.ding_isv_org_id is not None:
        result['dingIsvOrgId'] = self.ding_isv_org_id
    if self.ding_org_id is not None:
        result['dingOrgId'] = self.ding_org_id
    if self.ding_token_grant_type is not None:
        result['dingTokenGrantType'] = self.ding_token_grant_type
    if self.ding_suite_key is not None:
        result['dingSuiteKey'] = self.ding_suite_key
    if self.target_open_conversation_id is not None:
        result['targetOpenConversationId'] = self.target_open_conversation_id
    if self.title is not None:
        result['title'] = self.title
    if self.content is not None:
        result['content'] = self.content
    if self.is_at_all is not None:
        result['isAtAll'] = self.is_at_all
    if self.at_mobiles is not None:
        result['atMobiles'] = self.at_mobiles
    if self.at_dingtalk_ids is not None:
        result['atDingtalkIds'] = self.at_dingtalk_ids
    if self.at_union_ids is not None:
        result['atUnionIds'] = self.at_union_ids
    if self.receiver_mobiles is not None:
        result['receiverMobiles'] = self.receiver_mobiles
    if self.receiver_dingtalk_ids is not None:
        result['receiverDingtalkIds'] = self.receiver_dingtalk_ids
    if self.receiver_union_ids is not None:
        result['receiverUnionIds'] = self.receiver_union_ids
    if self.message_type is not None:
        result['messageType'] = self.message_type
    if self.btn_orientation is not None:
        result['btnOrientation'] = self.btn_orientation
    # NOTE: unlike the scalar fields above, 'btns' is always present in the
    # output (as a list), even when self.btns is None.
    result['btns'] = []
    if self.btns is not None:
        for k in self.btns:
            result['btns'].append(k.to_map() if k else None)
    return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
if m.get('dingOrgId') is not None:
self.ding_org_id = m.get('dingOrgId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('targetOpenConversationId') is not None:
self.target_open_conversation_id = m.get('targetOpenConversationId')
if m.get('title') is not None:
self.title = m.get('title')
if m.get('content') is not None:
self.content = m.get('content')
if m.get('isAtAll') is not None:
self.is_at_all = m.get('isAtAll')
if m.get('atMobiles') is not None:
self.at_mobiles = m.get('atMobiles')
if m.get('atDingtalkIds') is not None:
self.at_dingtalk_ids = m.get('atDingtalkIds')
if m.get('atUnionIds') | |
points - " +
str(prediction_at_list))
out_writer.write("Evaluating for the fold-" + str(count) + " for the forecast reference points -- " +
str(prediction_at_list) + "for the method evaluation -- " + str(method) + "\n")
else:
print("Evaluating for the final model over the " + " forecast reference points - " +
str(prediction_at_list))
out_writer.write("Evaluating for the final model over the" + " forecast reference points -- " +
str(prediction_at_list) + "for the method evaluation -- " + str(method) + "\n")
print("Computing MAE, MSE, RMSE for weighted average based predictions on the User -- " + str(user_id))
out_writer.write("Computing MAE, MSE, RMSE for weighted average based predictions"
" plain and on N days on the User -- " + str(user_id) + "\n")
print("---------------------------------------------------------------")
out_writer.write("---------------------------------------------------------------\n")
print("MAE -- ", mean_absolute_error(y_labels, pred_weighted_average))
out_writer.write("MAE -- " + str(mean_absolute_error(y_labels, pred_weighted_average)) + "\n")
# MAE for N days
print("MAE for N days -- ",
str(mean_absolute_error(y_labels, pred_weighted_average) / ndays))
out_writer.write("MAE for N days -- "
+ str(mean_absolute_error(y_labels, pred_weighted_average) / ndays) + "\n")
print("MSE -- ", mean_squared_error(y_labels, pred_weighted_average))
out_writer.write("MSE -- " + str(mean_squared_error(y_labels, pred_weighted_average)) + "\n")
# MSE for N days
print("MSE for N days-- ", str(mean_squared_error(y_labels, pred_weighted_average) / ndays))
out_writer.write(
"MSE for N days -- " + str(mean_squared_error(y_labels, pred_weighted_average) / ndays) + "\n")
print("RMSE -- ", np.sqrt(mean_squared_error(y_labels, pred_weighted_average)))
out_writer.write("RMSE -- " + str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + "\n")
# RMSE for N days
print("RMSE for N days -- ", str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average)) / ndays))
out_writer.write("RMSE for N days -- " + str(
np.sqrt(mean_squared_error(y_labels, pred_weighted_average)) / ndays) + "\n")
# pred_lr = compute_linear_regression(test_user_nn, encoded_data, prediction_at_list , method="mean")
m_count = 0
# Writing to csv file
if not fold_count == "final":
csv_out_writer.write("".join(str(user_id) + "," +
str(count) + "," +
str(mean_absolute_error(y_labels, pred_weighted_average)) + "," +
str(mean_squared_error(y_labels, pred_weighted_average)) + "," +
str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + "," +
"weighted_average" + ","
# str(y_labels) + "," +
# str(pred_weighted_average)
+ str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2])
+ "," + str(pred_weighted_average[0]) + "," + str(pred_weighted_average[1])
+ "," + str(pred_weighted_average[2]) + "\n"))
else:
csv_out_writer.write("".join(str(user_id) + "," +
str("test") + "," +
str(mean_absolute_error(y_labels, pred_weighted_average)) + "," +
str(mean_squared_error(y_labels, pred_weighted_average)) + "," +
str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + "," +
"weighted_average" + ","
+ str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2])
+ "," + str(pred_weighted_average[0]) + "," + str(pred_weighted_average[1])
+ "," + str(pred_weighted_average[2]) + "\n"))
# + str(y_labels) + str(pred_weighted_average)
print("---------------------------------------------------------------")
out_writer.write("---------------------------------------------------------------\n")
print("Computing MAE, MSE, RMSE for {} {} based predictions for the user -- {}"
.format(str("weighted_distance" + str(wt_dist_flag)), str("linear_regression"), str(user_id)))
out_writer.write("Computing MAE, MSE, RMSE for {} {} based predictions for the user -- {} \n"
.format(str("weighted_distance" + str(wt_dist_flag)), str("linear_regression"), str(user_id)))
print("MAE -- ", mean_absolute_error(y_labels, pred_lr))
out_writer.write("MAE -- " + str(mean_absolute_error(y_labels, pred_lr)) + "\n")
print("MSE -- ", mean_squared_error(y_labels, pred_lr))
out_writer.write("MSE -- " + str(mean_squared_error(y_labels, pred_lr)) + "\n")
print("RMSE -- ", np.sqrt(mean_squared_error(y_labels, pred_lr)))
out_writer.write("RMSE -- " + str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + "\n")
print("---------------------------------------------------------------")
out_writer.write("---------------------------------------------------------------\n")
# Write to csv file
if not fold_count == "final":
csv_out_writer.write("".join(str(user_id) + "," +
str(count) + "," +
str(mean_absolute_error(y_labels, pred_lr)) + "," +
str(mean_squared_error(y_labels, pred_lr)) + "," +
str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + "," +
str("lr") + ","
+ str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2])
+ "," + str(pred_lr[0]) + "," + str(pred_lr[1]) + "," + str(
pred_lr[2]) + "\n"))
else:
csv_out_writer.write("".join(str(user_id) + "," +
str("test") + "," +
str(mean_absolute_error(y_labels, pred_lr)) + "," +
str(mean_squared_error(y_labels, pred_lr)) + "," +
str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + "," +
str("lr") + ","
+ str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2])
+ "," + str(pred_lr[0]) + "," + str(pred_lr[1]) + "," + str(
pred_lr[2]) + "\n"))
import properties
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
# Create prediction reference points
from sklearn.metrics import *
# Here, to change to different evaluations
from time_series_grp import TimeSeriesGroupProcessing
from HEOM import HEOM
from scipy.spatial.distance import pdist, squareform
# Change method and execute to get the predictions appropriately, these are configurations
eval_method = "mean"
# Whether neighbor predictions are weighted by distance
wt_distance = False
# Random Neighbors
rand_neighbors = False
# Default day readings for all test users must be at mean and prediction are between min - mean - max
tsg_data = TimeSeriesGroupProcessing(method=eval_method)
# For all combinations evaluation it must be set to True
quest_cmb_all = True
# Same random state needs to be maintained to get consistent test data over all combinations and repeatable results
random_state = 1220
# It is the setting to get the ahead prediction for tinnitus distress, 3 here means for 3 days
# min it is a day and max of about 60days between points which is not an usual scenario
ndays = 3
# Load user build models over the time series observations
# user_best_models = utility.load_ts_model("best_params_usr_models")
# user_best_estimators = utility.load_ts_model("cross_val_estimators")
# KFOLDS - Evaluation over K=5 folds are done.
# Build the default model with all the combination.
if not quest_cmb_all:
    # Per-question-combination evaluation: build, test and persist one model
    # for every combination declared in properties.quest_comb.
    for key, val in properties.quest_comb.items():
        # Build model for each category
        print("Building model for the question combination -- " + str(key))
        out_writer = open("".join("output/output_simulate_" + str(key) + "_" + str(eval_method) + "_heom_norm.txt"), "w+")
        csv_out_writer = open("".join("output/output__simulate_" + str(key) + "_" + str(eval_method) + "_heom_norm.csv"), "w+")
        # key = "bg_tinnitus_history"
        # val = properties.quest_comb[key]
        cat_idx, num_idx, combined_df = smf.initial_processing(key, val, append_synthethic=True)
        # Build and get the knn model for prediction over test instances.
        # Save the data objs
        encoded_data = save_data_objs(combined_df, key)
        csv_out_writer.write("".join("user_id,fold,mae,mse,rmse,algorithm,"
                                     "ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\n"))
        # Create a specific test set as per requirements to contain digital twin, outlier and normal instances
        random_user_ids = encoded_data["user_id"].sample(n=3, random_state=42).to_list()
        """
        10 test users in following format:
        1. Outliers -- [8,20,27,149]
        2. DT - [44428, 444154, 444133]
        3. Random Users - random_user_ids with random state same so always same test set is retrieved.
        """
        test_simulation_ids = [8, 20, 27, 149, 44428, 444154, 444133] + random_user_ids
        test = encoded_data[encoded_data["user_id"].isin(test_simulation_ids)]
        X = encoded_data[~encoded_data["user_id"].isin(test_simulation_ids)]

        # NOTE(review): these helpers close over train_user_ids / test_user_ids,
        # which are assigned below before the helpers are first called (late
        # binding makes this work, but it is fragile).
        def filter_train_ids(x):
            # print(x)
            if x["user_id"] in train_user_ids:
                return x

        def filter_test_ids(x):
            # print(x)
            if x["user_id"] in test_user_ids:
                return x

        train_user_ids = X["user_id"].to_list()
        X_train_data_ui = combined_df.apply(filter_train_ids, axis=1, result_type="broadcast").dropna()
        X_train_data_ui["user_id"] = X_train_data_ui["user_id"].apply(int)
        # Save the non encoded train data for visualization purposes
        utility.save_model("".join("/simulate/" + key + "/" + key + "_train_stat_q_data"), X_train_data_ui)
        # filter and get the data to show to the UI for the test data.
        test_user_ids = test["user_id"].to_list()
        X_test_data_ui = combined_df.apply(filter_test_ids, axis=1, result_type="broadcast").dropna()
        X_test_data_ui["user_id"] = X_test_data_ui["user_id"].apply(int)
        # Save the data_ui object as json
        test_data = {}
        test_data["users"] = X_test_data_ui.to_dict("r")
        utility.save_data("".join("simulate/test_data_ui_" + key), test_data)
        # Fit a HEOM-metric kNN on the training users (first column is user_id,
        # hence the [:, 1:] slices) and evaluate on the held-out test users.
        heom = HEOM(X.to_numpy(), cat_idx, num_idx)
        sim_matrix = pdist(X.to_numpy()[:, 1:], heom.heom_distance)
        mean_heom_distance = sim_matrix.mean()
        knn = NearestNeighbors(n_neighbors=5, metric=heom.heom_distance, radius=mean_heom_distance)
        knn.fit(X.iloc[:, 1:])
        dist, test_idx = knn.kneighbors(test.to_numpy()[:, 1:], n_neighbors=5)
        do_test(test, out_writer, csv_out_writer, ndays, test_idx, X,
                fold_count="final", method=eval_method, dist_nn=None, wt_dist_flag=wt_distance)
        utility.save_model("".join("simulate/" + key + "/" + "knn_static"), knn)
        utility.save_model("".join("simulate/" + key + "/" + "train_sim_data.pckl"), X)
        out_writer.close()
        csv_out_writer.close()
else:
    # Combined evaluation: build a single model over all question combinations.
    cat_idx, num_idx, combined_df = initial_processing()
    # Build model for each category
    print("Building model for the question combination -- " + str("overall"))
    # Take this combined_df and split into train and test.
    # Split some data out of test as part unseen from the UI
    # data_ui_val, data = combined_df.iloc[:5, :], combined_df.iloc[5:, :]
    # Save the data objs
    encoded_data = save_data_objs(combined_df, "overall")
    random_user_ids = encoded_data["user_id"].sample(n=3, random_state=42).to_list()
    # Fixed outlier/digital-twin IDs plus three reproducible random users
    test_simulation_ids = [8, 20, 27, 149, 44428, 444154, 444133] + random_user_ids
    test = encoded_data[encoded_data["user_id"].isin(test_simulation_ids)]
    X = encoded_data[~encoded_data["user_id"].isin(test_simulation_ids)]

    # NOTE(review): these helpers close over train_user_ids / test_user_ids,
    # which are assigned below before first use (late binding).
    def filter_train_ids(x):
        # print(x)
        if x["user_id"] in train_user_ids:
            return x

    def filter_test_ids(x):
        # print(x)
        if x["user_id"] in test_user_ids:
            return x

    train_user_ids = X["user_id"].to_list()
    X_train_data_ui = combined_df.apply(filter_train_ids, axis=1, result_type="broadcast").dropna()
    X_train_data_ui["user_id"] = X_train_data_ui["user_id"].apply(int)
    utility.save_model("".join("/simulate/" + "overall" + "/" + "overall" + "_train_stat_q_data"), X_train_data_ui)
    # filter and get the data to show to the UI for the test data.
    test_user_ids = test["user_id"].to_list()
    X_test_data_ui = combined_df.apply(filter_test_ids, axis=1, result_type="broadcast").dropna()
    X_test_data_ui["user_id"] = X_test_data_ui["user_id"].apply(int)
    # Save the data_ui object as json
    test_data = {}
    test_data["users"] = X_test_data_ui.to_dict("r")
    utility.save_data("simulate/test_data_ui_x_test", test_data)
    count = 0
    out_writer = open("output/simulate_overall_output_folds_" + str(eval_method) + ".txt", "w+")
    csv_out_writer = open("output/simulate_overall_output_folds_" + str(eval_method) + ".csv", "w+")
    # First get the time series for a given test patient and the reference point and iterate to evaluate
    csv_out_writer.write("user_id,fold,mae,mse,rmse,algorithm,"
                         "ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\n")
    # Split the data into train and test and evaluate as a final model
    from sklearn.model_selection import train_test_split
    import utility
    from HEOM import HEOM
    # 
# <reponame>hongzhonglu/vivarium
from __future__ import absolute_import, division, print_function
import os
import argparse
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import random
import math
import numpy as np
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import hsv_to_rgb
from matplotlib.collections import LineCollection
# pymunk imports
import pymunkoptions
pymunkoptions.options["debug"] = False
import pymunk
import pymunk.pygame_util
# pygame for debugging
import pygame
from pygame.key import *
from pygame.locals import *
from pygame.color import *
# vivarium imports
from vivarium.compartment.emitter import timeseries_from_data
from vivarium.compartment.process import (
Process,
COMPARTMENT_STATE)
from vivarium.compartment.composition import (
process_in_compartment,
simulate_process,
simulate_compartment)
from vivarium.processes.Vladimirov2008_motor import run, tumble
from vivarium.processes.derive_globals import (
volume_from_length)
DEBUG_SIZE = 600 # size of the pygame debug screen
DEFAULT_BOUNDS = [10, 10]
# constants
PI = math.pi
# colors for phylogeny initial agents
HUES = [hue/360 for hue in np.linspace(0,360,30)]
DEFAULT_HUE = HUES[0]
DEFAULT_SV = [100.0/100.0, 70.0/100.0]
# agent port keys
AGENT_KEYS = ['location', 'angle', 'volume', 'length', 'width', 'mass', 'forces']
NON_AGENT_KEYS = ['fields', 'time', 'global', COMPARTMENT_STATE]
def random_body_position(body):
    """Return a random (x, y) point on the boundary of a rectangular body.

    The RNG call order (two randint draws, then one uniform draw) matches the
    original implementation, so seeded runs are reproducible.
    """
    width, length = body.dimensions
    on_end = random.randint(0, 1) == 0
    first_side = random.randint(0, 1) == 0
    if on_end:
        # point along one of the two ends
        x = random.uniform(0, width)
        return (x, 0) if first_side else (x, length)
    # point along one of the two long sides
    y = random.uniform(0, length)
    return (0, y) if first_side else (width, y)
def daughter_locations(parent_location, parent_length, parent_angle):
    """Return the centre positions of the two daughters of a dividing rod.

    The daughters sit at +/- a quarter of the parent's length from the parent
    centre, along the parent's orientation.

    :param parent_location: [x, y] centre of the parent body.
    :param parent_length: length of the parent body.
    :param parent_angle: orientation of the parent body in radians.
    :returns: list of two [x, y] daughter centre positions.
    """
    # Fixed: the original accumulated into a local variable that shadowed the
    # function's own name; use a comprehension with hoisted trig terms instead.
    cos_a = math.cos(parent_angle)
    sin_a = math.sin(parent_angle)
    return [
        [parent_location[0] + parent_length * ratio * cos_a,
         parent_location[1] + parent_length * ratio * sin_a]
        for ratio in (-0.25, 0.25)
    ]
class Multibody(Process):
"""
A multi-body physics process using pymunk
Notes:
- rotational diffusion in liquid medium with viscosity = 1 mPa.s: Dr = 3.5+/-0.3 rad^2/s
(Saragosti, et al. 2012. Modeling E. coli tumbles by rotational diffusion.)
- translational diffusion in liquid medium with viscosity = 1 mPa.s: Dt=100 micrometers^2/s
(Saragosti, et al. 2012. Modeling E. coli tumbles by rotational diffusion.)
"""
# Fallback configuration values, used for any key absent from the
# initial_parameters dict passed to __init__.
defaults = {
    'initial_agents': {},
    'elasticity': 0.9,
    'damping': 0.05,  # simulates viscous forces to reduce velocity at low Reynolds number (1 = no damping, 0 = full damping)
    'angular_damping': 0.7,  # less damping for angular velocity seems to improve behavior
    'friction': 0.9,  # TODO -- does this do anything?
    'physics_dt': 0.005,
    'force_scaling': 100,  # scales from pN
    'jitter_force': 1e-3,  # pN
    'bounds': DEFAULT_BOUNDS,
    'mother_machine': False,
    'animate': False,
    'debug': False,
}
def __init__(self, initial_parameters={}):
# hardcoded parameters
self.elasticity = self.defaults['elasticity']
self.friction = self.defaults['friction']
self.damping = self.defaults['damping']
self.angular_damping = self.defaults['angular_damping']
self.force_scaling = self.defaults['force_scaling']
self.physics_dt = self.defaults['physics_dt']
# configured parameters
self.jitter_force = initial_parameters.get('jitter_force', self.defaults['jitter_force'])
self.bounds = initial_parameters.get('bounds', self.defaults['bounds'])
# initialize pymunk space
self.space = pymunk.Space()
# debug screen with pygame
self.pygame_viz = initial_parameters.get('debug', self.defaults['debug'])
self.pygame_scale = 1 # pygame_scale scales the debug screen
if self.pygame_viz:
max_bound = max(self.bounds)
self.pygame_scale = DEBUG_SIZE / max_bound
self.force_scaling *= self.pygame_scale
pygame.init()
self._screen = pygame.display.set_mode((
int(self.bounds[0]*self.pygame_scale),
int(self.bounds[1]*self.pygame_scale)), RESIZABLE)
self._clock = pygame.time.Clock()
self._draw_options = pymunk.pygame_util.DrawOptions(self._screen)
# add static barriers
# TODO -- mother machine configuration
self.mother_machine = initial_parameters.get('mother_machine', self.defaults['mother_machine'])
self.add_barriers(self.bounds)
# initialize agents
self.agent_bodies = {}
self.initial_agents = initial_parameters.get('agents', self.defaults['initial_agents'])
for agent_id, specs in self.initial_agents.items():
self.add_body_from_center(agent_id, specs)
# interactive plot for visualization
self.animate = initial_parameters.get('animate', self.defaults['animate'])
if self.animate:
plt.ion()
self.ax = plt.gca()
self.ax.set_aspect('equal')
self.animate_frame(self.initial_agents)
# all initial agents get a key under a single port
ports = {'agents': ['agents']}
parameters = {}
parameters.update(initial_parameters)
super(Multibody, self).__init__(ports, parameters)
def default_settings(self):
state = {'agents': {'agents': self.initial_agents}}
schema = {'agents': {'agents': {'updater': 'merge'}}}
default_emitter_keys = {
port_id: keys for port_id, keys in self.ports.items()}
return {
'state': state,
'schema': schema,
'emitter_keys': default_emitter_keys,
'time_step': 2
}
def next_update(self, timestep, states):
agents = states['agents']['agents']
# animate before update
if self.animate:
self.animate_frame(agents)
# if an agent has been removed from the agents store,
# remove it from space and agent_bodies
removed_agents = [
agent_id for agent_id in self.agent_bodies.keys()
if agent_id not in agents.keys()]
for agent_id in removed_agents:
body, shape = self.agent_bodies[agent_id]
self.space.remove(body, shape)
del self.agent_bodies[agent_id]
# update agents, add new agents
for agent_id, specs in agents.items():
if agent_id in self.agent_bodies:
self.update_body(agent_id, specs)
else:
self.add_body_from_center(agent_id, specs)
# run simulation
self.run(timestep)
# get new agent position
agent_position = {
agent_id: self.get_body_position(agent_id)
for agent_id in self.agent_bodies.keys()}
return {'agents': {'agents': agent_position}}
def run(self, timestep):
assert self.physics_dt < timestep
time = 0
while time < timestep:
time += self.physics_dt
# apply forces
for body in self.space.bodies:
self.apply_jitter_force(body)
self.apply_motile_force(body)
self.apply_viscous_force(body)
# run for a physics timestep
self.space.step(self.physics_dt)
if self.pygame_viz:
self._update_screen()
def apply_motile_force(self, body):
width, length = body.dimensions
# motile forces
motile_location = (width / 2, 0) # apply force at back end of body
motile_force = [0.0, 0.0]
if hasattr(body, 'motile_force'):
thrust, torque = body.motile_force
motile_force = [thrust, 0.0]
# add directly to angular velocity
body.angular_velocity += torque
# force-based torque
# if torque != 0.0:
# motile_force = get_force_with_angle(thrust, torque)
scaled_motile_force = [thrust * self.force_scaling for thrust in motile_force]
body.apply_force_at_local_point(scaled_motile_force, motile_location)
def apply_jitter_force(self, body):
jitter_location = random_body_position(body)
jitter_force = [
random.normalvariate(0, self.jitter_force),
random.normalvariate(0, self.jitter_force)]
scaled_jitter_force = [
force * self.force_scaling
for force in jitter_force]
body.apply_force_at_local_point(
scaled_jitter_force,
jitter_location)
def apply_viscous_force(self, body):
# dampen the velocity
body.velocity = body.velocity * self.damping + (body.force / body.mass) * self.physics_dt
body.angular_velocity = body.angular_velocity * self.angular_damping + body.torque / body.moment * self.physics_dt
def add_barriers(self, bounds):
""" Create static barriers """
thickness = 0.2
x_bound = bounds[0] * self.pygame_scale
y_bound = bounds[1] * self.pygame_scale
static_body = self.space.static_body
static_lines = [
pymunk.Segment(static_body, (0.0, 0.0), (x_bound, 0.0), thickness),
pymunk.Segment(static_body, (x_bound, 0.0), (x_bound, y_bound), thickness),
pymunk.Segment(static_body, (x_bound, y_bound), (0.0, y_bound), thickness),
pymunk.Segment(static_body, (0.0, y_bound), (0.0, 0.0), thickness),
]
if self.mother_machine:
channel_height = self.mother_machine.get('channel_height') * self.pygame_scale
channel_space = self.mother_machine.get('channel_space') * self.pygame_scale
n_lines = math.floor(x_bound/channel_space)
machine_lines = [
pymunk.Segment(
static_body,
(channel_space * line, 0),
(channel_space * line, channel_height), thickness)
for line in range(n_lines)]
static_lines += machine_lines
for line in static_lines:
line.elasticity = 0.0 # no bounce
line.friction = 0.9
self.space.add(static_lines)
def add_body_from_center(self, body_id, body):
width = body['width'] * self.pygame_scale
length = body['length'] * self.pygame_scale
mass = body['mass']
center_position = body['location']
angle = body['angle']
angular_velocity = body.get('angular_velocity', 0.0)
half_length = length / 2
half_width = width / 2
shape = pymunk.Poly(None, (
(-half_length, -half_width),
(half_length, -half_width),
(half_length, half_width),
(-half_length, half_width)))
inertia = pymunk.moment_for_poly(mass, shape.get_vertices())
body = pymunk.Body(mass, inertia)
shape.body = body
body.position = (
center_position[0] * self.pygame_scale,
center_position[1] * self.pygame_scale)
body.angle = angle
body.dimensions = (width, length)
body.angular_velocity = angular_velocity
shape.elasticity = self.elasticity
shape.friction = self.friction
# add body and shape to space
self.space.add(body, shape)
# add body to agents dictionary
self.agent_bodies[body_id] = (body, shape)
def update_body(self, body_id, specs):
length = specs['length'] * self.pygame_scale
width = specs['width'] * self.pygame_scale
mass = specs['mass']
motile_force = specs.get('motile_force', [0, 0])
body, shape = self.agent_bodies[body_id]
position = body.position
angle = body.angle
# make shape, moment of inertia, and add a body
half_length = length/2
half_width = width/2
new_shape = pymunk.Poly(None, (
(-half_length, -half_width),
(half_length, -half_width),
(half_length, half_width),
(-half_length, half_width)))
inertia = pymunk.moment_for_poly(mass, new_shape.get_vertices())
new_body = pymunk.Body(mass, inertia)
new_shape.body = new_body
new_body.position = position
new_body.angle = angle
new_body.angular_velocity = body.angular_velocity
new_body.dimensions = (width, length)
new_body.motile_force = motile_force
new_shape.elasticity = shape.elasticity
new_shape.friction = shape.friction
# swap bodies
self.space.remove(body, shape)
self.space.add(new_body, new_shape)
# update body
self.agent_bodies[body_id] = (new_body, new_shape)
def get_body_position(self, agent_id):
body, shape = self.agent_bodies[agent_id]
position = body.position
rescaled_position = [
position[0] / self.pygame_scale,
position[1] / self.pygame_scale]
# enforce bounds
rescaled_position = [
0 if pos<0 else pos
for idx, pos in enumerate(rescaled_position)]
rescaled_position = [
self.bounds[idx] if pos>self.bounds[idx] else pos
for idx, pos in enumerate(rescaled_position)]
return {
'location': rescaled_position,
'angle': body.angle,
}
## matplotlib interactive plot
def animate_frame(self, agents):
plt.cla()
for agent_id, data in agents.items():
# location, orientation, length
x_center = data['location'][0]
y_center = data['location'][1]
angle = data['angle'] / PI * 180 + 90 # rotate 90 degrees to match field
length = data['length']
width = data['width']
# get bottom left position
x_offset = (width / 2)
y_offset = (length / 2)
theta_rad = math.radians(angle)
dx = x_offset * math.cos(theta_rad) - y_offset * math.sin(theta_rad)
dy = x_offset * math.sin(theta_rad) + y_offset * math.cos(theta_rad)
x = x_center - | |
# file: emmet-builders/emmet/builders/qchem/molecules.py
from datetime import datetime
from itertools import chain, groupby
from math import ceil
from typing import Any, Dict, Iterable, Iterator, List, Optional
import networkx as nx
from maggma.builders import Builder
from maggma.stores import Store
from maggma.utils import grouper
from emmet.builders.settings import EmmetBuildSettings
from emmet.core.utils import form_env, group_molecules, jsanitize
from emmet.core.qchem.molecule import best_lot, evaluate_lot, MoleculeDoc
from emmet.core.qchem.task import TaskDocument
from emmet.core.qchem.calc_types import LevelOfTheory
from emmet.core.molecules.bonds import make_mol_graph
__author__ = "<NAME> <<EMAIL>>"
SETTINGS = EmmetBuildSettings()
def evaluate_molecule(
    mol_doc: MoleculeDoc,
    funct_scores: Dict[str, int] = SETTINGS.QCHEM_FUNCTIONAL_QUALITY_SCORES,
    basis_scores: Dict[str, int] = SETTINGS.QCHEM_BASIS_QUALITY_SCORES,
    solvent_scores: Dict[str, int] = SETTINGS.QCHEM_SOLVENT_MODEL_QUALITY_SCORES,
):
    """
    Helper function producing a sort key that orders molecule documents by:
    - deprecation status (non-deprecated first)
    - quality of the best level of theory
    - electronic energy at that level of theory

    :param mol_doc: Molecule to be evaluated
    :param funct_scores: Scores for various density functionals
    :param basis_scores: Scores for various basis sets
    :param solvent_scores: Scores for various implicit solvent models
    :return: tuple suitable for use as a sort key
    """
    best = best_lot(mol_doc, funct_scores, basis_scores, solvent_scores)
    lot_quality = sum(evaluate_lot(best, funct_scores, basis_scores, solvent_scores))
    deprecation_rank = -1 * int(mol_doc.deprecated)
    return (
        deprecation_rank,
        lot_quality,
        mol_doc.best_entries[best]["energy"],
    )
class MoleculesAssociationBuilder(Builder):
    """
    The MoleculesAssociationBuilder matches Q-Chem task documents by
    composition and collects tasks associated with identical structures.

    The purpose of this builder is to group calculations in preparation for
    the MoleculesBuilder.

    The process is as follows:
        1.) Find all documents with the same formula
        2.) Select only task documents for the task_types we can select
            properties from
        3.) Aggregate task documents based on nuclear geometry
        4.) Create a MoleculeDoc from the group of task documents
    """

    def __init__(
        self,
        tasks: Store,
        assoc: Store,
        query: Optional[Dict] = None,
        settings: Optional[EmmetBuildSettings] = None,
        **kwargs,
    ):
        """
        Args:
            tasks: Store of task documents
            assoc: Store of associated molecules documents to prepare
            query: dictionary to limit tasks to be analyzed
            settings: EmmetSettings to use in the build process
        """
        self.tasks = tasks
        self.assoc = assoc
        self.query = query if query else dict()
        self.settings = EmmetBuildSettings.autoload(settings)
        self.kwargs = kwargs

        super().__init__(sources=[tasks], targets=[assoc])

    def ensure_indexes(self):
        """
        Ensures indices on the collections needed for building
        """
        # Basic search index for tasks
        self.tasks.ensure_index("task_id")
        self.tasks.ensure_index("last_updated")
        self.tasks.ensure_index("state")
        self.tasks.ensure_index("formula_alphabetical")

        # Search index for associated molecules
        self.assoc.ensure_index("molecule_id")
        self.assoc.ensure_index("last_updated")
        self.assoc.ensure_index("task_ids")
        # BUG FIX: this previously re-indexed "formula_alphabetical" on
        # self.tasks; the assoc store is the one queried by formula.
        self.assoc.ensure_index("formula_alphabetical")

    def _find_to_process(self):
        """Shared discovery step for prechunk/get_items.

        Returns (query, unprocessed task ids, unprocessed formulas): all
        successful tasks matching self.query whose task_id is not already
        recorded in the assoc store, and the set of their formulas.
        """
        temp_query = dict(self.query)
        temp_query["state"] = "successful"

        self.logger.info("Finding tasks to process")
        all_tasks = list(
            self.tasks.query(temp_query, [self.tasks.key, "formula_alphabetical"])
        )
        processed_tasks = set(self.assoc.distinct("task_ids"))
        to_process_tasks = {d[self.tasks.key] for d in all_tasks} - processed_tasks
        to_process_forms = {
            d["formula_alphabetical"]
            for d in all_tasks
            if d[self.tasks.key] in to_process_tasks
        }
        return temp_query, to_process_tasks, to_process_forms

    def prechunk(self, number_splits: int) -> Iterable[Dict]:  # pragma: no cover
        """Prechunk the molecule builder for distributed computation"""
        _, _, to_process_forms = self._find_to_process()

        N = ceil(len(to_process_forms) / number_splits)

        for formula_chunk in grouper(to_process_forms, N):
            yield {"query": {"formula_alphabetical": {"$in": list(formula_chunk)}}}

    def get_items(self) -> Iterator[List[Dict]]:
        """
        Gets all items to process into molecules (and other) documents.
        This does no datetime checking; relying only on whether
        task_ids are included in the molecules Store

        Returns:
            generator or list relevant tasks and molecules to process into documents
        """
        self.logger.info("Molecule association builder started")
        self.logger.info(
            f"Allowed task types: {[task_type.value for task_type in self.settings.QCHEM_ALLOWED_TASK_TYPES]}"
        )

        self.logger.info("Setting indexes")
        self.ensure_indexes()

        # Save timestamp to mark buildtime
        self.timestamp = datetime.utcnow()

        # Get all tasks that have not been processed yet
        temp_query, to_process_tasks, to_process_forms = self._find_to_process()

        self.logger.info(f"Found {len(to_process_tasks)} unprocessed tasks")
        self.logger.info(f"Found {len(to_process_forms)} unprocessed formulas")

        # Set total for builder bars to have a total
        self.total = len(to_process_forms)

        # only the fields needed to build TaskDocument/MoleculeDoc
        projected_fields = [
            "last_updated",
            "task_id",
            "formula_alphabetical",
            "orig",
            "tags",
            "walltime",
            "cputime",
            "output",
            "calcs_reversed",
            "special_run_type",
            "custom_smd",
            "critic2",
        ]

        for formula in to_process_forms:
            tasks_query = dict(temp_query)
            tasks_query["formula_alphabetical"] = formula
            tasks = list(
                self.tasks.query(criteria=tasks_query, properties=projected_fields)
            )
            for t in tasks:
                # TODO: Validation
                # basic validation here ensures that tasks with invalid levels
                # of theory don't halt the build pipeline
                try:
                    TaskDocument(**t).level_of_theory
                    t["is_valid"] = True
                except Exception as e:
                    self.logger.info(
                        f"Processing task {t['task_id']} failed with Exception - {e}"
                    )
                    t["is_valid"] = False

            yield tasks

    def process_item(self, items: List[Dict]) -> List[Dict]:
        """
        Process the tasks into a MoleculeDoc

        Args:
            items [dict] : a list of task docs

        Returns:
            [dict] : a list of new molecule docs
        """
        tasks = [TaskDocument(**task) for task in items if task["is_valid"]]
        formula = tasks[0].formula_alphabetical
        task_ids = [task.task_id for task in tasks]
        self.logger.debug(f"Processing {formula} : {task_ids}")

        molecules = list()

        for group in self.filter_and_group_tasks(tasks):
            try:
                molecules.append(MoleculeDoc.from_tasks(group))
            except Exception as e:
                failed_ids = list({t_.task_id for t_ in group})
                # BUG FIX: deprecate only the failed group (previously passed
                # ``tasks``, deprecating every task with this formula, which
                # contradicted the failed_ids reported below)
                doc = MoleculeDoc.construct_deprecated_molecule(group)
                doc.warnings.append(str(e))
                molecules.append(doc)
                # Logger.warn is deprecated in favor of Logger.warning
                self.logger.warning(
                    f"Failed making material for {failed_ids}."
                    f" Inserted as deprecated molecule: {doc.molecule_id}"
                )

        self.logger.debug(f"Produced {len(molecules)} molecules for {formula}")

        return jsanitize([mol.dict() for mol in molecules], allow_bson=True)

    def update_targets(self, items: List[Dict]):
        """
        Inserts the new molecules into the molecules collection

        Args:
            items [[dict]]: A list of molecules to update
        """
        docs = list(chain.from_iterable(items))  # type: ignore

        # stamp every document with the build time
        for item in docs:
            item.update({"_bt": self.timestamp})

        molecule_ids = list({item["molecule_id"] for item in docs})

        if len(items) > 0:
            self.logger.info(f"Updating {len(docs)} molecules")
            # replace any existing documents for these molecule_ids
            self.assoc.remove_docs({self.assoc.key: {"$in": molecule_ids}})
            self.assoc.update(
                docs=docs,
                key=["molecule_id"],
            )
        else:
            self.logger.info("No items to update")

    def filter_and_group_tasks(
        self, tasks: List[TaskDocument]
    ) -> Iterator[List[TaskDocument]]:
        """
        Groups tasks by identical structure
        """
        filtered_tasks = [
            task
            for task in tasks
            if any(
                allowed_type is task.task_type
                for allowed_type in self.settings.QCHEM_ALLOWED_TASK_TYPES
            )
        ]

        molecules = list()
        lots = list()

        for idx, task in enumerate(filtered_tasks):
            # prefer the optimized geometry when one is available
            if task.output.optimized_molecule:
                m = task.output.optimized_molecule
            else:
                m = task.output.initial_molecule
            m.index: int = idx  # type: ignore
            molecules.append(m)
            lots.append(task.level_of_theory.value)

        grouped_molecules = group_molecules(molecules, lots)
        for group in grouped_molecules:
            grouped_tasks = [filtered_tasks[mol.index] for mol in group]  # type: ignore
            yield grouped_tasks
class MoleculesBuilder(Builder):
    """
    The MoleculesBuilder collects MoleculeDocs from the MoleculesAssociationBuilder
    and groups them by key properties (charge, spin multiplicity, bonding).
    Then, the best molecular structure is identified (based on electronic energy),
    and this document becomes the representative MoleculeDoc.

    The process is as follows:
        1.) Find all documents with the same formula
        2.) Group documents based on charge, spin, and bonding
        3.) Create a MoleculeDoc from the group of task documents
    """
def __init__(
self,
assoc: Store,
molecules: Store,
query: Optional[Dict] = None,
settings: Optional[EmmetBuildSettings] = None,
prefix: Optional[str] = None,
**kwargs,
):
"""
Args:
assoc: Store of associated molecules documents, created by MoleculesAssociationBuilder
molecules: Store of processed molecules documents
query: dictionary to limit tasks to be analyzed
settings: EmmetSettings to use in the build process
prefix: String prefix for MPIDs of processed MoleculeDocs. For instance, for the
Lithium-Ion Battery Electrolyte (LIBE) dataset, the prefix would be "libe".
Default is None
"""
self.assoc = assoc
self.molecules = molecules
self.query = query if query else dict()
self.settings = EmmetBuildSettings.autoload(settings)
self.prefix = prefix
self.kwargs = kwargs
super().__init__(sources=[assoc], targets=[molecules])
def ensure_indexes(self):
"""
Ensures indices on the collections needed for building
"""
# Search index for associated molecules
self.assoc.ensure_index("molecule_id")
self.assoc.ensure_index("last_updated")
self.assoc.ensure_index("task_ids")
self.assoc.ensure_index("formula_alphabetical")
# Search index for molecules
self.molecules.ensure_index("molecule_id")
self.molecules.ensure_index("last_updated")
self.molecules.ensure_index("task_ids")
self.molecules.ensure_index("formula_alphabetical")
def prechunk(self, number_splits: int) -> Iterable[Dict]: # pragma: no cover
"""Prechunk the molecule builder for distributed computation"""
temp_query = dict(self.query)
temp_query["deprecated"] = False
self.logger.info("Finding documents to process")
all_assoc = list(
self.assoc.query(temp_query, [self.assoc.key, "formula_alphabetical"])
)
# int and split manipulation necessary because of MPID prefixing done during building
processed_docs = set(
[int(e.split("-")[-1]) for e in self.molecules.distinct("molecule_id")]
)
to_process_docs = {d[self.assoc.key] for d in all_assoc} - processed_docs
to_process_forms = {
d["formula_alphabetical"]
for d in all_assoc
if d[self.assoc.key] in to_process_docs
}
N = ceil(len(to_process_forms) / number_splits)
for formula_chunk in grouper(to_process_forms, N):
yield {"query": {"formula_alphabetical": {"$in": list(formula_chunk)}}}
def get_items(self) -> Iterator[List[Dict]]:
"""
Gets all items to process into molecules (and other) documents.
This does no datetime checking; relying on on whether
task_ids are included in the molecules Store
Returns:
generator or list relevant tasks and molecules to process into documents
"""
self.logger.info("Molecules builder started")
self.logger.info("Setting indexes")
self.ensure_indexes()
# Save timestamp to mark buildtime
self.timestamp = datetime.utcnow()
# Get all processed molecules
temp_query = dict(self.query)
temp_query["deprecated"] = False
self.logger.info("Finding documents to process")
all_assoc = list(
self.assoc.query(temp_query, [self.assoc.key, "formula_alphabetical"])
)
processed_docs = set(
[int(e.split("-")[-1]) for e in self.molecules.distinct("molecule_id")]
)
to_process_docs = {d[self.assoc.key] for d in all_assoc} - processed_docs
to_process_forms = {
d["formula_alphabetical"]
for d in all_assoc
if d[self.assoc.key] in to_process_docs
}
self.logger.info(f"Found {len(to_process_docs)} unprocessed documents")
self.logger.info(f"Found {len(to_process_forms)} unprocessed formulas")
# Set | |
import numpy as np
from math import*
import pybrain.rl.environments
from pybrain.utilities import one_to_n
from environment import Environment
# The agent's actions are T and d.
# TODO where do we set up the generalization?
# TODO must pass omegadd to the learner.
# TODO the tiling might be achieved by implementing Task.getObservation.
# TODO states and actions are converted to int's within sarsa.py, ...what does
# this mean?
# TODO might need to use NFQ instead of Q or Sarsa.
# TODO NFQ might used a fix value of alpha as 0.5.
# TODO set epsilon for epsilon-greedy learning using learner.explorer.epsilon.
# TODO pybrain has limited examples of doing RL using continuous states and
# value-based learners (generalizing). Then we can use ActionValueNetwork, but
# it's not clear to me yet how this 'discretizes'/generalizes the state space.
class BalanceTask(pybrain.rl.environments.EpisodicTask):
    """The rider is to simply balance the bicycle while moving with the
    prescribed speed.

    This class is heavily guided by
    pybrain.rl.environments.cartpole.balancetask.BalanceTask.
    """
    # See Randlov's code. Paper and thesis say 12 degrees, but his code uses
    # pi/15. These are actually equivalent.
    #max_tilt = 12.0 * np.pi / 180.0
    max_tilt = np.pi / 15.0
    # 3 torque choices x 3 displacement choices (see performAction).
    nactions = 9

    def __init__(self, butt_disturbance_amplitude=0.02, only_steer=False,
            max_time=1000.0):
        """
        Parameters
        ----------
        butt_disturbance_amplitude : float; optional
            In meters.
        only_steer : bool; optional
            If True, the action selects only the steering torque and the
            seat displacement is held at zero (see performAction).
        max_time : float; optional
            Episode time limit; compared against env.time_step * t in
            isFinished.
        """
        super(BalanceTask, self).__init__(Environment())
        # Keep track of time in case we want to end episodes based on number of
        # time steps.
        self._butt_disturbance_amplitude = butt_disturbance_amplitude
        self.only_steer = only_steer
        self.max_time = max_time
        self.t = 0
        # TODO Sensor limits to normalize the sensor readings.
        # TODO Actor limits.
        #T_limits = (-2, 2) # Newtons.
        #d_limits = (-0.02, 0.02) # meters.
        ## None for sensor limits; does not normalize sensor values.
        ## outdim should be set to the length of the sensors vector.
        #self.setScaling([None] * self.env.outdim, [T_limits, d_limits])

    @property
    def indim(self):
        # one action integer per step
        return 1

    @property
    def outdim(self):
        # the five state variables returned by getObservation
        return 5

    def reset(self):
        super(BalanceTask, self).reset()
        # restart the episode step counter
        self.t = 0

    def performAction(self, action):
        """Incoming action is an int between 0 and 8. The action we provide to
        the environment consists of a torque T in {-2 N, 0, 2 N}, and a
        displacement d in {-.02 m, 0, 0.02 m}.
        """
        self.t += 1
        # Map the action integer to a torque and displacement.
        assert round(action[0]) == action[0]

        if self.only_steer:
            # torque only: action 0..8 maps linearly onto T in [-2, 2]
            T = 2 * (action[0] / 4.0 - 1.0)
            d = 0.
        else:
            # -1 for action in {0, 1, 2}, 0 for action in {3, 4, 5}, 1 for
            # action in {6, 7, 8}
            torque_selector = np.floor(action[0] / 3.0) - 1.0
            T = 2 * torque_selector
            # Random number in [-1, 1]:
            p = 2.0 * np.random.rand() - 1.0
            # -1 for action in {0, 3, 6}, 0 for action in {1, 4, 7}, 1 for
            # action in {2, 5, 8}
            disp_selector = action[0] % 3 - 1.0
            # seat displacement plus a random "butt" disturbance
            d = 0.02 * disp_selector + self._butt_disturbance_amplitude * p
        super(BalanceTask, self).performAction([T, d])

    def getObservation(self):
        # unpack all eleven sensors for readability; only the first five
        # (theta, thetad, omega, omegad, omegadd) are observed
        (theta, thetad, omega, omegad, omegadd,
                xf, yf, xb, yb, psi, psig) = self.env.getSensors()
        # TODO not calling superclass to do normalization, etc.
        return self.env.getSensors()[0:5]

    def isFinished(self):
        # Criterion for ending an episode.
        # "When the agent can balance for 1000 seconds, the task is considered
        # learned."
        if np.abs(self.env.getTilt()) > self.max_tilt:
            return True
        elapsed_time = self.env.time_step * self.t
        if elapsed_time > self.max_time:
            print 'hit max time.', self.t, elapsed_time
            return True
        return False

    def getReward(self):
        # -1 reward for falling over; no reward otherwise.
        if np.abs(self.env.getTilt()) > self.max_tilt:
            return -1.0
        return 0.0
        # TODO return -np.abs(self.env.getSensors()[0])
class LinearFATileCoding3456BalanceTask(BalanceTask):
"""An attempt to exactly implement Randlov's function approximation. He
discretized (tiled) the input space into 3456 tiles.
"""
# From Randlov, 1998:
theta_bounds = np.array(
[-0.5 * np.pi, -1.0, -0.2, 0, 0.2, 1.0, 0.5 * np.pi])
thetad_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
omega_bounds = np.array(
[-BalanceTask.max_tilt, -0.15, -0.06, 0, 0.06, 0.15,
BalanceTask.max_tilt])
omegad_bounds = np.array(
[-np.inf, -0.5, -0.25, 0, 0.25, 0.5, np.inf])
omegadd_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
# http://stackoverflow.com/questions/3257619/numpy-interconversion-between-multidimensional-and-linear-indexing
nbins_across_dims = [
len(theta_bounds) - 1,
len(thetad_bounds) - 1,
len(omega_bounds) - 1,
len(omegad_bounds) - 1,
len(omegadd_bounds) - 1]
# This array, when dotted with the 5-dim state vector, gives a 'linear'
# index between 0 and 3455.
magic_array = np.cumprod([1] + nbins_across_dims)[:-1]
def __init__(self, *args, **kwargs):
super(LinearFATileCoding3456BalanceTask, self).__init__(*args, **kwargs)
# Count the number of times that each state is visited.
self.bin_count = np.zeros(self.outdim)
@property
def outdim(self):
# Used when constructing LinearFALearner's.
return 3456
def getBin(self, theta, thetad, omega, omegad, omegadd):
bin_indices = [
np.digitize([theta], self.theta_bounds)[0] - 1,
np.digitize([thetad], self.thetad_bounds)[0] - 1,
np.digitize([omega], self.omega_bounds)[0] - 1,
np.digitize([omegad], self.omegad_bounds)[0] - 1,
np.digitize([omegadd], self.omegadd_bounds)[0] - 1,
]
linear_index = np.dot(self.magic_array, bin_indices)
if linear_index > self.outdim:
# DEBUGGING PRINTS
print self.isFinished()
print self.env.getTilt()
print np.abs(self.env.getTilt())
print self.max_tilt
print np.abs(self.env.getTilt()) > self.max_tilt
print self.env.getSensors()[0:5]
print self.magic_array
print bin_index_for_each_dim
print linear_index
return linear_index
def getBinIndices(self, linear_index):
"""Given a linear index (integer between 0 and outdim), returns the bin
indices for each of the state dimensions.
"""
return linear_index / self.magic_array % self.nbins_across_dims
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi, psig) = self.env.getSensors()
# TODO not calling superclass to do normalization, etc.
state = one_to_n(self.getBin(theta, thetad, omega, omegad, omegadd),
self.outdim)
self.bin_count += state
return state
class LSPIBalanceTask(BalanceTask):
    """Lagoudakis, 2002; simplified for just balancing. Also, we're still using
    all 9 possible actions.
    """
    @property
    def outdim(self):
        # Length of the feature vector returned by getPhi; used when
        # constructing LinearFALearner's.
        return 14

    def getPhi(self, theta, thetad, omega, omegad, omegadd):
        """Return the 14-element LSPI basis-function feature vector: a
        constant term, quadratic terms in omega and theta, and cross
        terms between the two."""
        omega_terms = [omega, omegad, omega**2, omegad**2, omega * omegad]
        theta_terms = [theta, thetad, theta**2, thetad**2, theta * thetad]
        cross_terms = [omega * theta, omega * theta**2, omega**2 * theta]
        return np.array([1] + omega_terms + theta_terms + cross_terms)

    def getObservation(self):
        # only the first five sensors feed the feature vector
        (theta, thetad, omega, omegad, omegadd,
                xf, yf, xb, yb, psi, psig) = self.env.getSensors()
        return self.getPhi(theta, thetad, omega, omegad, omegadd)
class LinearFATileCoding3456GoToTask(BalanceTask):
"""An attempt to exactly implement Randlov's function approximation. He
discretized (tiled) the input space into 3456 tiles.
"""
# From Randlov, 1998:
theta_bounds = np.array(
[-0.5 * np.pi, -1.0, -0.2, 0, 0.2, 1.0, 0.5 * np.pi])
thetad_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
omega_bounds = np.array(
[-BalanceTask.max_tilt, -0.15, -0.06, 0, 0.06, 0.15,
BalanceTask.max_tilt])
omegad_bounds = np.array(
[-np.inf, -0.5, -0.25, 0, 0.25, 0.5, np.inf])
omegadd_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
# http://stackoverflow.com/questions/3257619/numpy-interconversion-between-multidimensional-and-linear-indexing
nbins_across_dims = [
len(theta_bounds) - 1,
len(thetad_bounds) - 1,
len(omega_bounds) - 1,
len(omegad_bounds) - 1,
len(omegadd_bounds) - 1]
# This array, when dotted with the 5-dim state vector, gives a 'linear'
# index between 0 and 3455.
magic_array = np.cumprod([1] + nbins_across_dims)[:-1]
@property
def outdim(self):
# Used when constructing LinearFALearner's.
return 3456
def getBin(self, theta, thetad, omega, omegad, omegadd):
bin_indices = [
np.digitize([theta], self.theta_bounds)[0] - 1,
np.digitize([thetad], self.thetad_bounds)[0] - 1,
np.digitize([omega], self.omega_bounds)[0] - 1,
np.digitize([omegad], self.omegad_bounds)[0] - 1,
np.digitize([omegadd], self.omegadd_bounds)[0] - 1,
]
linear_index = np.dot(self.magic_array, bin_indices)
if linear_index > self.outdim:
# DEBUGGING PRINTS
print self.isFinished()
print self.env.getTilt()
print np.abs(self.env.getTilt())
print self.max_tilt
print np.abs(self.env.getTilt()) > self.max_tilt
print self.env.getSensors()[0:5]
print self.magic_array
print bin_index_for_each_dim
print linear_index
return linear_index
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi, psig) = self.env.getSensors()
# TODO not calling superclass to do normalization, etc.
return one_to_n(self.getBin(theta, thetad, omega, omegad, omegadd),
self.outdim)
def getReward(self):
# -1 reward for falling over; no reward otherwise.
x = 15.
y = 20.
if np.abs(self.env.getTilt()) > self.max_tilt:
return -1.0
else:
return 1e-5 * ((self.env.getXF() - x)**2 + (self.env.getYF() - y)**2)
class LinearFATileCoding3456BalanceTaskCleverReward(LinearFATileCoding3456BalanceTask):
    # offset added to the tilt magnitude before shaping the reward
    b_reward = 1 - LinearFATileCoding3456BalanceTask.max_tilt
    def getReward(self):
        """Shaped reward: negative square root of the offset tilt magnitude."""
        tilt_magnitude = np.abs(self.env.getTilt())
        return -sqrt(tilt_magnitude + self.b_reward)
class LinearFATileCoding3456BalanceTaskCleverReward2(LinearFATileCoding3456BalanceTask):
    # offset added to the tilt magnitude before shaping the reward
    b_reward = 1 - LinearFATileCoding3456BalanceTask.max_tilt
    def getReward(self):
        """Shaped reward: negative cube of the offset tilt magnitude."""
        tilt_magnitude = np.abs(self.env.getTilt())
        return -(tilt_magnitude + self.b_reward)**3
class LinearFATileCoding3456BalanceTaskRewardPower4(LinearFATileCoding3456BalanceTask):
    # offset added to the tilt magnitude before shaping the reward
    b_reward = 1 - LinearFATileCoding3456BalanceTask.max_tilt
    def getReward(self):
        """Shaped reward: negative fourth power of the offset tilt magnitude."""
        tilt_magnitude = np.abs(self.env.getTilt())
        return -(tilt_magnitude + self.b_reward)**4
class LinearFATileCoding3456BalanceTaskRewardPower5(LinearFATileCoding3456BalanceTask):
    # offset added to the tilt magnitude before shaping the reward
    b_reward = 1 - LinearFATileCoding3456BalanceTask.max_tilt
    def getReward(self):
        """Shaped reward: negative fifth power of the offset tilt magnitude."""
        tilt_magnitude = np.abs(self.env.getTilt())
        return -(tilt_magnitude + self.b_reward)**5
class LinearFATileCoding3456BalanceTaskRewardPower6(LinearFATileCoding3456BalanceTask):
b_reward = 1 | |
self._cache.put('_lastconfig_', self._name, self._config)
# call custom initialization
if hasattr(self, 'doInit'):
self.doInit(self._mode)
    def _getCache(self):
        """Indirection needed by the Cache client itself.

        Returns the session-wide cache client; devices access the cache
        through this hook rather than touching ``session.cache`` directly.
        """
        return session.cache
def _validateType(self, value, param, paraminfo=None):
"""Validate and coerce the value of a parameter to the correct type.
If the value can't be coerced, a ConfigurationError is raised.
"""
paraminfo = paraminfo or self._getParamConfig(param)
try:
value = paraminfo.type(value)
except (ValueError, TypeError) as err:
raise ConfigurationError(
self, '%r is an invalid value for parameter %s: %s' % (
value, param, err)) from err
return value
    def _initParam(self, param, paraminfo=None):
        """Get an initial value for the parameter, called when the cache
        doesn't contain such a value.

        If present, a doReadParam method is called. Otherwise, the value comes
        from either the setup file or the device-specific default value.
        """
        paraminfo = paraminfo or self.parameters[param]
        # optional device-specific hooks: doRead<Param> / doUpdate<Param>
        rmethod = getattr(self, 'doRead' + param.title(), None)
        umethod = getattr(self, 'doUpdate' + param.title(), None)
        done = False
        # try to read from the hardware (only in non-simulation mode)
        if not self._sim_intercept and rmethod:
            try:
                value = rmethod()
            except NicosError:
                # best-effort: fall through to cached/config/default value
                self.log.warning('could not read initial value for parameter '
                                 '%s from device', param)
            else:
                done = True
        if not done and param in self._params:
            # happens when called from a param getter, not from init()
            value = self._params[param]
        elif not done:
            # fall back to the setup file value or the declared default,
            # coerced to the parameter's type
            value = self._config.get(param, paraminfo.default)
            value = self._validateType(value, param, paraminfo)
        if self._cache:  # will not be there in simulation mode
            self._cache.put(self, param, value)
        # always call update methods, they should be working for simulation
        if umethod:
            umethod(value)
        self._params[param] = value
        return value
def _setROParam(self, param, value):
    """Set an otherwise read-only parameter.

    This is useful for parameters that change at runtime, but indirectly,
    such as "last filenumber".
    """
    # coerce to the declared parameter type before storing
    value = self._validateType(value, param)
    self._params[param] = value
    if self._cache:
        self._cache.put(self, param, value)
def _getParamConfig(self, param):
    """Return the entry for the parameter from self.parameters.

    This should be used when alias resolution is desired, since it is
    overridden for DeviceAliases.

    :param param: the parameter name.
    """
    return self.parameters[param]
def _getFromCache(self, name, func, maxage=None):
    """Get *name* from the cache, or call *func* if outdated/not present.

    If the *maxage* parameter is set, do not allow the value to be older
    than that amount of seconds.
    """
    if not self._cache:
        # no cache configured: always compute the value fresh
        return func()
    val = Ellipsis  # Ellipsis serves as the "no cached value" sentinel
    if maxage != 0:
        # maxage == 0 forces a fresh read; otherwise query the cache with
        # the requested minimum timestamp
        val = self._cache.get(
            self, name, Ellipsis,
            mintime=currenttime() - maxage if maxage is not None else 0)
    if val is Ellipsis:
        # cache miss: compute via func and store with the device's maxage
        defmaxage = getattr(self, 'maxage', None)
        val = func(defmaxage if maxage is None else maxage)
        self._cache.put(self, name, val, currenttime(), defmaxage)
    return val
def formatParam(self, param, value, use_repr=True):
    """Format a parameter value according to its fmtstr.

    Lists are formatted like tuples; the special fmtstr 'main' stands for
    the device's own value fmtstr (applied element-wise to tuples).
    """
    if isinstance(value, list):
        value = tuple(value)
    fmtstr = self._getParamConfig(param).fmtstr
    if not use_repr and fmtstr == '%r':
        fmtstr = '%s'
    if fmtstr == 'main':
        if isinstance(value, tuple):
            parts = (self.fmtstr,) * len(value)
            fmtstr = '(' + ', '.join(parts) + ')'
        else:
            fmtstr = self.fmtstr
    try:
        return fmtstr % value
    except (TypeError, ValueError):
        # fall back to repr when the fmtstr does not fit the value
        return repr(value)
def _setMode(self, mode):
    """Set a new execution mode; in simulation the cache is dropped."""
    self._mode = mode
    if mode == SIMULATION:
        # switching to simulation mode: remove cache entirely
        # and rely on saved _params and values
        self._cache = None
def history(self, name='value', fromtime=None, totime=None):
    """Return a history of the parameter *name* (can also be ``'value'``
    or ``'status'``).

    *fromtime* and *totime* can be used to limit the time window.  They
    can be:

    * positive numbers: interpreted as UNIX timestamps
    * negative numbers: interpreted as hours back from now
    * strings: in one of the formats 'HH:MM', 'HH:MM:SS',
      'YYYY-MM-DD', 'YYYY-MM-DD HH:MM' or 'YYYY-MM-DD HH:MM:SS'

    Default is to query the values of the last hour.
    """
    if not self._cache:
        # no cache is configured for this setup
        return []
    else:
        if fromtime is None:
            fromtime = -1  # default: one hour back
        if isinstance(fromtime, str):
            fromtime = parseDateString(fromtime)
        elif fromtime < 0:
            # negative numbers mean "hours before now"
            fromtime = currenttime() + fromtime * 3600
        if totime is None:
            totime = currenttime()
        elif isinstance(totime, str):
            # enddate: string end times are inclusive of the given day/time
            totime = parseDateString(totime, enddate=True)
        elif totime < 0:
            totime = currenttime() + totime * 3600
        return self._cache.history(self, name, fromtime, totime)
def info(self):
    """Return "device information" as an iterable of tuples ``(name,
    raw_value, formatted_value, unit, category)``.

    This "device information" is put into data files and should therefore
    include any parameters that will be essential to record the current
    status of the instrument.

    The default implementation already collects all parameters whose
    ``category`` property is set.

    .. method:: doInfo()

       This method can add more device information by returning it as a
       sequence of tuples.
    """
    ret = []
    if hasattr(self, 'doInfo') and self._mode != SIMULATION:
        ret.extend(self.doInfo())
    selfunit = getattr(self, 'unit', '')
    for category, name, unit in self._infoparams:
        try:
            # the default argument binds *name* now; the lambda ignores
            # the maxage argument _getFromCache passes on a cache miss
            parvalue = self._getFromCache(
                name, lambda _maxage=None: getattr(self, name))
        except Exception as err:
            # one unreadable parameter should not break info()
            self.log.warning('error getting %s parameter', name, exc=err)
            continue
        # 'main' in a parameter unit stands for the device's own unit
        parunit = (unit or '').replace('main', selfunit)
        ret.append((name, parvalue,
                    self.formatParam(name, parvalue, use_repr=False),
                    parunit, category))
    return ret
def shutdown(self):
    """Shut down the device.  This method is called by the NICOS system
    when the device is destroyed, manually or because the current setup
    is unloaded.

    .. method:: doShutdown()

       This method is called, if present, but not in simulation mode.
       It should perform cleanup, for example closing connections to
       hardware.
    """
    self.log.debug('shutting down device')
    caught_exc = None
    if self._mode != SIMULATION:
        # do not execute shutdown actions when simulating
        # remove subscriptions to parameter value updates
        if self._cache:
            for param, func in self._subscriptions:
                self._cache.removeCallback(self, param, func)
        # execute custom shutdown actions
        if hasattr(self, 'doShutdown'):
            try:
                self.doShutdown()
            except Exception as err:
                # remember the error; deregistration below must still run
                caught_exc = err
    # detach this device from all attached devices
    for adev in self._adevs.values():
        if isinstance(adev, list):
            for real_adev in adev:
                real_adev._sdevs.discard(self._name)
                real_adev._controllers.discard(self._name)
        elif adev is not None:
            adev._sdevs.discard(self._name)
            adev._controllers.discard(self._name)
    # remove the device from the session registries
    session.devices.pop(self._name, None)
    session.device_case_map.pop(self._name.lower(), None)
    session.explicit_devices.discard(self._name)
    # re-raise the doShutdown error
    if caught_exc is not None:
        raise caught_exc
@usermethod
def version(self):
    """Return a list of versions for this device.

    These are tuples (component, version) where a "component" can be the
    name of a Python module, or an external dependency (like a TACO
    server).

    The base implementation already collects VCS revision information
    available from all Python modules involved in the class inheritance
    chain of the device class.

    .. method:: doVersion()

       This method is called if present, and should return a list of
       (component, version) tuples that are added to the version info.
    """
    versions = getVersions(self)
    # doVersion is skipped while intercepted for simulation
    if not self._sim_intercept and hasattr(self, 'doVersion'):
        versions.extend(self.doVersion())
    return versions
def _cachelock_acquire(self, timeout=3):
    """Acquire an exclusive lock for using this device from the cache.

    This can be used if read access to the device needs to be locked
    (write access is locked anyway, since only one NICOS session can be
    the master session at a time).

    :param timeout: seconds to keep retrying before giving up.
    """
    if not self._cache:
        return
    start = currenttime()
    while True:
        try:
            self._cache.lock(self._name)
        except CacheLockError:
            if currenttime() > start + timeout:
                raise CommunicationError(
                    self, 'device locked in cache') from None
            # lock is held elsewhere: wait a bit and retry
            session.delay(self._base_loop_delay * 3)
        else:
            break
def _cachelock_release(self):
    """Release the exclusive cache lock for this device.

    Always use like this::

        self._cachelock_acquire()
        try:
            ...  # do locked operations
        finally:
            self._cachelock_release()
    """
    if not self._cache:
        return
    try:
        self._cache.unlock(self._name)
    except CacheLockError:
        # another instance holds the lock, so we never owned it
        raise CommunicationError(
            self, 'device locked by other instance') from None
def _pollParam(self, name, with_ttl=0):
    """Read a parameter from the hardware and put its value into the
    cache.

    This is intended to be used from :meth:`doPoll` methods, so that they
    don't have to implement parameter polling themselves.  For readable
    devices, if *with_ttl* is > 0, the cached value gets the TTL of the
    device value, determined by :attr:`maxage`, multiplied by *with_ttl*.
    """
    value = getattr(self, 'doRead' + name.title())()
    if with_ttl:
        self._cache.put(self, name, value, currenttime(),
                        getattr(self, 'maxage', 0) * with_ttl)
    else:
        self._cache.put(self, name, value)
def pollParams(self, volatile_only=True, blocking=False, with_ttl=0,
               param_list=None):
    """Poll all parameters (normally only volatile ones).

    With *blocking*, parameters are read here and now; otherwise the
    request is handed off via the cache entry below.
    """
    if param_list is None:
        param_list = list(self.parameters)
    # restrict to volatile parameters (or, when volatile_only is False,
    # to all parameters that have a doRead method)
    param_list = [param for param in param_list if
                  self.parameters[param].volatile or
                  (not volatile_only and
                   hasattr(self, 'doRead' + param.title()))]
    if blocking:
        for param in param_list:
            self._pollParam(param, with_ttl)
    else:
        # NOTE(review): FLAG_NO_STORE presumably marks this as a request
        # that is not kept in the cache history -- confirm.
        self._cache.put_raw('poller/%s/pollparams' % self.name, param_list,
                            flag=FLAG_NO_STORE)
class Readable(Device):
"""
Base class for all readable devices.
Subclasses | |
'''
Assign stellar mass/magnitude to subhalos via abundance matching.
Masses in log {M_sun}, luminosities in log {L_sun / h^2}, distances in {Mpc comoving}.
'''
# system -----
#from __future__ import division
import numpy as np
from numpy import log10, Inf
from scipy import integrate, interpolate, ndimage
# local -----
#from visualize import plot_sm
try:
from utilities import utility as ut
except ImportError:
pass
def assign(sub, m_kind='m.star', scat=0, dis_mf=0.007, source='', sham_prop='m.max', zis=None):
    '''
    Assign Mag_r or M_star via abundance matching.

    Import catalog of subhalo [at snapshot], mass kind (mag.r, m.star),
    1-sigma mass scatter at fixed sham prop [dex], disruption mass fraction
    (for both cens & sats), mass source, property to abundance match against,
    [snapshot index[s]].
    '''
    if isinstance(sub, list):
        if zis is None:
            raise ValueError('subhalo catalog is a tree list, but no input snapshot index[s]')
    elif isinstance(sub, dict):
        if zis is not None:
            raise ValueError('input snapshot index[s], but input catalog of subhalo at snapshot')
        # wrap a single-snapshot catalog so the loop below works uniformly
        sub = [sub]
        zis = [0]
    subz = sub[zis[0]]
    # comoving volume of the simulation box
    vol = subz.info['box.length'] ** 3
    print('Box Length', subz.info['box.length'])
    print('Box Hubble', subz.Cosmo['hubble'])
    zis = ut.array.arrayize(zis)
    if m_kind == 'm.star':
        if not source:
            source = 'li-drory-march'
        redshift = subz.snap['z']
        if redshift < 0.1:
            # clip: the SMF fits are anchored at z = 0.1
            redshift = 0.1
        MF = SMFClass(source, redshift, scat, subz.Cosmo['hubble'])
    elif m_kind == 'mag.r':
        if source == 'cool_ages':
            redshift = subz.snap['z']
            if redshift < 0.1:
                redshift = 0.1
            MF = LFClass(source, scat, subz.Cosmo['hubble'], redshift)
        else:
            if not source:
                source = 'blanton'
            MF = LFClass(source, scat, subz.Cosmo['hubble'])
    else:
        raise ValueError('not recognize m_kind = %s' % m_kind)
    for zi in zis:
        subz = sub[zi]
        subz[m_kind] = np.zeros(subz[sham_prop].size, np.float32)
        if m_kind == 'm.star':
            z = subz.snap['z']
            if z < 0.1:
                z = 0.1
            MF.initialize_redshift(z)
        elif m_kind == 'mag.r':
            if source == 'cool_ages':
                z = subz.snap['z']
                if z < 0.1:
                    z = 0.1
                MF.initialize_redshift(z)
        # maximum number of objects in volume to assign given SMF/LF threshold
        num_max = int(round(MF.numden(MF.mmin) * vol))
        sis = ut.array.elements(subz[sham_prop], [0.001, Inf])
        if dis_mf:
            # drop subhalos below the disruption mass-fraction threshold
            sis = ut.array.elements(subz['m.frac.min'], [dis_mf, Inf], sis)
        # rank the selected subhalos by sham_prop, descending, keep num_max
        siis_sort = np.argsort(subz[sham_prop][sis]).astype(sis.dtype)[::-1][:num_max]
        # cumulative object counts 1..num_max -> number densities below
        num_sums = ut.array.arange_length(num_max) + 1
        if scat:
            if m_kind == 'm.star':
                scats = np.random.normal(np.zeros(num_max), MF.scat).astype(np.float32)
            elif m_kind == 'mag.r':
                # magnitudes: 2.5 mag per dex of luminosity scatter
                scats = np.random.normal(np.zeros(num_max), 2.5 * MF.scat).astype(np.float32)
            subz[m_kind][sis[siis_sort]] = MF.m_scat(num_sums / vol) + scats
        else:
            subz[m_kind][sis[siis_sort]] = MF.m(num_sums / vol)
class SMFClass:
'''
Relate number density [dnumden / dlog(M_star/M_sun)] <-> stellar mass [log10(M_star/M_sun)]
using fits to observed stellar mass functions.
All SMFs assume input Hubble constant.
'''
def __init__(self, source='li-march', redshift=0.1, scat=0, hubble=0.7):
'''
Import SMF source, redshift, log scatter in M_star at fixed Msub.
'''
self.source = source
self.scat = scat
self.hubble = hubble
if source == 'li':
'''
Li & White 2009. z = 0.1 from SDSS. Chabrier IMF. Complete to 1e8 M_sun/h^2.
'''
self.redshifts = np.array([0.1])
self.mchars = np.array([10.525]) - 2 * log10(hubble) # {M_sun}
self.amplitudes = np.array([0.0083]) * hubble ** 3 # {Mpc ^ -3 / log(M/M_sun)}
self.slopes = np.array([-1.155])
self.initialize_redshift(redshift)
elif source == 'baldry':
'''
Baldry et al 2008. z = 0.1 from SDSS. diet Salpeter IMF = 0.7 Salpeter.
Complete to 1e8 M_sun.
'''
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.array([0.1])
# covert to Chabrier
self.mchars = (np.array([10.525]) + 2 * log10(h_them / hubble) + log10(1 / 1.6 / 0.7))
self.amplitudes = np.array([0.00426]) * (hubble / h_them) ** 3
self.amplitudes2 = np.array([0.00058]) * (hubble / h_them) ** 3
self.slopes = np.array([-0.46])
self.slopes2 = np.array([-1.58])
self.initialize_redshift(redshift)
elif source == 'cole-march':
'''
Marchesini et al 2009. 1.3 < z < 4.0. Kroupa IMF.
z = 0.1 from Cole et al 2001 (2dF), converting their Salpeter to Kroupa.
*** In order to use out to z ~ 4, made evolution flat from z = 3.5 to 4.
'''
self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.65, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
# converted to {Mpc ^ -3 dex ^ -1}
self.amplitudes = np.array([90.00, 29.65, 11.52, 1.55, 1.55]) * 1e-4 * hubble ** 3
self.slopes = np.array([-1.18, -1.00, -1.01, -1.39, -1.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'li-march':
'''
Marchesini et al 2009, using Li & White at z = 0.1.
'''
self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.525, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
self.amplitudes = (np.array([0.0083, 0.002965, 0.00115, 0.000155, 0.000155]) *
hubble ** 3)
self.slopes = np.array([-1.155, -1.00, -1.01, -1.39, -1.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'li-march-extreme':
'''
More extreme version of Marchesini et al 2009, using Li & White at z = 0.1.
'''
self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.525, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
self.amplitudes = (np.array([0.0083, 0.00001, 0.00001, 0.00001, 0.000001]) *
hubble ** 3)
self.slopes = np.array([-1.155, -1.00, -1.01, -1.39, -1.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'constant-li':
'''
Li & White at all redshifts
'''
self.redshifts = np.arange(0.1, 4.03, 0.1)
self.mchars = np.repeat(10.525, len(self.redshifts)) - 2 * log10(hubble)
self.amplitudes = (np.repeat(0.0083, len(self.redshifts))* hubble ** 3)
self.slopes = np.repeat(-1.155, len(self.redshifts))
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'fontana':
'''
Fontana et al 2006. 0.4 < z < 4 from GOODS-MUSIC. Salpeter IMF.
z = 0.1 from Cole et al 2001.
'''
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.array([0.1, 4.0]) # store redshift range of validity
self.amplitude0 = 0.0035 * (hubble / h_them) ** 3 # to {Mpc ^ -3 / log10(M/M_sun)}
self.amplitude1 = -2.2
self.slope0 = -1.18
self.slope1 = -0.082
self.mchar0 = 11.16 # log10(M/M_sun)
self.mchar1 = 0.17 # log10(M/M_sun)
self.mchar2 = -0.07 # log10(M/M_sun)
# convert to my hubble & Chabrier IMF
self.mchar0 += 2 * log10(h_them / hubble) - log10(1.6)
self.initialize_redshift(redshift)
elif source == 'li-drory-march':
'''
Drory et al 2009. 0.3 < z < 1.0 from COSMOS.
Chabrier IMF limited to 0.1 - 100 M_sun.
Complete to (8.0, 8.6, 8.9, 9.1) M_sun/h^2 at z = (0.3, 0.5, 0.7, 0.9).
Anchor to Li & White at z = 0.1, Marchesini et al at higher redshift.
See Ilbert et al 2010 for alternate COSMOS version.
'''
h_them = 0.72 # their assumed hubble constant
self.redshifts = np.array([0.3, 0.5, 0.7, 0.9])
self.mchars = np.array([10.90, 10.91, 10.95, 10.92]) + 2 * log10(h_them / hubble)
# convert to [Mpc ^ -3 dex^-1]
self.amplitudes = (np.array([0.00289, 0.00174, 0.00216, 0.00294]) *
(hubble / h_them) ** 3)
self.slopes = np.array([-1.06, -1.05, -0.93, -0.91])
self.mchars2 = np.array([9.63, 9.70, 9.75, 9.85]) + 2 * log10(h_them / hubble)
self.amplitudes2 = (np.array([0.00180, 0.00143, 0.00289, 0.00212]) *
(hubble / h_them) ** 3)
self.slopes2 = np.array([-1.73, -1.76, -1.65, -1.65])
# add li & white
self.redshifts = np.append(0.1, self.redshifts)
self.mchars = np.append(10.525 - 2 * log10(hubble), self.mchars)
self.amplitudes = np.append(0.0083 * hubble ** 3, self.amplitudes)
self.slopes = np.append(-1.155, self.slopes)
self.mchars2 = np.append(self.mchars2[0], self.mchars2)
self.amplitudes2 = np.append(0, self.amplitudes2)
self.slopes2 = np.append(self.slopes2[0], self.slopes2)
# add marchesini et al
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.append(self.redshifts, [1.6, 2.5, 3.56, 4.03])
self.mchars = np.append(self.mchars,
np.array([10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble))
self.amplitudes = np.append(self.amplitudes,
np.array([0.002965, 0.00115, 0.000155, 0.000155]) *
hubble ** 3)
self.slopes = np.append(self.slopes, [-1.00, -1.01, -1.39, -1.39])
self.mchars2 = np.append(self.mchars2, np.zeros(4) + self.mchars2[0])
self.amplitudes2 = np.append(self.amplitudes2, np.zeros(4))
self.slopes2 = np.append(self.slopes2, np.zeros(4) + self.slopes2[0])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'li-drory-march_sameslope':
'''
Apply low-mass slope from Drory et al 2009 to Li & White, Marchesini et al.
'''
self.redshifts = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.525, 10.61, 10.62, 10.66, 10.63, 10.60, 10.65, 11.07,
11.07] - 2 * log10(hubble))
self.amplitudes = np.array([0.0083, 0.00774, 0.00466, 0.00579, 0.00787, 0.00297,
0.00115, 0.000155, 0.000155]) * hubble ** 3
self.slopes = np.array([-1.155, -1.06, -1.05, -0.93, -0.91, -1.00, -1.01, -1.39, -1.39])
self.mchars2 = (np.array([9.35, 9.34, 9.41, 9.46, 9.56, 9.41, 9.46, 9.83, 9.83]) -
2 * log10(hubble))
self.amplitudes2 = np.array([0.00269, 0.00482, 0.00383, 0.00774, 0.00568, 0.000962,
0.000375, 0.0000503, 0.0000503]) * hubble ** 3
self.slopes2 = np.array([-1.70, -1.73, -1.76, -1.65, -1.65, -1.72, -1.74, -2.39, -2.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'perez':
'''
Perez-Gonzalez et al 2008. | |
# trainLib.py
import math
# constants and globals
background = '0'  # grid character denoting an empty/background cell
# Compass directions numbered clockwise, so (d + 1) % 4 is a 90-degree CW turn.
NORTH = 0
EAST = 1
SOUTH = 2
WEST = 3
# Reverse mapping, used for debugging/printing direction names.
dirs = {0: "NORTH", 1: "EAST", 2: "SOUTH", 3: "WEST"}
class CellElement():
    """Interface for grid cell elements.

    Subclasses: RegularRoad, Switch, LevelCrossing, Bridge, Station.
    All methods are no-op defaults (returning None) for subclasses to
    override.
    """

    def setPosition(self, x, y):
        """Place the element at grid coordinates (x, y)."""
        return

    def setOrientation(self, a):
        """Rotate the element to orientation *a*."""
        return

    def switchState(self):
        """Toggle internal state (meaningful only for switches)."""
        return

    def getDuration(self, entdir):
        """Return the traversal time for a train entering from *entdir*."""
        return

    def getStop(self, entdir):
        """Return the stop (wait) time for a train entering from *entdir*."""
        return

    def nextCell(self, entdir):
        """Return the neighbouring cell a train entering from *entdir* exits into."""
        return

    def getView(self):
        """Return the character used to display this cell.

        BUG FIX: the original signature was ``def getView():`` without
        *self*, so calling it on an instance raised TypeError.
        """
        return

    # Additional interface methods added by us

    def setCwRot(self):
        """Rotate the element's directions 90 degrees clockwise."""
        return

    def canEnter(self, entdir):
        """Check availability of this cell for a train entering from *entdir*."""
        return

    def getPos(self):
        """Return the element's (row, col) position."""
        return
class GameGrid():
    """Holds the cell grid, a parallel view grid of characters, and the
    active trains."""

    def __init__(self, row, col):
        self.row = row
        self.col = col
        self.grid = []
        self.view = []
        # Train refs to draw them on screen, on top of the tile view.
        self.activeTrains = []
        # Default grid creation, filled with background cells.  Although a
        # RegularRoad is assigned to every cell, it is made background by
        # changing the cell's visuals; this avoids a separate (empty)
        # subclass for background cells.
        for i in range(0, row):
            self.grid.append([])
            self.view.append([])
            for j in range(0, col):
                # BUG FIX: pass the GameGrid itself (not the raw list) as
                # grid reference -- RegularRoad.nextCell() calls
                # myGrid.isOutOfBounds() and reads myGrid.grid.
                c = RegularRoad(True, self)
                c.visuals = '_'
                c.setPosition(i, j)
                self.grid[i].append(c)
                # The view grid is separate from the actual grid; it keeps
                # the visuals and is used only for display.
                self.view[i].append(c.visuals)

    def addElement(self, cellElm, row, col):
        """Put *cellElm* into the grid at (row, col) and update the view."""
        cellElm.setPosition(row, col)
        self.grid[row][col] = cellElm
        self.view[row][col] = cellElm.visuals
        return

    def removeElement(self, row, col):
        """Replace the cell at (row, col) with a background cell."""
        empty = RegularRoad(True, self)  # see __init__ for why self
        empty.visuals = '_'
        # BUG FIX: the replacement cell's position was never set, leaving
        # it at (-1, -1); set it so getPos()/nextCell() stay consistent.
        empty.setPosition(row, col)
        self.grid[row][col] = empty
        self.view[row][col] = '_'  # visual for background
        return

    def display(self):
        """Print the view grid to stdout."""
        for i in range(0, self.row):
            for j in range(0, self.col):
                print(self.view[i][j], end=' ')
            print('\n')

    def isOutOfBounds(self, i, j):
        """Return True when position (i, j) lies outside the grid."""
        if (i >= self.row or j >= self.col or i < 0 or j < 0):
            return True
        return False

    def updateView(self):
        # The view is kept up to date incrementally by addElement /
        # removeElement and shown by display(), so nothing to do here.
        return

    def startSimulation(self):
        return

    def setPauseResume(self):
        return

    def stopSimulation(self):
        return

    def spawnTrain(self, wagonCount, row, col):
        """Create a train with *wagonCount* wagons at (row, col)."""
        if (self.isOutOfBounds(row, col)):
            print("invalid spawn pos for train.", row, col)
            return
        spawnCell = self.grid[row][col]
        t = Train(wagonCount, spawnCell, self)
        # Register the train so the grid can track it; becomes more useful
        # once multiple trains exist in later phases.
        self.registerTrain(t)
        return t

    def registerTrain(self, train):
        self.activeTrains.append(train)
        return

    def trainDisappear(self, train):
        self.activeTrains.remove(train)
        return

    def hasTrain(self, row, col):
        """Return True when some active train's engine is at (row, col)."""
        for t in self.activeTrains:
            if (t.enginePosRow == row and t.enginePosCol == col):
                return True
        return False
class RegularRoad(CellElement):
    # RegularRoad can be either a straight road or a right turn.  Both are
    # modelled by one class since each has exactly one entrance and one
    # exit direction pair.

    def __init__(self, isStraight, gridRef):
        self.visuals = '_'
        self.rotationCount = 0
        # grid reference, needed to look up neighbouring cells
        self.myGrid = gridRef
        self.row = -1
        self.col = -1
        # if not straight, it is a right turn; a left turn is the once-CW
        # rotated version of a right turn (see makeLeftTurn)
        self.isRegular = isStraight
        if (isStraight):
            self.dir1 = SOUTH
            self.dir2 = NORTH
            self.visuals = '|'
        else:
            # default is a right turn; rotate once CW to get a left turn
            self.visuals = 'R'
            self.dir1 = SOUTH
            self.dir2 = EAST
        return

    def makeLeftTurn(self):
        """Turn this right-turn piece into a left turn; returns self."""
        self.visuals = 'L'
        # the rotation used to form the left turn must not count as a user
        # rotation, so reset the count and rotate without incrementing it
        self.rotationCount = 0
        self.setOrientation(1, False)
        return self

    def setPosition(self, row, col):
        self.row = row
        self.col = col
        return

    def setCwRot(self):
        # assign the new directions after a 90-degree CW rotation
        self.dir1 = (self.dir1 + 1) % 4
        self.dir2 = (self.dir2 + 1) % 4
        return

    def setOrientation(self, rotationAmount, incrRot: bool = True):
        # if incrRot is False the rotation count is not updated (used for
        # orienting a left-turn piece)
        if (incrRot):
            # keep the count mod 4 so new directions stay well-defined
            self.rotationCount = (self.rotationCount + rotationAmount) % 4
        for i in range(0, rotationAmount):
            self.setCwRot()
        return

    def switchState(self):
        # not a switch: nothing to toggle
        return

    def getDuration(self, entdir):
        # default traversal time for a regular road
        return 1

    def getStop(self, entdir):
        # trains do not stop on a regular road
        return 0

    def nextCell(self, entdir):
        # On an edge cell with an outward exit the train disappears
        # (None is returned).
        self.exitDir = None
        # entering via dir1 exits via dir2 and vice versa
        if (self.dir1 == entdir):
            self.exitDir = self.dir2
        elif self.dir2 == entdir:
            self.exitDir = self.dir1
        else:
            # the entrance direction does not connect to this piece
            return None
        # return the neighbour in the exit direction, unless out of bounds
        if (self.exitDir == NORTH and self.myGrid.isOutOfBounds(self.row - 1, self.col) == False):
            # row-1, col unchanged
            return (self.myGrid.grid[self.row - 1][self.col])
        elif (self.exitDir == SOUTH and self.myGrid.isOutOfBounds(self.row + 1, self.col) == False):
            # row+1, col unchanged
            return (self.myGrid.grid[self.row + 1][self.col])
        elif (self.exitDir == WEST and self.myGrid.isOutOfBounds(self.row, self.col - 1) == False):
            # col-1, row unchanged
            return (self.myGrid.grid[self.row][self.col - 1])
        elif (self.exitDir == EAST and self.myGrid.isOutOfBounds(self.row, self.col + 1) == False):
            # col+1, row unchanged
            return (self.myGrid.grid[self.row][self.col + 1])
        else:
            # no available cell found
            return None

    def getPos(self):
        return self.row, self.col

    def getView(self):
        return self.visuals

    def canEnter(self, entdir):
        # connectivity check: a train may enter only via dir1 or dir2
        return (self.dir1 == entdir or self.dir2 == entdir)
class SwitchRoad(CellElement):
#There are three types of switchRoad. Explained in lines:237, 241, 246
def __init__(self, typeofSwitch, gridRef):
# create 'pieces' of the switch using RegularRoad since switches are just the combinations of them.
self.visuals = 'S'
self.myGrid = gridRef
self.rotationCount = 0
self.switchType = typeofSwitch # int value 1,2,3
self.pieces = {'direct' : RegularRoad(True, gridRef)} #We kept the pieces of the switches according to its type.
#for example, switchType-3 has one direct, one rightTurn and one leftTurn.
#since all switches has one RegulaarRoad in common, it is added the dictionary by default.
self.activePiece = self.pieces['direct'] # Keeps track of which part of the switch is active.
#Changed by switchState(). Defualt straight piece is the active one.
self.enter = SOUTH #default switch entrance location is south for all type of switches
self.switchDelay = 2 #used for make train slower in switches.
if(self.switchType == 1):
# straight + right turn
self.pieces['rightTurn'] = RegularRoad(False, gridRef)
elif(self.switchType == 2):
# straight + left turn
self.pieces['leftTurn'] = RegularRoad(False, gridRef) #As explained in RegularRoad class, it is cretaed as a right turn first.
self.pieces['leftTurn'].setOrientation(1, False) #Then rotate it one time and not update the rotationCount.
elif(self.switchType == 3):
# straight + right turn + left turn
self.pieces['rightTurn'] = RegularRoad(False, gridRef)
self.pieces['leftTurn'] = RegularRoad(False, gridRef)
self.pieces['leftTurn'].setOrientation(1, False)
return
def setPosition(self, row, col):
self.row = row
self.col = col
return
def setCwRot(self):
# straightforward 90 degree rotation: S->W, W -> N and so on.
self.enter = (self.enter + 1) % 4
if(self.switchType == 1):
self.pieces['rightTurn'].setOrientation(1)
self.pieces['direct'].setOrientation(1)
elif(self.switchType == 2):
self.pieces['leftTurn'].setOrientation(1)
self.pieces['direct'].setOrientation(1)
else: #switchType is 3
self.pieces['rightTurn'].setOrientation(1)
self.pieces['direct'].setOrientation(1)
self.pieces['leftTurn'].setOrientation(1)
return
def setOrientation(self, rotationAmount):
# rotate 90 degrees CW, directly change dir variables.
self.rotationCount = (self.rotationCount + rotationAmount) % 4
for i in range(0, rotationAmount):
self.setCwRot()
return
def switchState(self):
# defined only for switch roads. Changes which piece is active.
if(self.switchType | |
# Copyright 2015 Radware LTD. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import mock
import re
from neutron import context
from neutron import manager
from neutron.plugins.common import constants
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import queue as Queue
from neutron_lbaas.common.cert_manager import cert_manager
from neutron_lbaas.drivers.radware import exceptions as r_exc
from neutron_lbaas.drivers.radware import v2_driver
from neutron_lbaas.extensions import loadbalancerv2
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2
# Resource prefixes for which mocked GET calls are answered successfully.
GET_200 = ('/api/workflow/', '/api/workflowTemplate')
# Return codes that the REST client treats as "server down".
SERVER_DOWN_CODES = (-1, 301, 307)
class QueueMock(Queue.Queue):
def __init__(self, completion_handler):
self.completion_handler = completion_handler
super(QueueMock, self).__init__()
def put_nowait(self, oper):
self.completion_handler(oper)
def _recover_function_mock(action, resource, data, headers, binary=False):
pass
def rest_call_function_mock(action, resource, data, headers, binary=False):
    """Mock of the REST transport.

    Behaviour is driven by flags stored as attributes on this function
    (set up by the tests before use).
    """
    if rest_call_function_mock.RESPOND_WITH_ERROR:
        return 400, 'error_status', 'error_description', None
    if rest_call_function_mock.RESPOND_WITH_SERVER_DOWN in SERVER_DOWN_CODES:
        val = rest_call_function_mock.RESPOND_WITH_SERVER_DOWN
        return val, 'error_status', 'error_description', None
    # dispatch by HTTP verb; anything unknown yields a neutral response
    handlers = {
        'GET': lambda: _get_handler(resource),
        'DELETE': lambda: _delete_handler(resource),
        'POST': lambda: _post_handler(resource, binary),
    }
    handler = handlers.get(action)
    if handler is not None:
        return handler()
    return 0, None, None, None
def _get_handler(resource):
    """Mocked GET dispatch based on the resource URI."""
    # workflow template listing always succeeds
    if resource.startswith(GET_200[1]):
        return 200, '', '', rest_call_function_mock.WF_TEMPLATES_TO_RETURN
    if resource.startswith(GET_200[0]):
        if rest_call_function_mock.WORKFLOW_MISSING:
            # simulate a missing workflow
            data = jsonutils.loads('{"complete":"True", "success": "True"}')
            return 404, '', '', data
        elif resource.endswith('parameters'):
            # statistics request: return fixed counters
            return 200, '', '', {'stats': {'bytes_in': 100,
                                           'total_connections': 2,
                                           'active_connections': 1,
                                           'bytes_out': 200}}
        else:
            return 200, '', '', ''
    if resource.startswith(GET_200):
        # unreachable after the two prefix checks above; kept for safety
        return 200, '', '', ''
    else:
        # anything else: report an accepted/in-progress operation
        data = jsonutils.loads('{"complete":"True", "success": "True"}')
        return 202, '', '', data
def _delete_handler(resource):
return 404, '', '', {'message': 'Not Found'}
def _post_handler(resource, binary):
    """Mocked POST dispatch based on the resource URI."""
    if re.search(r'/api/workflow/.+/action/.+', resource):
        # workflow action accepted for asynchronous processing
        return 202, '', '', jsonutils.loads('{"uri":"some_uri"}')
    if re.search(r'/api/service\?name=.+', resource):
        # service creation returns the provisioning link
        return 201, '', '', jsonutils.loads(
            '{"links":{"actions":{"provision":"someuri"}}}')
    if binary:
        return 201, '', '', ''
    return 202, '', '', ''
# Provider string that registers the Radware v2 driver with LBaaS v2.
RADWARE_PROVIDER = ('LOADBALANCERV2:radwarev2:neutron_lbaas.'
                    'drivers.radware.v2_driver.'
                    'RadwareLBaaSV2Driver:default')

# Template of service-creation parameters; "_REPLACE_" fields are filled
# in per tenant/service at runtime.
WF_SRV_PARAMS = {
    "name": "_REPLACE_", "tenantId": "_REPLACE_", "haPair": False,
    "sessionMirroringEnabled": False, "islVlan": -1,
    "primary": {
        "capacity": {
            "throughput": 1000, "sslThroughput": 100,
            "compressionThroughput": 100, "cache": 20},
        "network": {
            "type": "portgroup", "portgroups": "_REPLACE_"},
        "adcType": "VA", "acceptableAdc": "Exact"},
    "resourcePoolIds": []}

# Default workflow-creation parameters.
WF_CREATE_PARAMS = {'parameters':
    {"provision_service": True, "configure_l3": True, "configure_l4": True,
     "twoleg_enabled": False, "ha_network_name": "HA-Network",
     "ha_ip_pool_name": "default", "allocate_ha_vrrp": True,
     "allocate_ha_ips": True, "data_port": 1,
     "data_ip_address": "192.168.200.99", "data_ip_mask": "255.255.255.0",
     "gateway": "192.168.200.1", "ha_port": 2}}

# Parameters expected when applying a workflow to an empty loadbalancer.
WF_APPLY_EMPTY_LB_PARAMS = {'parameters': {
    'loadbalancer': {'listeners': [], 'admin_state_up': True,
                     'pip_address': u'10.0.0.2', 'vip_address': u'10.0.0.2'}}}
class TestLBaaSDriverBase(
        test_db_loadbalancerv2.LbaasPluginDbTestCase):
    """Base test case: loads the LBaaS v2 plugin with the Radware v2
    provider and exposes the driver under test as ``self.driver``."""

    def setUp(self):
        super(TestLBaaSDriverBase, self).setUp(
            lbaas_provider=RADWARE_PROVIDER)
        loaded_plugins = manager.NeutronManager().get_service_plugins()
        self.plugin_instance = loaded_plugins[constants.LOADBALANCERV2]
        self.driver = self.plugin_instance.drivers['radwarev2']
class TestLBaaSDriverRestClient(TestLBaaSDriverBase):
    """Tests of the driver REST client's recovery/failover behaviour."""

    def setUp(self):
        super(TestLBaaSDriverRestClient, self).setUp()
        self.flip_servers_mock = mock.Mock(
            return_value=None)
        self.recover_mock = mock.Mock(
            side_effect=_recover_function_mock)
        # keep the originals so individual tests can restore them
        self.orig_recover = self.driver.rest_client._recover
        self.orig_flip_servers = self.driver.rest_client._flip_servers
        self.driver.rest_client._flip_servers = self.flip_servers_mock
        self.driver.rest_client._recover = self.recover_mock

    def test_recover_was_called(self):
        """Call REST client which fails and verify _recover is called."""
        self.driver.rest_client.call('GET', '/api/workflowTemplate',
                                     None, None)
        self.recover_mock.assert_called_once_with('GET',
                                                  '/api/workflowTemplate',
                                                  None, None, False)

    def test_flip_servers(self):
        """A failing call with the real _recover flips primary/secondary."""
        server = self.driver.rest_client.server
        sec_server = self.driver.rest_client.secondary_server
        # restore the real recover so the flip logic actually runs
        self.driver.rest_client._recover = self.orig_recover
        self.driver.rest_client.call('GET', '/api/workflowTemplate',
                                     None, None)
        self.flip_servers_mock.assert_called_once()
        self.assertEqual(server, self.driver.rest_client.secondary_server)
        self.assertEqual(sec_server, self.driver.rest_client.server)
class CertMock(cert_manager.Cert):
    """Minimal Cert implementation returning fixed placeholder strings."""

    def __init__(self, cert_container):
        # The container argument is ignored; all getters are static.
        pass

    def get_certificate(self):
        return "certificate"

    def get_intermediates(self):
        return "intermediates"

    def get_private_key(self):
        return "private_key"

    def get_private_key_passphrase(self):
        return "private_key_passphrase"
class TestLBaaSDriver(TestLBaaSDriverBase):
    def setUp(self):
        """Wire the driver to fully mocked REST and completion machinery.

        Routes every REST path (driver and completion handler) through
        ``rest_call_function_mock`` so no real vDirect appliance or
        worker threads are needed and tests can assert on all calls in
        one place.
        """
        super(TestLBaaSDriver, self).setUp()

        # The mocked vDirect reports the main workflow template plus all
        # child templates as present.
        templates_to_return = [{'name': self.driver.workflow_template_name}]
        for t in self.driver.child_workflow_template_names:
            templates_to_return.append({'name': t})
        rest_call_function_mock.__dict__.update(
            {'RESPOND_WITH_ERROR': False, 'WORKFLOW_MISSING': True,
             'WORKFLOW_TEMPLATE_MISSING': True,
             'RESPOND_WITH_SERVER_DOWN': 200,
             'WF_TEMPLATES_TO_RETURN': templates_to_return})

        self.operation_completer_start_mock = mock.Mock(
            return_value=None)
        self.operation_completer_join_mock = mock.Mock(
            return_value=None)
        self.driver_rest_call_mock = mock.Mock(
            side_effect=rest_call_function_mock)
        self.flip_servers_mock = mock.Mock(
            return_value=None)
        self.recover_mock = mock.Mock(
            side_effect=_recover_function_mock)

        self.driver.completion_handler.start = (
            self.operation_completer_start_mock)
        self.driver.completion_handler.join = (
            self.operation_completer_join_mock)
        self.driver.rest_client.call = self.driver_rest_call_mock
        self.driver.rest_client._call = self.driver_rest_call_mock
        self.driver.completion_handler.rest_client.call = (
            self.driver_rest_call_mock)

        # Queue completions synchronously through the completion handler.
        self.driver.queue = QueueMock(
            self.driver.completion_handler.handle_operation_completion)

        self.addCleanup(self.driver.completion_handler.join)
def test_verify_workflow_templates(self):
templates_to_return = []
for t in self.driver.child_workflow_template_names:
templates_to_return.append({'name': t})
rest_call_function_mock.__dict__.update(
{'WF_TEMPLATES_TO_RETURN': templates_to_return})
message = r_exc.WorkflowTemplateMissing.message % \
{'workflow_template': self.driver.workflow_template_name}
try:
self.driver._verify_workflow_templates()
except r_exc.WorkflowTemplateMissing as e:
self.assertEqual(e.msg, message)
templates_to_return.append(
{'name': self.driver.workflow_template_name})
rest_call_function_mock.__dict__.update(
{'WF_TEMPLATES_TO_RETURN': templates_to_return})
try:
self.driver._verify_workflow_templates()
self.assertTrue(True)
except r_exc.WorkflowTemplateMissing as e:
self.assertTrue(False)
    def test_wf_created_on_first_member_creation(self):
        """The LB workflow is only created when the first member appears."""
        with self.subnet(cidr='10.0.0.0/24') as vip_sub:
            with self.loadbalancer(subnet=vip_sub) as lb:
                with self.listener(
                    loadbalancer_id=lb['loadbalancer']['id']) as listener:
                    with self.pool(
                        protocol=lb_const.PROTOCOL_HTTP,
                        listener_id=listener['listener']['id']) as pool:
                        # No REST traffic expected before a member exists.
                        # NOTE(review): assert_has_calls([]) is vacuously
                        # true; asserting call_count == 0 would be stronger.
                        self.driver_rest_call_mock.assert_has_calls([])
                        with self.member(pool_id=pool['pool']['id'],
                                         subnet=vip_sub, address='10.0.1.10'):
                            # NOTE(review): assert_called_once() is a no-op
                            # attribute on mock < 2.0 -- verify mock version.
                            self.driver_rest_call_mock.assert_called_once()
    def test_wf_deleted_on_lb_deletion(self):
        """Each entity deletion probes the workflow; LB deletion removes it."""
        with self.subnet(cidr='10.0.0.0/24') as vip_sub:
            with self.loadbalancer(subnet=vip_sub) as lb:
                # GET probe expected for this LB's workflow on each delete.
                get_calls = [
                    mock.call('GET', u'/api/workflow/LB_' +
                              lb['loadbalancer']['id'], None, None)]
                with self.listener(
                    loadbalancer_id=lb['loadbalancer']['id']) as listener:
                    with self.pool(
                        protocol=lb_const.PROTOCOL_HTTP,
                        listener_id=listener['listener']['id']) as pool:
                        with self.member(pool_id=pool['pool']['id'],
                                         subnet=vip_sub, address='10.0.1.10'):
                            self.driver_rest_call_mock.reset_mock()
                            # From now on the mocked vDirect reports the
                            # workflow as existing.
                            rest_call_function_mock.__dict__.update(
                                {'WORKFLOW_MISSING': False})
                        # Member deletion probed the workflow.
                        self.driver_rest_call_mock.assert_has_calls(get_calls)
                        self.driver_rest_call_mock.reset_mock()
                    # Pool deletion probed the workflow.
                    self.driver_rest_call_mock.assert_has_calls(get_calls)
                    self.driver_rest_call_mock.reset_mock()
                # Listener deletion probed the workflow.
                self.driver_rest_call_mock.assert_has_calls(get_calls)
                self.driver_rest_call_mock.reset_mock()
            # Load balancer deletion removes the workflow itself.
            self.driver_rest_call_mock.assert_any_call(
                'DELETE', u'/api/workflow/LB_' + lb['loadbalancer']['id'],
                None, None)
def test_lb_crud(self):
with self.subnet(cidr='10.0.0.0/24') as s:
with self.loadbalancer(subnet=s, no_delete=True) as lb:
lb_id = lb['loadbalancer']['id']
with self.listener(loadbalancer_id=lb_id) as l:
with self.pool(
protocol=lb_const.PROTOCOL_HTTP,
listener_id=l['listener']['id']) as p:
self.driver_rest_call_mock.assert_has_calls([])
self.plugin_instance.update_loadbalancer(
context.get_admin_context(),
lb_id, {'loadbalancer': lb})
self.driver_rest_call_mock.assert_has_calls([])
lb_db = self.plugin_instance.db.get_loadbalancer(
context.get_admin_context(),
lb_id)
self.driver.load_balancer.refresh(
context.get_admin_context(), lb_db)
self.driver_rest_call_mock.assert_has_calls([])
with self.member(
no_delete=True, pool_id=p['pool']['id'],
subnet=s, address='10.0.1.10') as m:
m_data = {
"id": m['member']['id'],
"address": "10.0.1.10",
"protocol_port": 80,
"weight": 1, "admin_state_up": True,
"subnet": "255.255.255.255",
"mask": "255.255.255.255",
"gw": "255.255.255.255",
"admin_state_up": True}
wf_apply_params = {'parameters': {
'listeners': [{
"id": l['listener']['id'],
"admin_state_up": True,
"protocol_port": 80,
"protocol": lb_const.PROTOCOL_HTTP,
"connection_limit": -1,
"admin_state_up": True,
"default_pool": {
"id": p['pool']['id'],
"protocol": lb_const.PROTOCOL_HTTP,
"lb_algorithm":
"ROUND_ROBIN",
"admin_state_up": True,
"members": [m_data]}}],
"admin_state_up": True,
"pip_address": "10.0.0.2",
"vip_address": "10.0.0.2"}}
calls = [
mock.call(
'POST', '/api/workflowTemplate/' +
'os_lb_v2?name=LB_' + lb_id, mock.ANY,
v2_driver.TEMPLATE_HEADER),
mock.call(
'POST',
'/api/workflow/LB_' + lb_id +
'/action/apply',
wf_apply_params,
v2_driver.TEMPLATE_HEADER)
]
self.driver_rest_call_mock.assert_has_calls(calls)
self.driver_rest_call_mock.reset_mock()
rest_call_function_mock.__dict__.update(
{'WORKFLOW_MISSING': False})
calls = [
mock.call(
'POST',
'/api/workflow/LB_' + lb_id +
'/action/apply',
wf_apply_params,
v2_driver.TEMPLATE_HEADER)
]
self.plugin_instance.update_loadbalancer(
context.get_admin_context(),
lb_id, {'loadbalancer': lb})
self.driver_rest_call_mock.assert_has_calls(calls)
self.driver_rest_call_mock.reset_mock()
lb_db = self.plugin_instance.db.get_loadbalancer(
context.get_admin_context(), lb_id)
self.driver.load_balancer.refresh(
context.get_admin_context(), lb_db)
self.driver_rest_call_mock.assert_has_calls(calls)
self.driver_rest_call_mock.reset_mock()
self.plugin_instance.delete_loadbalancer(
context.get_admin_context(), lb_id)
self.driver_rest_call_mock.assert_any_call(
'DELETE', '/api/workflow/LB_' + lb_id,
None, None)
self.assertRaises(loadbalancerv2.EntityNotFound,
self.plugin_instance.get_loadbalancer,
context.get_admin_context(), lb_id)
    def test_lb_stats(self):
        """stats() returns the counters served by the mocked vDirect."""
        with self.subnet(cidr='10.0.0.0/24') as s:
            with self.loadbalancer(subnet=s) as lb:
                lb_id = lb['loadbalancer']['id']
                with self.listener(loadbalancer_id=lb_id) as l:
                    with self.pool(
                        protocol=lb_const.PROTOCOL_HTTP,
                        listener_id=l['listener']['id']) as p:
                        with self.member(
                            no_delete=True, pool_id=p['pool']['id'],
                            subnet=s, address='10.0.1.10'):
                            rest_call_function_mock.__dict__.update(
                                {'WORKFLOW_MISSING': False})

                            stats = self.plugin_instance.stats(
                                context.get_admin_context(), lb_id,)
                            # Values come from the canned statistics payload
                            # in rest_call_function_mock.
                            self.assertEqual(stats, {'stats': {'bytes_in': 100,
                                'total_connections': 2,
                                'active_connections': 1, 'bytes_out': 200}})
def test_member_crud(self):
with self.subnet(cidr='10.0.0.0/24') as s:
with self.loadbalancer(subnet=s) as lb:
lb_id = lb['loadbalancer']['id']
with self.listener(loadbalancer_id=lb_id) as l:
with self.pool(
protocol=lb_const.PROTOCOL_HTTP,
listener_id=l['listener']['id']) as p:
with contextlib.nested(
self.member(
no_delete=True, pool_id=p['pool']['id'],
subnet=s, address='10.0.1.10'),
self.member(
no_delete=True, pool_id=p['pool']['id'],
subnet=s, address='10.0.1.20')) as (m1, m2):
m1_data = {
"id": m1['member']['id'],
"address": "10.0.1.10",
"protocol_port": 80,
"weight": 1, "admin_state_up": True,
"subnet": "255.255.255.255",
"mask": "255.255.255.255",
"gw": "255.255.255.255",
"admin_state_up": True}
m2_data = {
"id": m2['member']['id'],
"address": "10.0.1.20",
"protocol_port": 80,
"weight": 1, "admin_state_up": True,
"subnet": "255.255.255.255",
"mask": "255.255.255.255",
"gw": "255.255.255.255",
"admin_state_up": True}
pool_data = {
"id": p['pool']['id'],
"protocol": lb_const.PROTOCOL_HTTP,
"lb_algorithm": "ROUND_ROBIN",
"admin_state_up": True,
"members": [m1_data, m2_data]}
listener_data = {
"id": l['listener']['id'],
"admin_state_up": True,
"protocol_port": 80,
"protocol": lb_const.PROTOCOL_HTTP,
"connection_limit": -1,
"admin_state_up": True,
"default_pool": pool_data}
wf_apply_params = {'parameters': {
'listeners': [listener_data],
"admin_state_up": True,
"pip_address": "10.0.0.2",
"vip_address": "10.0.0.2"}}
calls = [
mock.call(
'POST', '/api/workflowTemplate/' +
'os_lb_v2?name=LB_' + lb_id, mock.ANY,
v2_driver.TEMPLATE_HEADER),
mock.call(
'POST',
'/api/workflow/LB_' + lb_id +
'/action/apply',
wf_apply_params,
v2_driver.TEMPLATE_HEADER)
]
self.driver_rest_call_mock.assert_has_calls(calls)
self.driver_rest_call_mock.reset_mock()
member = self.plugin_instance.db.get_pool_member(
context.get_admin_context(),
m1['member']['id']).to_dict(pool=False)
member['weight'] = 2
m1_data['weight'] = 2
self.plugin_instance.update_pool_member(
context.get_admin_context(),
m1['member']['id'], p['pool']['id'],
{'member': member})
calls = [
mock.call(
'POST',
'/api/workflow/LB_' + lb_id +
'/action/apply',
wf_apply_params,
v2_driver.TEMPLATE_HEADER)
]
self.driver_rest_call_mock.assert_has_calls(calls)
self.driver_rest_call_mock.reset_mock()
self.plugin_instance.delete_pool_member(
context.get_admin_context(),
m2['member']['id'], p['pool']['id'])
pool_data["members"] = [m1_data]
calls = [
mock.call(
'POST',
'/api/workflow/LB_' + lb_id +
'/action/apply',
wf_apply_params,
v2_driver.TEMPLATE_HEADER)
]
self.driver_rest_call_mock.assert_has_calls(calls)
lb = self.plugin_instance.db.get_loadbalancer(
context.get_admin_context(),
lb_id).to_dict(listener=False)
self.assertEqual(lb['provisioning_status'],
'ACTIVE')
def test_build_objects_with_tls(self):
with self.subnet(cidr='10.0.0.0/24') as vip_sub:
with self.loadbalancer(subnet=vip_sub) as lb:
lb_id = lb['loadbalancer']['id']
with contextlib.nested(
mock.patch('neutron_lbaas.services.loadbalancer.plugin.'
'cert_parser', autospec=True),
mock.patch('neutron_lbaas.services.loadbalancer.plugin.'
'CERT_MANAGER_PLUGIN.CertManager',
autospec=True)
) as (cert_parser_mock, cert_manager_mock):
cert_mock = mock.Mock(spec=cert_manager.Cert)
cert_mock.get_certificate.return_value = 'certificate'
cert_mock.get_intermediates.return_value = 'intermediates'
cert_mock.get_private_key.return_value = 'private_key'
cert_mock.get_private_key_passphrase.return_value = \
'private_key_passphrase'
cert_manager_mock.get_cert.return_value = cert_mock
cert_parser_mock.validate_cert.return_value = True
with self.listener(
protocol=lb_const.PROTOCOL_TERMINATED_HTTPS,
loadbalancer_id=lb_id,
default_tls_container_id='def1',
sni_container_ids=['sni1', 'sni2']) as listener:
with self.pool(
protocol=lb_const.PROTOCOL_HTTP,
listener_id=listener['listener']['id']) as pool:
with self.member(pool_id=pool['pool']['id'],
subnet=vip_sub,
address='10.0.1.10') as m:
wf_srv_params = copy.deepcopy(WF_SRV_PARAMS)
wf_params = copy.deepcopy(WF_CREATE_PARAMS)
wf_srv_params['name'] = 'srv_' + (
vip_sub['subnet']['network_id'])
wf_srv_params['tenantId'] = self._tenant_id
wf_srv_params['primary']['network'][
'portgroups'] = [vip_sub['subnet'][
'network_id']]
wf_params['parameters']['service_params'] = (
wf_srv_params)
m_data = {
"id": m['member']['id'],
"address": "10.0.1.10",
"protocol_port": 80,
"weight": 1, "admin_state_up": True,
"subnet": "255.255.255.255",
"mask": "255.255.255.255",
"gw": "255.255.255.255",
'admin_state_up': True}
default_tls_cert_data = {
'id': 'def1',
'certificate': 'certificate',
'intermediates': 'intermediates',
'private_key': 'private_key',
'passphrase': '<PASSWORD>'}
sni1_tls_cert_data = {
'id': 'sni1',
'position': 0,
'certificate': 'certificate',
'intermediates': 'intermediates',
'private_key': 'private_key',
'passphrase': '<PASSWORD>'}
sni2_tls_cert_data = {
'id': 'sni2',
'position': 1,
'certificate': 'certificate',
'intermediates': 'intermediates',
'private_key': 'private_key',
'passphrase': '<PASSWORD>'}
wf_apply_one_leg_params = {'parameters': {
'listeners': [{
"id": listener['listener']['id'],
| |
# lib/emulation/random.py -- part of the RUB-SysSec "tropyhunter" project
from elftools.elf.elffile import ELFFile, RelocationSection
from capstone import Cs, CS_ARCH_X86, CS_MODE_64
from .fuzzing import fuzzing_end_point, fuzzing_coverage
from .emulation import *
from .core import FunctionOutput, FunctionOutputType, FunctionEnds, InputDataTypeRuleGenerator, RegisterInput
from .lib_emulation import register_lib_emulations
from ..entropy.entropy import check_entropy
from typing import Set, Tuple
class CandidateErrorCodes(object):
    """Result codes for evaluating a PRNG candidate function via emulation."""

    SUCCESS = 0                  # emulation produced usable output data
    FUZZING_FAILED = 1           # fuzzing did not reach a valid state
    NO_OUTPUT_DSTS = 2           # all candidate output destinations removed
    TIMEOUT_SINGLE_RUN = 3       # one run hit its timeout (likely inf. loop)
    FUZZING_FAILED_TIMEOUT = 4   # fuzzing phase timed out as a whole
def init_output_dsts(emu_env: EmulatorEnv, fct_addr: int, input_regs: Dict[int, RegisterInput]) -> Set[FunctionOutput]:
    """Build the initial set of candidate output destinations for a function.

    Return registers are always candidates; additionally, every input
    register classified as a memory (pointer) input may receive output.
    """
    candidates = {FunctionOutput(fct_addr, FunctionOutputType.Register, ret_reg)
                  for ret_reg in emu_env.return_regs}
    candidates.update(
        FunctionOutput(fct_addr, FunctionOutputType.Pointer, uc_reg)
        for uc_reg, reg_input in input_regs.items()
        if reg_input.input_type == RegisterInputType.Memory)
    return candidates
def init_emulator_env(fct_ends: FunctionEnds,
                      single_run_timeout,
                      fuzzing_timeout) -> EmulatorEnv:
    """Create an EmulatorEnv preconfigured for x86-64 user-mode emulation.

    Stores a copy of the function-end information plus the two timeout
    values; all addresses and registers are x86-64 specific (see TODOs).
    """
    env = EmulatorEnv()

    # Architecture selection. TODO: make architecture configurable.
    env.uc_arch = UC_ARCH_X86
    env.uc_mode = UC_MODE_64
    env.capstone = Cs(CS_ARCH_X86, CS_MODE_64)
    env.stack_reg = UC_X86_REG_RSP
    env.return_regs = [UC_X86_REG_RAX, UC_X86_REG_XMM0]

    # Memory layout used during emulation.
    env.stack_addr = 0x7fffffff0000
    env.stack_size = 1024 * 1024
    env.emu_heap_start_addr = 0x2000000
    env.special_start_addr = 0x4000

    # Per-candidate state and limits.
    env.fct_ends = FunctionEnds.copy(fct_ends)
    env.single_run_timeout = single_run_timeout
    env.fuzzing_timeout = fuzzing_timeout
    env.emu_fd_start = 10

    return env
def load_binary(binary_file: str,
                emu_env: EmulatorEnv) -> List[InitialMemoryObject]:
    """Load the PT_LOAD segments of an ELF binary, ready for emulation.

    Optionally reads a '<binary>_plt.txt' side file mapping PLT stub
    names to addresses, rebases position-independent binaries to a fixed
    address, patches RELA relocations (JUMP_SLOT / GLOB_DAT / IRELATIVE)
    into the segment data, and returns the resulting memory images.
    PLT entries not covered by a relocation are kept in
    ``emu_env.plt_functions`` so they can be emulated by name.
    """
    # Import plt functions if exist.
    plt_fct_map = dict()
    if os.path.isfile(binary_file + "_plt.txt"):
        with open(binary_file + "_plt.txt", 'r') as fp:
            # Each line: "<hex address> <name>".
            for line in fp:
                line_split = line.split(" ")
                addr = int(line_split[0], 16)
                name = line_split[1].strip()
                plt_fct_map[name] = addr

    # Extract segments to load from the binary file.
    memory_objects = list()
    with open(binary_file, 'rb') as fp:
        elf_file = ELFFile(fp)  # TODO architecture specific

        # Set rebase address if we process position independent code.
        if elf_file.header["e_type"] == "ET_DYN":
            emu_env.rebase_addr = 0x400000
            emu_env.fct_ends.rebase(emu_env.rebase_addr)
            for name, addr in plt_fct_map.items():
                plt_fct_map[name] = addr + emu_env.rebase_addr

        # Get .dynsym section to resolve symbols.
        dynsym_section = None
        for section in elf_file.iter_sections():
            if section.name == ".dynsym":
                dynsym_section = section
                break

        # Get relocations from sections for loading of file.
        # IMPORTANT NOTE: sections are optional and can also be forged with
        # wrong information. Using sections here is more a convenience thing
        # since the relocation information is also available using the
        # segments (since the loader needs them).
        relocations = dict()  # type: Dict[int, bytes]
        for section in elf_file.iter_sections():
            if type(section) == RelocationSection:
                for relocation in section.iter_relocations():
                    if relocation.is_RELA():
                        info_type = relocation["r_info_type"]
                        mem_addr = relocation["r_offset"] + emu_env.rebase_addr
                        addend = relocation["r_addend"]

                        if info_type == 7:  # R_X86_64_JUMP_SLOT
                            symbol_no = relocation["r_info_sym"]
                            if dynsym_section and section.name == ".rela.plt":
                                symbol = dynsym_section.get_symbol(symbol_no)
                                value = symbol["st_value"]
                                # A zero st_value means the symbol has no
                                # local definition -- nothing to patch then.
                                if value != 0:
                                    value += emu_env.rebase_addr
                                    # Remove plt function entry if we have a
                                    # relocation entry for it.
                                    if symbol.name in plt_fct_map.keys():
                                        plt_import_value = plt_fct_map[symbol.name]
                                        # Ignore each plt relocation entry that
                                        # writes the same address as the plt
                                        # entry already has.
                                        if value == plt_import_value:
                                            continue
                                        del plt_fct_map[symbol.name]
                                    relocations[mem_addr] = struct.pack("Q", value)

                        elif info_type == 6:  # R_X86_64_GLOB_DAT
                            symbol_no = relocation["r_info_sym"]
                            if dynsym_section and section.name == ".rela.dyn":
                                symbol = dynsym_section.get_symbol(symbol_no)
                                value = symbol["st_value"]
                                if value != 0:
                                    value += emu_env.rebase_addr
                                    # Remove plt function entry if we have a
                                    # relocation entry for it.
                                    if symbol.name in plt_fct_map.keys():
                                        plt_import_value = plt_fct_map[symbol.name]
                                        # Ignore each plt relocation entry that
                                        # writes the same address as the plt
                                        # entry already has.
                                        if value == plt_import_value:
                                            continue
                                        del plt_fct_map[symbol.name]
                                    relocations[mem_addr] = struct.pack("Q", value)

                        elif info_type == 37:  # R_X86_64_IRELATIVE
                            relocations[mem_addr] = struct.pack("Q", addend)

                    else:
                        raise NotImplementedError("Relocations that are not RELA not impemented yet.")

        for segment in elf_file.iter_segments():
            # We are only interested in the data that is loaded into memory.
            if segment.header.p_type != "PT_LOAD":
                continue

            mem_addr = segment.header.p_vaddr + emu_env.rebase_addr
            mem_size = segment.header.p_memsz
            # Extract data from file since elftools appears to mishandle
            # its underlying stream.
            file_offset = segment.header.p_offset
            file_size = segment.header.p_filesz
            fp.seek(file_offset)
            data = fp.read(file_size)
            init_mem_obj = InitialMemoryObject(mem_addr, mem_size, data)

            # Modify memory to load with relocations.
            for relocation_addr, relocation_data in relocations.items():
                if mem_addr <= relocation_addr <= (mem_addr + mem_size):
                    init_mem_obj.change_data(relocation_addr, relocation_data)
                    emu_env.relocation_addrs.add(relocation_addr)

            memory_objects.append(init_mem_obj)

    # Store imported plt functions.
    for name, addr in plt_fct_map.items():
        emu_env.plt_functions[addr] = name

    return memory_objects
def create_candidate_data_emulation(binary_file: str,
mu: Uc,
emu_env: EmulatorEnv,
fct_start: int,
input_regs: Dict[int, RegisterInput],
min_size_data: int,
output_threshold: int,
fuzzing_max_attempts: int) -> Tuple[int, Set[FunctionOutput]]:
# Reset usage of fuzzing.
emu_env.fuzzing_used_end_point = False
emu_env.fuzzing_used_coverage = False
# Rebase start address before run.
fct_start += emu_env.rebase_addr
# Initialize possible output destinations.
output_dsts = init_output_dsts(emu_env, fct_start, input_regs)
# Check if we have a argument given as value which we consider as size argument.
is_size_arg_given = False
for _, input_reg in input_regs.items():
if input_reg.input_type == RegisterInputType.Value:
is_size_arg_given = True
break
error_code = CandidateErrorCodes.SUCCESS
ctr = 0
fuzzing_ctr = 0
max_rounds = int(min_size_data / 4) + 1
percent_ctr = int(max_rounds / 10) + 1
while ctr < max_rounds:
if ctr == 0:
print("Starting emulation process to generate random data.")
ctr += 1
if ctr % percent_ctr == 0:
print("%d%% of rounds are finished." % (int(ctr / percent_ctr) * 10))
# Check if every output destination has already created the minimum number of bytes
# we want to extract and stop emulation if we have.
is_min_data_reached = True
for output_dst in output_dsts:
if output_dst.size_data() < min_size_data:
is_min_data_reached = False
break
if is_min_data_reached:
print("Minimum output data of %d bytes reached for each output destination. Stopping emulation."
% min_size_data)
break
emulate_function(mu, emu_env, fct_start, input_regs)
# Abort if we had a timeout of a single run (most likely we hit an infinity loop).
if emu_env.single_run_is_timeout:
error_code = CandidateErrorCodes.TIMEOUT_SINGLE_RUN
return (error_code, set())
# Check if emulation ended at an instruction we wanted it to end.
# If not we search for data read from the memory that holds still the initial
# value and mark it for changing.
last_end_addr = mu.reg_read(UC_X86_REG_RIP)
if emu_env.fct_ends.end_valid(last_end_addr):
error_code = CandidateErrorCodes.SUCCESS
# Extract output data.
for output_dst in output_dsts:
# Extract output from the register as destination.
if output_dst.output_type == FunctionOutputType.Register:
reg_data = mu.reg_read(output_dst.register)
# Always consider output to be 4 bytes (because we do not know if the PRNG
# works on int32.
if output_dst.register == UC_X86_REG_XMM0:
curr_output = struct.pack("d", reg_data)
elif reg_data > 4294967295:
curr_output = struct.pack("Q", reg_data)
else:
curr_output = struct.pack("I", reg_data)
output_dst.add_data(curr_output[0:4])
# Extract output from a memory region given as input pointer.
elif output_dst.output_type == FunctionOutputType.Pointer:
addr = input_regs[output_dst.register].value
# Extract 8 bytes if we have an argument which we consider as size argument.
if is_size_arg_given:
curr_output = mu.mem_read(addr, 8)
# If we do not have a size argument, let us play safe and only consider 4 bytes as output.
else:
curr_output = mu.mem_read(addr, 4)
output_dst.add_data(bytes(curr_output))
else:
raise NotImplementedError("Do not know type of output destination.")
# When the same output occurs more often than the given threshold we remove the
# output destination as possible target.
if (ctr % output_threshold) == 0:
for output_dst in set(output_dsts):
if output_dst.data_threshold_reached(output_threshold):
output_dsts.remove(output_dst)
# If we do not have any possible output targets anymore we can abort the emulation
# as unsuccessful.
if not output_dsts:
# Perhaps some initial state has to be set to reach the correct basic block. Perform coverage
# guided fuzzing in order to find new ways to reach a basic block.
print("Ended with no output destination candidate. Starting coverage fuzzing process.")
if fuzzing_ctr < fuzzing_max_attempts:
fuzzing_ctr += 1
if fuzzing_coverage(mu, emu_env, fct_start, input_regs):
# Reset emulation loop in order to retry finding suitable output destinations.
output_dsts = init_output_dsts(emu_env, fct_start, input_regs)
error_code = CandidateErrorCodes.SUCCESS
ctr = 0
continue
else:
print("Maximal number of fuzzing attempts reached. Skipping fuzzing process.")
print("No new coverage found for output generation.")
error_code = CandidateErrorCodes.NO_OUTPUT_DSTS
break
else:
# Start fuzzing of the function to find a memory configuration that leads to a
# valid end instruction. If we have found one, continue emulation.
print("Ended in wrong function end %08x. Starting fuzzing process." % last_end_addr)
if fuzzing_ctr < fuzzing_max_attempts:
fuzzing_ctr += 1
if fuzzing_end_point(mu, emu_env, fct_start, input_regs):
ctr -= 1
continue
else:
print("Maximal number of fuzzing attempts reached. Skipping | |
import condition, seed, substitute, trace
>>> import numpyro.distributions as dist
>>> def model():
... numpyro.sample('a', dist.Normal(0., 1.))
>>> model = seed(model, random.PRNGKey(0))
>>> exec_trace = trace(condition(model, {'a': -1})).get_trace()
>>> assert exec_trace['a']['value'] == -1
>>> assert exec_trace['a']['is_observed']
"""
def __init__(self, fn=None, data=None, condition_fn=None):
self.condition_fn = condition_fn
self.data = data
if sum((x is not None for x in (data, condition_fn))) != 1:
raise ValueError(
"Only one of `data` or `condition_fn` " "should be provided."
)
super(condition, self).__init__(fn)
def process_message(self, msg):
if (msg["type"] != "sample") or msg.get("_control_flow_done", False):
if msg["type"] == "control_flow":
if self.data is not None:
msg["kwargs"]["substitute_stack"].append(("condition", self.data))
if self.condition_fn is not None:
msg["kwargs"]["substitute_stack"].append(
("condition", self.condition_fn)
)
return
if self.data is not None:
value = self.data.get(msg["name"])
else:
value = self.condition_fn(msg)
if value is not None:
msg["value"] = value
msg["is_observed"] = True
class infer_config(Messenger):
    """Update the ``infer`` dict of every sample site.

    Wraps a callable ``fn`` containing NumPyro primitive calls; for each
    sample site, ``config_fn(site)`` is evaluated and its result merged
    into that site's ``infer`` kwarg.

    :param fn: a stochastic function (callable containing NumPyro
        primitive calls)
    :param config_fn: a callable taking a site and returning an infer dict
    """

    def __init__(self, fn=None, config_fn=None):
        super().__init__(fn)
        self.config_fn = config_fn

    def process_message(self, msg):
        # Only sample sites carry an ``infer`` dict to update.
        if msg["type"] == "sample":
            msg["infer"].update(self.config_fn(msg))
class lift(Messenger):
    """
    Given a stochastic function with ``param`` calls and a prior distribution,
    create a stochastic function where all param calls are replaced by sampling from prior.
    Prior should be a distribution or a dict of names to distributions.

    Consider the following NumPyro program:

        >>> import numpyro
        >>> import numpyro.distributions as dist
        >>> from numpyro.handlers import lift
        >>>
        >>> def model(x):
        ...     s = numpyro.param("s", 0.5)
        ...     z = numpyro.sample("z", dist.Normal(x, s))
        ...     return z ** 2
        >>> lifted_model = lift(model, prior={"s": dist.Exponential(0.3)})

    ``lift`` makes ``param`` statements behave like ``sample`` statements
    using the distributions in ``prior``. In this example, site `s` will now behave
    as if it was replaced with ``s = numpyro.sample("s", dist.Exponential(0.3))``.

    :param fn: function whose parameters will be lifted to random values
    :param prior: prior function in the form of a Distribution or a dict of Distributions
    """

    def __init__(self, fn=None, prior=None):
        super().__init__(fn)
        self.prior = prior
        # Cache of rewritten sample messages keyed by site name so that
        # repeated ``param`` statements with the same name share one draw.
        self._samples_cache = {}

    def __enter__(self):
        # Reset the cache on each handler activation.
        self._samples_cache = {}
        return super().__enter__()

    def __exit__(self, *args, **kwargs):
        self._samples_cache = {}
        return super().__exit__(*args, **kwargs)

    def process_message(self, msg):
        """Rewrite a ``param`` site into a ``sample`` site drawn from the prior."""
        if msg["type"] != "param":
            return
        name = msg["name"]
        # Look up a per-site prior when a dict was given, otherwise use the
        # single distribution for every param site.
        fn = self.prior.get(name) if isinstance(self.prior, dict) else self.prior
        if isinstance(fn, numpyro.distributions.Distribution):
            # Rewrite the message in place so downstream handlers treat it
            # as an ordinary sample site.
            msg["type"] = "sample"
            msg["fn"] = fn
            msg["args"] = ()
            msg["kwargs"] = {
                "rng_key": msg["kwargs"].get("rng_key", None),
                "sample_shape": msg["kwargs"].get("sample_shape", ()),
            }
            msg["intermediates"] = []
            msg["infer"] = msg.get("infer", {})
        else:
            # otherwise leave as is
            return

        if name in self._samples_cache:
            # Multiple pyro.param statements with the same
            # name. Block the site and fix the value.
            msg["value"] = self._samples_cache[name]["value"]
            msg["is_observed"] = True
            msg["stop"] = True
        else:
            self._samples_cache[name] = msg
            msg["is_observed"] = False
class mask(Messenger):
    """Mask out some sample statements elementwise.

    :param mask: a boolean or a boolean-valued array for masking
        elementwise log probability of sample sites (``True`` includes a
        site, ``False`` excludes a site).
    """

    def __init__(self, fn=None, mask=True):
        if jnp.result_type(mask) != "bool":
            raise ValueError("`mask` should be a bool array.")
        self.mask = mask
        super().__init__(fn)

    def process_message(self, msg):
        site_type = msg["type"]
        if site_type == "sample":
            msg["fn"] = msg["fn"].mask(self.mask)
            return
        if site_type == "inspect":
            # Combine with any mask already attached to the site.
            existing = msg["mask"]
            msg["mask"] = self.mask if existing is None else self.mask & existing
class reparam(Messenger):
    """Reparametrize affected sample sites into one or more auxiliary
    sample sites followed by a deterministic transformation [1].

    To specify reparameterizers, pass a ``config`` dict or callable to the
    constructor. See the :mod:`numpyro.infer.reparam` module for available
    reparameterizers.

    Note some reparameterizers can examine the ``*args,**kwargs`` inputs of
    functions they affect; these reparameterizers require using
    ``handlers.reparam`` as a decorator rather than as a context manager.

    [1] Maria I. Gorinova, Dave Moore, Matthew D. Hoffman (2019)
        "Automatic Reparameterisation of Probabilistic Programs"
        https://arxiv.org/pdf/1906.03028.pdf

    :param config: Configuration, either a dict mapping site name to
        :class:`~numpyro.infer.reparam.Reparam` ,
        or a function mapping site to
        :class:`~numpyro.infer.reparam.Reparam` or None.
    :type config: dict or callable
    """

    def __init__(self, fn=None, config=None):
        assert isinstance(config, dict) or callable(config)
        self.config = config
        super().__init__(fn)

    def process_message(self, msg):
        if msg["type"] != "sample":
            return
        if isinstance(self.config, dict):
            strategy = self.config.get(msg["name"])
        else:
            strategy = self.config(msg)
        if strategy is None:
            return

        new_fn, value = strategy(msg["name"], msg["fn"], msg["value"])

        if value is not None:
            if new_fn is None:
                # Fully reparameterized: the site collapses to a plain
                # deterministic value; drop every other site attribute.
                msg["type"] = "deterministic"
                msg["value"] = value
                for key in list(msg.keys()):
                    if key not in ("type", "name", "value"):
                        del msg[key]
                return
            if msg["value"] is None:
                msg["is_observed"] = True
            msg["value"] = value
        msg["fn"] = new_fn
class scale(Messenger):
    """Rescale the log probability score.

    Typically used for data subsampling or for stratified sampling of data
    (e.g. in fraud detection where negatives vastly outnumber positives).

    :param scale: a positive scaling factor that is broadcastable to the
        shape of log probability.
    :type scale: float or numpy.ndarray
    """

    def __init__(self, fn=None, scale=1.0):
        # Only validate concrete values; tracers cannot be compared eagerly.
        if not_jax_tracer(scale) and np.any(np.less_equal(scale, 0)):
            raise ValueError("'scale' argument should be positive.")
        self.scale = scale
        super().__init__(fn)

    def process_message(self, msg):
        if msg["type"] not in ("param", "sample", "plate"):
            return
        # Compose multiplicatively with any scale already on the site.
        existing = msg.get("scale")
        msg["scale"] = self.scale if existing is None else self.scale * existing
class scope(Messenger):
    """Prepend a prefix followed by a divider to the name of sample sites.

    **Example:**

    .. doctest::

       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import scope, seed, trace

       >>> def model():
       ...     with scope(prefix="a"):
       ...         with scope(prefix="b", divider="."):
       ...             return numpyro.sample("x", dist.Bernoulli(0.5))
       ...
       >>> assert "a/b.x" in trace(seed(model, 0)).get_trace()

    :param fn: Python callable with NumPyro primitives.
    :param str prefix: a string to prepend to sample names
    :param str divider: a string to join the prefix and sample name;
        default to `'/'`
    """

    def __init__(self, fn=None, prefix="", divider="/"):
        self.prefix = prefix
        self.divider = divider
        super().__init__(fn)

    def process_message(self, msg):
        site_name = msg.get("name")
        if site_name:
            msg["name"] = f"{self.prefix}{self.divider}{site_name}"
        frames = msg.get("cond_indep_stack")
        if frames:
            # Rename plate frames consistently with the site name.
            renamed = []
            for frame in frames:
                renamed.append(
                    CondIndepStackFrame(
                        f"{self.prefix}{self.divider}{frame.name}",
                        frame.dim,
                        frame.size,
                    )
                )
            msg["cond_indep_stack"] = renamed
class seed(Messenger):
    """
    JAX uses a functional pseudo random number generator that requires passing
    in a seed :func:`~jax.random.PRNGKey` to every stochastic function. The
    `seed` handler allows us to initially seed a stochastic function with a
    :func:`~jax.random.PRNGKey`. Every call to the :func:`~numpyro.handlers.sample`
    primitive inside the function results in a splitting of this initial seed
    so that we use a fresh seed for each subsequent call without having to
    explicitly pass in a `PRNGKey` to each `sample` call.

    :param fn: Python callable with NumPyro primitives.
    :param rng_seed: a random number generator seed.
    :type rng_seed: int, jnp.ndarray scalar, or jax.random.PRNGKey

    .. note::

        Unlike in Pyro, `numpyro.sample` primitive cannot be used without wrapping
        it in seed handler since there is no global random state. As such,
        users need to use `seed` as a contextmanager to generate samples from
        distributions or as a decorator for their model callable (See below).

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.handlers
       >>> import numpyro.distributions as dist
       >>> # as context manager
       >>> with handlers.seed(rng_seed=1):
       ...     x = numpyro.sample('x', dist.Normal(0., 1.))
       >>> def model():
       ...     return numpyro.sample('y', dist.Normal(0., 1.))
       >>> # as function decorator (/modifier)
       >>> y = handlers.seed(model, rng_seed=1)()
       >>> assert x == y
    """

    def __init__(self, fn=None, rng_seed=None):
        # Promote a plain int or a scalar array to a proper PRNG key.
        is_scalar_array = (
            isinstance(rng_seed, (np.ndarray, jnp.ndarray)) and not jnp.shape(rng_seed)
        )
        if isinstance(rng_seed, int) or is_scalar_array:
            rng_seed = random.PRNGKey(rng_seed)
        # After promotion, the seed must look like a PRNG key: uint32, shape (2,).
        is_prng_key = (
            isinstance(rng_seed, (np.ndarray, jnp.ndarray))
            and rng_seed.dtype == jnp.uint32
            and rng_seed.shape == (2,)
        )
        if not is_prng_key:
            raise TypeError("Incorrect type for rng_seed: {}".format(type(rng_seed)))
        self.rng_key = rng_seed
        super(seed, self).__init__(fn)

    def process_message(self, msg):
        # A fresh key is needed for unobserved samples lacking a key, and for
        # the bookkeeping message types listed below.
        needs_key = msg["type"] in ["prng_key", "plate", "control_flow"] or (
            msg["type"] == "sample"
            and not msg["is_observed"]
            and msg["kwargs"]["rng_key"] is None
        )
        if not needs_key:
            return
        if msg["value"] is not None:
            # no need to create a new key when value is available
            return
        self.rng_key, subkey = random.split(self.rng_key)
        msg["kwargs"]["rng_key"] = subkey
class substitute(Messenger):
"""
Given a callable `fn` and a dict `data` keyed by site names
(alternatively, a callable `substitute_fn`), return a callable
which substitutes all primitive calls in `fn` with values from
`data` | |
b*r5, evaluate=False)
((sqrt(3)*a, sqrt(5)*b, sqrt(2)*(a + b)), 3)
>>> collect_sqrt(a*sqrt(2) + b, evaluate=False)
((b, sqrt(2)*a), 1)
>>> collect_sqrt(a + b, evaluate=False)
((a + b,), 0)
See Also
========
collect, collect_const, rcollect
"""
if evaluate is None:
evaluate = global_evaluate[0]
# this step will help to standardize any complex arguments
# of sqrts
coeff, expr = expr.as_content_primitive()
vars = set()
for a in Add.make_args(expr):
for m in a.args_cnc()[0]:
if m.is_number and (
m.is_Pow and m.exp.is_Rational and m.exp.q == 2 or
m is S.ImaginaryUnit):
vars.add(m)
# we only want radicals, so exclude Number handling; in this case
# d will be evaluated
d = collect_const(expr, *vars, Numbers=False)
hit = expr != d
if not evaluate:
nrad = 0
# make the evaluated args canonical
args = list(ordered(Add.make_args(d)))
for i, m in enumerate(args):
c, nc = m.args_cnc()
for ci in c:
# XXX should this be restricted to ci.is_number as above?
if ci.is_Pow and ci.exp.is_Rational and ci.exp.q == 2 or \
ci is S.ImaginaryUnit:
nrad += 1
break
args[i] *= coeff
if not (hit or nrad):
args = [Add(*args)]
return tuple(args), nrad
return coeff*d
def collect_const(expr, *vars, **kwargs):
    """A non-greedy collection of terms with similar number coefficients in
    an Add expr. If ``vars`` is given then only those constants will be
    targeted. Although any Number can also be targeted, if this is not
    desired set ``Numbers=False`` and no Float or Rational will be collected.
    Examples
    ========
    >>> from sympy import sqrt
    >>> from sympy.abc import a, s, x, y, z
    >>> from sympy.simplify.simplify import collect_const
    >>> collect_const(sqrt(3) + sqrt(3)*(1 + sqrt(2)))
    sqrt(3)*(sqrt(2) + 2)
    >>> collect_const(sqrt(3)*s + sqrt(7)*s + sqrt(3) + sqrt(7))
    (sqrt(3) + sqrt(7))*(s + 1)
    >>> s = sqrt(2) + 2
    >>> collect_const(sqrt(3)*s + sqrt(3) + sqrt(7)*s + sqrt(7))
    (sqrt(2) + 3)*(sqrt(3) + sqrt(7))
    >>> collect_const(sqrt(3)*s + sqrt(3) + sqrt(7)*s + sqrt(7), sqrt(3))
    sqrt(7) + sqrt(3)*(sqrt(2) + 3) + sqrt(7)*(sqrt(2) + 2)
    The collection is sign-sensitive, giving higher precedence to the
    unsigned values:
    >>> collect_const(x - y - z)
    x - (y + z)
    >>> collect_const(-y - z)
    -(y + z)
    >>> collect_const(2*x - 2*y - 2*z, 2)
    2*(x - y - z)
    >>> collect_const(2*x - 2*y - 2*z, -2)
    2*x - 2*(y + z)
    See Also
    ========
    collect, collect_sqrt, rcollect
    """
    # Only a sum has terms whose common constants can be collected.
    if not expr.is_Add:
        return expr
    recurse = False
    Numbers = kwargs.get('Numbers', True)
    if not vars:
        # No targets given: collect every numeric factor that appears in
        # any term, and enable recursion over newly formed sub-sums.
        recurse = True
        vars = set()
        for a in expr.args:
            for m in Mul.make_args(a):
                if m.is_number:
                    vars.add(m)
    else:
        vars = sympify(vars)
    if not Numbers:
        vars = [v for v in vars if not v.is_Number]
    vars = list(ordered(vars))
    # NOTE: ``vars`` may grow while this loop runs (see vars.append below);
    # Python's list iterator picks up the appended entries, which is how
    # collection recurses over newly formed factors.  Do not restructure.
    for v in vars:
        terms = defaultdict(list)
        # Factors form of the current collection target, for exact division.
        Fv = Factors(v)
        for m in Add.make_args(expr):
            f = Factors(m)
            q, r = f.div(Fv)
            if r.is_one:
                # only accept this as a true factor if
                # it didn't change an exponent from an Integer
                # to a non-Integer, e.g. 2/sqrt(2) -> sqrt(2)
                # -- we aren't looking for this sort of change
                fwas = f.factors.copy()
                fnow = q.factors
                if not any(k in fwas and fwas[k].is_Integer and not
                        fnow[k].is_Integer for k in fnow):
                    terms[v].append(q.as_expr())
                    continue
            # m does not contain v as a clean factor; keep it unchanged.
            terms[S.One].append(m)
        args = []
        hit = False
        uneval = False
        for k in ordered(terms):
            v = terms[k]
            if k is S.One:
                args.extend(v)
                continue
            if len(v) > 1:
                v = Add(*v)
                hit = True
                if recurse and v != expr:
                    vars.append(v)
            else:
                v = v[0]
            # be careful not to let uneval become True unless
            # it must be because it's going to be more expensive
            # to rebuild the expression as an unevaluated one
            if Numbers and k.is_Number and v.is_Add:
                args.append(_keep_coeff(k, v, sign=True))
                uneval = True
            else:
                args.append(k*v)
        # Rebuild expr only if at least one collection actually happened.
        if hit:
            if uneval:
                expr = _unevaluated_Add(*args)
            else:
                expr = Add(*args)
            if not expr.is_Add:
                break
    return expr
def _split_gcd(*a):
"""
split the list of integers ``a`` into a list of integers, ``a1`` having
``g = gcd(a1)``, and a list ``a2`` whose elements are not divisible by
``g``. Returns ``g, a1, a2``
Examples
========
>>> from sympy.simplify.simplify import _split_gcd
>>> _split_gcd(55, 35, 22, 14, 77, 10)
(5, [55, 35, 10], [22, 14, 77])
"""
g = a[0]
b1 = [g]
b2 = []
for x in a[1:]:
g1 = gcd(g, x)
if g1 == 1:
b2.append(x)
else:
g = g1
b1.append(x)
return g, b1, b2
def _is_sum_surds(p):
args = p.args if p.is_Add else [p]
for y in args:
if not ((y**2).is_Rational and y.is_real):
return False
return True
def _nthroot_solve(p, n, prec):
    """
    helper function for ``nthroot``
    It denests ``p**Rational(1, n)`` using its minimal polynomial
    """
    from sympy.polys.numberfields import _minimal_polynomial_sq
    from sympy.solvers import solve
    # Strip factors of two from n by repeatedly denesting a square root.
    while n % 2 == 0:
        p = sqrtdenest(sqrt(p))
        n = n // 2
    if n == 1:
        return p
    pn = p**Rational(1, n)
    x = Symbol('x')
    f = _minimal_polynomial_sq(p, n, x)
    if f is None:
        return None
    # Among the polynomial's roots, pick the one numerically closest to the
    # target root, then verify the denested form symbolically.
    tolerance = 1./10**prec
    for candidate in solve(f, x):
        if abs(candidate - pn).n() < tolerance:
            denested = sqrtdenest(candidate)
            if _mexpand(denested**n) == p:
                return denested
def nthroot(expr, n, max_len=4, prec=15):
    """
    compute a real nth-root of a sum of surds
    Parameters
    ==========
    expr : sum of surds
    n : integer
    max_len : maximum number of surds passed as constants to ``nsimplify``
    Algorithm
    =========
    First ``nsimplify`` is used to get a candidate root; if it is not a
    root the minimal polynomial is computed; the answer is one of its
    roots.
    Examples
    ========
    >>> from sympy.simplify.simplify import nthroot
    >>> from sympy import Rational, sqrt
    >>> nthroot(90 + 34*sqrt(7), 3)
    sqrt(7) + 3
    """
    # NOTE(review): sqrt_depth and is_algebraic are imported here but not
    # referenced in this function body — confirm whether they are vestigial.
    from sympy.simplify.sqrtdenest import sqrt_depth, is_algebraic
    expr = sympify(expr)
    n = sympify(n)
    # Fallback answer: the unevaluated symbolic root.
    p = expr**Rational(1, n)
    if not n.is_integer:
        return p
    if not _is_sum_surds(expr):
        return p
    # Collect the square-root factors (surds) appearing in the terms;
    # bail out to the symbolic root for anything this routine cannot handle.
    surds = []
    coeff_muls = [x.as_coeff_Mul() for x in expr.args]
    for x, y in coeff_muls:
        if not x.is_rational:
            return p
        if y is S.One:
            continue
        if not (y.is_Pow and y.exp == S.Half and y.base.is_integer):
            return p
        surds.append(y)
    surds.sort()
    surds = surds[:max_len]
    # Odd roots of negative sums: work with -expr and negate the result.
    if expr < 0 and n % 2 == 1:
        p = (-expr)**Rational(1, n)
        a = nsimplify(p, constants=surds)
        res = a if _mexpand(a**n) == _mexpand(-expr) else p
        return -res
    a = nsimplify(p, constants=surds)
    # NOTE(review): ``is not`` compares object identity here; this relies on
    # sympy's expression cache returning identical objects for equal
    # expressions — confirm that ``!=`` was not intended instead.
    if _mexpand(a) is not _mexpand(p) and _mexpand(a**n) == _mexpand(expr):
        return _mexpand(a)
    # nsimplify failed; fall back to the minimal-polynomial approach.
    expr = _nthroot_solve(expr, n, prec)
    if expr is None:
        return p
    return expr
def split_surds(expr):
    """
    split an expression with terms whose squares are rationals
    into a sum of terms whose surds squared have gcd equal to g
    and a sum of terms with surds squared prime with g
    Examples
    ========
    >>> from sympy import sqrt
    >>> from sympy.simplify.simplify import split_surds
    >>> split_surds(3*sqrt(3) + sqrt(5)/7 + sqrt(6) + sqrt(10) + sqrt(15))
    (3, sqrt(2) + sqrt(5) + 3, sqrt(5)/7 + sqrt(10))
    """
    # Canonically order the terms so the gcd split is deterministic.
    args = sorted(expr.args, key=default_sort_key)
    coeff_muls = [x.as_coeff_Mul() for x in args]
    # Squares of the surd parts, e.g. sqrt(6) contributes 6.
    surds = [x[1]**2 for x in coeff_muls if x[1].is_Pow]
    surds.sort(key=default_sort_key)
    g, b1, b2 = _split_gcd(*surds)
    g2 = g
    if not b2 and len(b1) >= 2:
        b1n = [x/g for x in b1]
        b1n = [x for x in b1n if x != 1]
        # only a common factor has been factored; split again
        g1, b1n, b2 = _split_gcd(*b1n)
        g2 = g*g1
    # Partition the terms: surds divisible by g2 go into ``a`` (with the
    # common sqrt(g2) factored out); everything else goes into ``b``.
    a1v, a2v = [], []
    for c, s in coeff_muls:
        if s.is_Pow and s.exp == S.Half:
            s1 = s.base
            if s1 in b1:
                a1v.append(c*sqrt(s1/g2))
            else:
                a2v.append(c*s)
        else:
            a2v.append(c*s)
    a = Add(*a1v)
    b = Add(*a2v)
    return g2, a, b
def rad_rationalize(num, den):
    """
    Rationalize num/den by removing square roots in the denominator;
    num and den are sum of terms whose squares are rationals
    Examples
    ========
    >>> from sympy import sqrt
    >>> from sympy.simplify.simplify import rad_rationalize
    >>> rad_rationalize(sqrt(3), 1 + sqrt(2)/3)
    (-sqrt(3) + sqrt(6)/3, -7/9)
    """
    # Base case: a non-sum denominator has no surd sum left to clear.
    if not den.is_Add:
        return num, den
    g, a, b = split_surds(den)
    # Multiply above and below by the conjugate (a - b), where ``a`` carries
    # the common factor sqrt(g); recurse until the denominator is surd-free.
    a = a*sqrt(g)
    return rad_rationalize(_mexpand((a - b)*num), _mexpand(a**2 - b**2))
def radsimp(expr, symbolic=True, max_terms=4):
"""
Rationalize the denominator by removing square roots.
Note: the expression returned from radsimp must be used with | |
mass fractions.
xc12 = rho_c12[rho_T_mask] / dens
xsi28 = rho_si28[rho_T_mask] / dens
xni56 = rho_ni56[rho_T_mask] / dens
# What we want is to make a mask array that determines which
# zones are dominated by each of these three species.
c12_mask = np.where(np.logical_and(xc12 > xsi28, xc12 > xni56))
si28_mask = np.where(np.logical_and(xsi28 > xc12, xsi28 > xni56))
ni56_mask = np.where(np.logical_and(xni56 > xc12, xni56 > xsi28))
# Now create separate scatterplots for each of these three species.
markersize = 40
# The \! fixes an issue with spacing after a superscript in
# the version of matplotlib that is shipped with yt, though
# it looks like it was fixed as of November 2015:
# https://github.com/matplotlib/matplotlib/pull/4873
plt.rcParams['mathtext.default'] = 'regular' # So the exponent is the same font as the text
plt.scatter(dens[c12_mask], temp[c12_mask], color='g', s=markersize, marker='o', label=r'${}^{12\!}$C')
plt.scatter(dens[si28_mask], temp[si28_mask], color='b', s=markersize, marker='s', label=r'${}^{28\!}$Si')
plt.scatter(dens[ni56_mask], temp[ni56_mask], color='r', s=markersize, marker='d', label=r'${}^{56\!}$Ni')
# Insert a buffer at the bottom of the plot since there will
# likely be a lot of points at the floor.
min_temp = 0.7 * min_temp
min_dens = 0.7 * min_dens
plt.xlim([min_dens, max_dens])
plt.ylim([min_temp, max_temp])
plt.xscale('log')
plt.yscale('log')
# Axis labels and legend.
plt.xlabel(r'Density (g / cm$^{-3\!}$)', fontsize=20)
plt.ylabel(r'Temperature (K)', fontsize=20)
plt.tick_params(labelsize=16)
plt.legend(loc='upper left', fontsize=16, scatterpoints=1)
# Save the plotfile.
plt.tight_layout()
plt.savefig(output_filename)
insert_commits_into_eps(output_filename, pltfile, 'plot')
plt.close()
def rho_T_sliceplot(output_filename, pltfile,
                    negate_left = False, scale_exp = 9, npix = 2000,
                    dens_range = None, temp_range = None,
                    x_ticks = None, y_ticks = None,
                    n_dens_ticks = 4, n_temp_ticks = 4,
                    do_ts_te_contours = False, ts_te_contours = None,
                    domain_frac = 1.0):
    """Create a slice plot of rho and T using yt.

    negate_left: use negative tick marks for the left panel.
    scale_exp: the exponent (in base 10) used for scaling the axes.
    npix: number of pixels to use in the buffer for generating the image.
    dens_range: range of densities to plot (default [1.e-4, 1.e8]).
    temp_range: range of temperatures to plot (default [1.e7, 1.e10]).
    x_ticks, y_ticks: tick locations (default [2.0e9, 4.0e9] each).
    n_dens_ticks: number of tick marks on the density colorbar.
    n_temp_ticks: number of tick marks on the temperature colorbar.
    do_ts_te_contours / ts_te_contours: overlay t_sound/t_enuc contours.
    domain_frac: fraction of the domain to display.

    output_filename must end in "eps"; a PNG copy is also written.
    """
    import numpy as np
    import yt
    import matplotlib
    import matplotlib.pyplot as plt
    from yt.visualization.api import get_multi_plot
    from matplotlib.colors import LogNorm
    # None sentinels replace the original mutable list defaults so that no
    # list object is shared across calls.
    if dens_range is None:
        dens_range = [1.e-4, 1.e8]
    if temp_range is None:
        temp_range = [1.e7, 1.e10]
    if x_ticks is None:
        x_ticks = [2.0e9, 4.0e9]
    if y_ticks is None:
        y_ticks = [2.0e9, 4.0e9]
    if ts_te_contours is None:
        ts_te_contours = [0.1]
    # Complain if the file type is not EPS
    if output_filename[-3:] != "eps":
        print("Error: expecting an EPS output file.")
        return
    # Update font size as first action so that it takes effect immediately
    matplotlib.rcParams.update({'font.size': 20})
    fig, axes, colorbars = get_multi_plot(2, 1, colorbar = 'horizontal', bw = 6)
    ds = yt.load(pltfile)
    dim = ds.dimensionality
    buff = [npix, npix]
    bounds = [domain_frac * ds.domain_left_edge[0], domain_frac * ds.domain_right_edge[0],
              domain_frac * ds.domain_left_edge[1], domain_frac * ds.domain_right_edge[1]]
    if dim == 1:
        # BUG FIX: this branch previously evaluated the bare name ``exit``
        # (a no-op expression), so execution fell through and crashed on the
        # undefined ``slc`` below.  Return instead.
        print("This slice plot routine is not implemented in one dimension.")
        return
    else:
        # Both 2D and 3D use the z = 0 slice.
        slc = ds.slice(2, 0.0)
    frb = yt.visualization.fixed_resolution.FixedResolutionBuffer(slc, bounds=bounds, buff_size=buff)
    dens_axis = axes[0][0]
    temp_axis = axes[0][1]
    scale = domain_frac / 10**scale_exp
    # NOTE(review): "-y" is assumed to be the lower-y boundary condition
    # entry in the plotfile parameters — confirm against the data format.
    axisym = dim == 2 and ds.parameters["-y"] == "symmetry"
    if negate_left:
        dens_axis.set_xticks([-x_ticks[1] * scale, -x_ticks[0] * scale])
    else:
        if dim == 2:
            dens_axis.set_xticks([x_ticks[1] * scale, x_ticks[0] * scale])
        else:
            dens_axis.set_xticks([-x_ticks[1] * scale, -x_ticks[0] * scale,
                                  0.0,
                                  x_ticks[0] * scale, x_ticks[1] * scale])
    if axisym:
        dens_axis.set_yticks([0.0 * scale,
                              y_ticks[0] * scale,
                              y_ticks[1] * scale])
    else:
        dens_axis.set_yticks([-y_ticks[1] * scale, -y_ticks[0] * scale,
                              0.0 * scale,
                              y_ticks[0] * scale, y_ticks[1] * scale])
    if dim == 2:
        temp_axis.set_xticks([0.0 * scale,
                              x_ticks[0] * scale,
                              x_ticks[1] * scale])
    else:
        temp_axis.set_xticks([-x_ticks[1] * scale, -x_ticks[0] * scale,
                              0.0,
                              x_ticks[0] * scale, x_ticks[1] * scale])
    if axisym:
        temp_axis.set_yticks([0.0 * scale, y_ticks[0] * scale, y_ticks[1] * scale])
    else:
        temp_axis.set_yticks([-y_ticks[1] * scale, -y_ticks[0] * scale,
                              0.0 * scale,
                              y_ticks[0] * scale, y_ticks[1] * scale])
    dens_axis.yaxis.tick_left()
    dens_axis.yaxis.set_label_position("left")
    temp_axis.yaxis.tick_right()
    temp_axis.yaxis.set_label_position("right")
    dens = np.array(frb['density'])
    temp = np.array(frb['Temp'])
    plots = []
    aspect = 1.0
    if negate_left:
        left_bound = [bounds[0].v * scale, -bounds[1].v * scale, bounds[2].v * scale, bounds[3].v * scale]
    else:
        if axisym:
            # Flip the y extent for the axisymmetric single-star case.
            left_bound = [bounds[0].v * scale, bounds[1].v * scale, bounds[3].v * scale, bounds[2].v * scale]
        else:
            left_bound = [bounds[0].v * scale, bounds[1].v * scale, bounds[2].v * scale, bounds[3].v * scale]
    plots.append(dens_axis.imshow(dens, norm=LogNorm(), extent=left_bound, aspect=aspect))
    plots[-1].set_clim(dens_range[0], dens_range[1])
    plots[-1].set_cmap('bone')
    if axisym:
        time_position = [0.115, 0.725]
    else:
        time_position = [0.150, 0.785]
    dens_axis.annotate("t = {:.2f} s".format(float(ds.current_time.d)), time_position,
                       xycoords='figure fraction', color='white', fontsize=20)
    if axisym:
        right_bound = [bounds[0].v * scale, bounds[1].v * scale, bounds[3].v * scale, bounds[2].v * scale]
    else:
        right_bound = [bounds[0].v * scale, bounds[1].v * scale, bounds[2].v * scale, bounds[3].v * scale]
    plots.append(temp_axis.imshow(temp, norm=LogNorm(), extent=right_bound, aspect=aspect))
    plots[-1].set_clim(temp_range[0], temp_range[1])
    plots[-1].set_cmap("hot")
    if do_ts_te_contours:
        ts_te = np.array(frb['t_sound_t_enuc'])
        temp_axis.contour(ts_te, levels=ts_te_contours, extent=right_bound)
    dens_ticks = np.logspace(np.log10(dens_range[0]), np.log10(dens_range[1]), num=n_dens_ticks)
    cb_dens = fig.colorbar(plots[0], cax=colorbars[0], ax=dens_axis, orientation='horizontal', ticks=dens_ticks)
    cb_dens.solids.set_rasterized(True)
    cb_dens.set_label(r'$\mathrm{Density}\ (\mathrm{g\ cm^{-3}})$')
    temp_ticks = np.logspace(np.log10(temp_range[0]), np.log10(temp_range[1]), num=n_temp_ticks)
    cb_temp = fig.colorbar(plots[1], cax=colorbars[1], ax=temp_axis, orientation='horizontal', ticks=temp_ticks)
    cb_temp.solids.set_rasterized(True)
    cb_temp.set_label(r'$\mathrm{Temperature}\ (\mathrm{K})$')
    if dim == 2:
        dens_axis.set_xlim(dens_axis.get_xlim()[::-1])
        if axisym:
            # Only simulating one star; flip the axis
            dens_axis.set_ylim(dens_axis.get_ylim()[::-1])
            temp_axis.set_ylim(temp_axis.get_ylim()[::-1])
    # Panel/colorbar placement differs between the axisymmetric, full-2D,
    # and 3D layouts.
    if dim == 2:
        if axisym:
            dens_axis.set_position([0.125, 0.075, 0.375, 0.75])
            temp_axis.set_position([0.500, 0.075, 0.375, 0.75])
            colorbars[0].set_position([0.2125, 0.825, 0.2, 0.075])
            colorbars[1].set_position([0.5875, 0.825, 0.2, 0.075])
        else:
            dens_axis.set_position([0.125 + 0.0575, 0.075, 0.375, 0.75])
            temp_axis.set_position([0.500 + 0.0575, 0.075, 0.375, 0.75])
            colorbars[0].set_position([0.275, 0.92, 0.2, 0.075])
            colorbars[1].set_position([0.525, 0.92, 0.2, 0.075])
    else:
        dens_axis.set_position([0.125, 0.075, 0.375, 0.75])
        temp_axis.set_position([0.500, 0.075, 0.375, 0.75])
        colorbars[0].set_position([0.2125, 0.92, 0.2, 0.075])
        colorbars[1].set_position([0.5875, 0.92, 0.2, 0.075])
    if dim == 2:
        dens_axis.set_xlabel(r'$r\ (10^{}\ \mathrm{{cm}})$'.format('{' + str(scale_exp) + '}'))
        temp_axis.set_xlabel(r'$r\ (10^{}\ \mathrm{{cm}})$'.format('{' + str(scale_exp) + '}'))
        dens_axis.set_ylabel(r'$z\ (10^{}\ \mathrm{{cm}})$'.format('{' + str(scale_exp) + '}'))
        temp_axis.set_ylabel(r'$z\ (10^{}\ \mathrm{{cm}})$'.format('{' + str(scale_exp) + '}'))
    else:
        dens_axis.set_xlabel(r'$x\ (10^{}\ \mathrm{{cm}})$'.format('{' + str(scale_exp) + '}'))
        temp_axis.set_xlabel(r'$x\ (10^{}\ \mathrm{{cm}})$'.format('{' + str(scale_exp) + '}'))
        dens_axis.set_ylabel(r'$y\ (10^{}\ \mathrm{{cm}})$'.format('{' + str(scale_exp) + '}'))
        temp_axis.set_ylabel(r'$y\ (10^{}\ \mathrm{{cm}})$'.format('{' + str(scale_exp) + '}'))
    # Save as EPS
    fig.savefig(output_filename, bbox_inches='tight')
    insert_commits_into_eps(output_filename, pltfile, 'plot')
    # Save as PNG
    fig.savefig(output_filename.replace('eps', 'png'), bbox_inches='tight')
    plt.close()
# A routine for doing axis-aligned slice plots over a given field.
def slice_plot(field, output_filename, pltfile, idir = 3):
    """Create an axis-aligned slice plot over a given field with yt.

    field: name of the field to plot.
    output_filename: output image path (a '.png' copy is also written).
    pltfile: plotfile to load.
    idir: slice normal (1/2/3 -> 'r'/'z'/'theta' in 2D, 'x'/'y'/'z' in 3D).
    """
    import matplotlib
    matplotlib.use('agg')
    import yt
    import matplotlib.pyplot as plt
    ds = yt.load(pltfile)
    dim = ds.dimensionality
    fields = [field]
    # BUG FIX: the unsupported branches previously evaluated the bare name
    # ``exit`` (a no-op expression), so execution fell through and crashed
    # with ``axis`` undefined.  Return explicitly instead.
    if dim == 1:
        print("This slice plot routine is not implemented in one dimension.")
        return
    elif dim == 2:
        if idir == 1:
            axis = 'r'
        elif idir == 2:
            axis = 'z'
        elif idir == 3:
            axis = 'theta'
        else:
            print("Unknown direction for slicing in slice_plot.")
            return
    elif dim == 3:
        if idir == 1:
            axis = 'x'
        elif idir == 2:
            axis = 'y'
        elif idir == 3:
            axis = 'z'
        else:
            print("Unknown direction for slicing in slice_plot.")
            return
    sp = yt.SlicePlot(ds, axis, fields=fields)
    sp.set_cmap(field, 'hot')
    plot = sp.plots[field]
    cb = plot.cb
    cb.solids.set_rasterized(True)
    sp._setup_plots()
    plt.savefig(output_filename)
    insert_commits_into_eps(output_filename, pltfile, 'plot')
    plt.savefig(output_filename[:-4] + '.png')
    plt.close()
# A routine for doing axis-aligned multi-panel slice plots over a given field.
def multipanel_slice_plot(field, output_filename, pltfiles, idir = 3,
zlim = None, colormap = 'hot', scale_exp = 9,
nrows = 2, ncols = 2, axes_pad = 0.10,
zoom = 1.0,
xticks = None, yticks = None,
rect = [0.03,0.075,0.92,0.90],
annotate_time = False):
"""Create an axis-aligned multi-panel slice plot over a given field with yt."""
import yt
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
# Sanity check: are there enough plots to fill the axes grid?
if nrows * ncols != len(pltfiles):
print("Error: not enough plots for the multipanel slice plot.")
exit
fig = plt.figure()
# Set up the AxesGrid.
grid = AxesGrid(fig,
rect,
nrows_ncols = (nrows, ncols),
axes_pad = axes_pad,
label_mode = "L",
share_all = False,
cbar_location = "right",
cbar_mode = "single",
cbar_size = "5%",
cbar_pad = "1%")
for i, pltfile in enumerate(pltfiles):
ds = yt.load(pltfile)
dim = ds.dimensionality
fields = [field]
if dim == 1:
print("This slice plot routine is not implemented in one dimension.")
exit
elif dim == 2:
if idir == 1:
axis = 'r'
elif idir == 2:
axis = | |
"""
Copyright 2018 Duo Security
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
"""
import json
import operator
import itertools
import argparse
import pyjq
import copy
import urllib.parse
from netaddr import IPNetwork, IPAddress
from shared.common import get_account, get_regions, is_external_cidr
from shared.query import query_aws, get_parameter_file
from shared.nodes import (
Account,
Region,
Vpc,
Az,
Subnet,
Ec2,
Elb,
Elbv2,
Rds,
VpcEndpoint,
Ecs,
Lambda,
Redshift,
ElasticSearch,
Cidr,
Connection,
)
__description__ = "Generate network connection information file"
MUTE = False
def log(msg):
    """Print *msg* to stdout unless the module-level MUTE flag is set."""
    if not MUTE:
        print(msg)
def get_vpcs(region, outputfilter):
    """Return the VPCs in *region*, optionally filtered by id or Name tag."""
    selectors = []
    if "vpc-ids" in outputfilter:
        selectors.append(
            " | select (.VpcId | contains({}))".format(outputfilter["vpc-ids"])
        )
    if "vpc-names" in outputfilter:
        selectors.append(
            ' | select(.Tags != null) | select (.Tags[] | (.Key == "Name") and (.Value | contains({})))'.format(
                outputfilter["vpc-names"]
            )
        )
    vpcs = query_aws(region.account, "ec2-describe-vpcs", region)
    return pyjq.all(".Vpcs[]?{}".format("".join(selectors)), vpcs)
def get_azs(vpc):
    """Return the availability zones visible in *vpc*'s region."""
    response = query_aws(vpc.account, "ec2-describe-availability-zones", vpc.region)
    return pyjq.all(".AvailabilityZones[]", response)
def get_vpc_peerings(region):
    """Return the VPC peering connections for *region*."""
    response = query_aws(
        region.account, "ec2-describe-vpc-peering-connections", region
    )
    return pyjq.all(".VpcPeeringConnections[]?", response)
def get_subnets(az):
    """Return the subnets that belong to availability zone *az*."""
    response = query_aws(az.account, "ec2-describe-subnets", az.region)
    selector = '.Subnets[] | select(.VpcId == "{}") | select(.AvailabilityZone == "{}")'.format(
        az.vpc.local_id, az.local_id
    )
    return pyjq.all(selector, response)
def get_ec2s(region):
    """Return the EC2 instances in *region* that are in the running state."""
    response = query_aws(region.account, "ec2-describe-instances", region.region)
    return pyjq.all(
        '.Reservations[]?.Instances[] | select(.State.Name == "running")', response
    )
def get_elbs(region):
    """Return the classic (v1) load balancer descriptions in *region*."""
    response = query_aws(
        region.account, "elb-describe-load-balancers", region.region
    )
    return pyjq.all(".LoadBalancerDescriptions[]?", response)
def get_elbv2s(region):
    """Return the v2 load balancers (ALBs and NLBs) in *region*."""
    response = query_aws(
        region.account, "elbv2-describe-load-balancers", region.region
    )
    return pyjq.all(".LoadBalancers[]?", response)
def get_vpc_endpoints(region):
    """Return the VPC endpoints in *region*."""
    response = query_aws(region.account, "ec2-describe-vpc-endpoints", region.region)
    return pyjq.all(".VpcEndpoints[]?", response)
def get_rds_instances(region):
    """Return the RDS database instances in *region*."""
    response = query_aws(region.account, "rds-describe-db-instances", region.region)
    return pyjq.all(".DBInstances[]?", response)
def get_ecs_tasks(region):
    """Return the ECS tasks for every cluster in *region*.

    Task descriptions are read back from on-disk pages named
    ``describe_tasks_<i>`` under the account-data directory.
    """
    tasks = []
    clusters = query_aws(region.account, "ecs-list-clusters", region.region)
    for clusterArn in clusters.get("clusterArns", []):
        tasks_json = get_parameter_file(region, "ecs", "list-tasks", clusterArn)
        # NOTE(review): floor division reads only complete pages of 100 task
        # ARNs (e.g. 250 ARNs -> pages 0 and 1 only, and fewer than 100 ARNs
        # reads nothing) — confirm against the collection step whether a
        # final partial page exists and a ceiling is intended here.
        for i in range(0, len(tasks_json["taskArns"]) // 100):
            task_path = "account-data/{}/{}/{}/{}/{}".format(
                region.account.name,
                region.region.name,
                "ecs-describe-tasks",
                urllib.parse.quote_plus(clusterArn),
                urllib.parse.quote_plus(f"describe_tasks_{i}")
            )
            # Use a context manager so the file handle is closed promptly;
            # the original left the handle to the garbage collector.
            with open(task_path) as task_file:
                cluster_tasks = json.load(task_file)
            tasks += cluster_tasks["tasks"]
    return tasks
def get_lambda_functions(region):
    """Return the Lambda functions in *region* that are attached to a VPC."""
    response = query_aws(region.account, "lambda-list-functions", region.region)
    return pyjq.all(".Functions[]?|select(.VpcConfig!=null)", response)
def get_redshift(region):
    """Return the Redshift clusters in *region*."""
    response = query_aws(region.account, "redshift-describe-clusters", region.region)
    return pyjq.all(".Clusters[]?", response)
def get_elasticsearch(region):
    """Return the Elasticsearch domains in *region* that live inside a VPC."""
    vpc_domains = []
    domain_json = query_aws(region.account, "es-list-domain-names", region.region)
    for domain in pyjq.all(".DomainNames[]?", domain_json):
        status = get_parameter_file(
            region, "es", "describe-elasticsearch-domain", domain["DomainName"]
        )["DomainStatus"]
        # Only VPC-resident domains are relevant for network mapping.
        if "VPCOptions" in status:
            vpc_domains.append(status)
    return vpc_domains
def get_sgs(vpc):
    """Return the security groups defined in *vpc*."""
    response = query_aws(vpc.account, "ec2-describe-security-groups", vpc.region)
    return pyjq.all(
        '.SecurityGroups[] | select(.VpcId == "{}")'.format(vpc.local_id), response
    )
def get_external_cidrs(account, config):
    """Collect the external (public) CIDRs referenced by any security group
    in *account*, wrapped as Cidr nodes carrying their configured names."""
    # Dict keys double as an insertion-ordered set of unique CIDR strings,
    # preserving the original first-seen ordering of the result.
    unique_cidrs = {}
    for region in account.children:
        for vpc in region.children:
            for sg in get_sgs(vpc):
                for cidr in pyjq.all(".IpPermissions[].IpRanges[].CidrIp", sg):
                    unique_cidrs[cidr] = 1
    # Drop private ranges; only public CIDRs become graph nodes.
    return [
        Cidr(cidr, get_cidr_name(cidr, config))
        for cidr in unique_cidrs
        if is_external_cidr(cidr)
    ]
def get_cidr_name(cidr, config):
    """Look up the friendly name configured for *cidr*; None if absent."""
    cidr_entry = config["cidrs"].get(cidr, {})
    return cidr_entry.get("name", None)
def add_connection(connections, source, target, reason):
    """Record *reason* as one justification for the edge source -> target.

    ``connections`` maps Connection -> list of reasons; the list is created
    the first time an edge is seen.
    """
    # setdefault constructs the Connection key once and performs a single
    # lookup, replacing the original get/assign pair that built it twice.
    connections.setdefault(Connection(source, target), []).append(reason)
def get_connections(cidrs, vpc, outputfilter):
    """
    For a VPC, for each instance, find all of the other instances that can connect to it,
    including those in peered VPCs.
    Note I do not consider subnet ACLs, routing tables, or some other network concepts.

    cidrs: dict mapping CIDR string -> Cidr node; nodes referenced by a
        security group of a public resource get is_used set to True here.
    vpc: the Vpc node whose leaves (instances/resources) are examined.
    outputfilter: options dict; "internal_edges" and "inter_rds_edges" are read.
    Returns a dict mapping Connection -> list of reasons (security groups).
    """
    connections = {}
    # Get mapping of security group names to nodes that have that security group
    sg_to_instance_mapping = {}
    for instance in vpc.leaves:
        for sg in instance.security_groups:
            sg_to_instance_mapping.setdefault(sg, {})[instance] = True
    # For each security group, find all the instances that are allowed to connect to instances
    # within that group.
    for sg in get_sgs(vpc):
        # Get the CIDRs that are allowed to connect
        for cidr in pyjq.all(".IpPermissions[].IpRanges[].CidrIp", sg):
            if not is_external_cidr(cidr):
                # This is a private IP, ex. 10.0.0.0/16
                # See if we should skip this
                if not outputfilter.get("internal_edges", True):
                    continue
                # Find all instances in this VPC and peered VPCs that are in this CIDR
                for sourceVpc in itertools.chain(vpc.peers, (vpc,)):
                    # Ensure it is possible for instances in this VPC to be in the CIDR
                    if not (
                        IPNetwork(sourceVpc.cidr) in IPNetwork(cidr)
                        or IPNetwork(cidr) in IPNetwork(sourceVpc.cidr)
                    ):
                        # The CIDR from the security group does not overlap with the CIDR of the VPC,
                        # so skip it
                        continue
                    # For each instance, check if one of its IPs is within the CIDR
                    for sourceInstance in sourceVpc.leaves:
                        for ip in sourceInstance.ips:
                            if IPAddress(ip) in IPNetwork(cidr):
                                # Instance found that can connect to instances in the SG
                                # So connect this instance (sourceInstance) to every instance
                                # in the SG.
                                for targetInstance in sg_to_instance_mapping.get(
                                    sg["GroupId"], {}
                                ):
                                    add_connection(
                                        connections, sourceInstance, targetInstance, sg
                                    )
            else:
                # This is an external IP (ie. not in a private range).
                for instance in sg_to_instance_mapping.get(sg["GroupId"], {}):
                    # Ensure it has a public IP, as resources with only private IPs can't be reached
                    if instance.is_public:
                        cidrs[cidr].is_used = True
                        add_connection(connections, cidrs[cidr], instance, sg)
                    else:
                        if cidr == "0.0.0.0/0":
                            # Resource is not public, but allows anything to access it,
                            # so mark set all the resources in the VPC as allowing access to it.
                            for source_instance in vpc.leaves:
                                add_connection(
                                    connections, source_instance, instance, sg
                                )
        if outputfilter.get("internal_edges", True):
            # Connect allowed in Security Groups
            for ingress_sg in pyjq.all(
                ".IpPermissions[].UserIdGroupPairs[].GroupId", sg
            ):
                # We have an SG and a list of SG's it allows in
                for target in sg_to_instance_mapping.get(sg["GroupId"], {}):
                    # We have an instance and a list of SG's it allows in
                    for source in sg_to_instance_mapping.get(ingress_sg, {}):
                        # Optionally suppress RDS-to-RDS edges to reduce noise.
                        if (
                            not outputfilter.get("inter_rds_edges", True)
                            and (
                                source.node_type == "rds"
                                or source.node_type == "rds_rr"
                            )
                            and (
                                target.node_type == "rds"
                                or target.node_type == "rds_rr"
                            )
                        ):
                            continue
                        add_connection(connections, source, target, sg)
    # Connect everything to the Gateway endpoints
    for targetResource in vpc.leaves:
        if targetResource.has_unrestricted_ingress:
            for sourceVpc in itertools.chain(vpc.peers, (vpc,)):
                for sourceResource in sourceVpc.leaves:
                    add_connection(connections, sourceResource, targetResource, [])
    # Remove connections for source nodes that cannot initiate traffic (ex. VPC endpoints)
    for connection in list(connections):
        if not connection.source.can_egress:
            del connections[connection]
    return connections
def add_node_to_subnets(region, node, nodes):
    """
    Given a node, find all the subnets it thinks it belongs to,
    and duplicate it and add it a child of those subnets

    region: the Region node whose VPC/AZ/subnet tree is searched.
    node: the resource node to (re)attach; node.subnets lists its subnet ids.
    nodes: dict of arn -> node; the original entry is removed and replaced
        by one entry per matching subnet (or by the VPC-level entry).
    """
    # Remove node from dictionary
    del nodes[node.arn]
    # Add a new node (potentially the same one) back to the dictionary
    for vpc in region.children:
        if len(node.subnets) == 0 and node._parent and vpc.local_id == node._parent.local_id:
            # VPC Gateway Endpoints (S3 and DynamoDB) reside in a VPC, not a subnet
            # So set the relationship between the VPC and the node
            nodes[node.arn] = node
            vpc.addChild(node)
            break
        for az in vpc.children:
            for subnet in az.children:
                for node_subnet in node.subnets:
                    if node_subnet == subnet.local_id:
                        # Copy the node (shallow copy: attribute objects are
                        # shared between the per-subnet duplicates)
                        subnet_node = copy.copy(node)
                        # Set the subnet name on the copy, and potentially a new arn
                        subnet_node.set_subnet(subnet)
                        # Add to the set
                        nodes[subnet_node.arn] = subnet_node
                        subnet.addChild(subnet_node)
def get_resource_nodes(region, outputfilter):
nodes = {}
# EC2 nodes
for ec2_json in get_ec2s(region):
node = Ec2(
region,
ec2_json,
outputfilter.get("collapse_by_tag", False),
outputfilter.get("collapse_asgs", False),
)
nodes[node.arn] = node
# RDS nodes
for rds_json in get_rds_instances(region):
node = Rds(region, rds_json)
if not outputfilter.get("read_replicas", False) and node.node_type == "rds_rr":
continue
nodes[node.arn] = node
# ELB nodes
for elb_json in | |
""" File utils base class for NXOS devices. """
import logging
# Parent inheritance
from .. import FileUtils as FileUtilsDeviceBase
# Dir parser
try:
from genie.libs.parser.nxos.show_platform import Dir
except ImportError:
# For apidoc building only
from unittest.mock import Mock; Dir=Mock()
logger = logging.getLogger(__name__)
class FileUtils(FileUtilsDeviceBase):
def copyfile(self, source, destination, timeout_seconds=300,
             vrf=None, compact=False, use_kstack=False, *args, **kwargs):
    """ Copy a file to/from an NXOS device.

    Transfers any supported file between the device and a server, or
    between locations on the device itself.

    Parameters
    ----------
    source: `str`
        Full path to the copy 'from' location
    destination: `str`
        Full path to the copy 'to' location
    timeout_seconds: `str`
        The number of seconds to wait before aborting the operation
    vrf: `str`
        Vrf to be used during copy operation; when omitted, defaults to
        'management' (a deprecation warning is logged)
    compact: `bool`
        Compress the image during the copy operation (N9K only)
    use_kstack: `bool`
        Use the faster copy path (N9K only); not supported with file
        transfer protocols that prompt for a username and password

    Returns
    -------
    `None`

    Raises
    ------
    Exception
        When a device object is not present or device execution
        encountered an unexpected behavior.

    Examples
    --------
    >>> from pyats.utils.fileutils import FileUtils
    >>> fu_device = FileUtils.from_device(device)
    >>> fu_device.copyfile(
    ...     source='flash:/memleak.tcl',
    ...     destination='ftp://10.1.0.213//auto/tftp-ssr/memleak.tcl',
    ...     timeout_seconds='300', device=device)
    """
    # Prefer a device passed as an argument over the stored attribute.
    device = kwargs.get('device') or getattr(self, 'device', None)
    cache_ip = kwargs.get('cache_ip', True)

    # Resolve both endpoints against the valid addresses in the testbed.
    source = self.validate_and_update_url(source,
                                          device=device,
                                          vrf=vrf,
                                          cache_ip=cache_ip)
    destination = self.validate_and_update_url(destination,
                                               device=device,
                                               vrf=vrf,
                                               cache_ip=cache_ip)

    if vrf is None:
        logger.warning('Using default vrf "management" for NXOS. This '
                       'default will change to None in v21.4. Please '
                       'explicitly specify "management" to continue using '
                       'this vrf.')
        vrf = 'management'

    # Assemble the CLI command piece by piece, e.g.
    # copy flash:/memleak.tcl ftp://10.1.0.213//auto/tftp-ssr/memleak.tcl vrf management
    pieces = ['copy', source, destination]
    if compact:
        # for n9k only
        pieces.append('compact')
    if vrf:
        pieces.extend(['vrf', vrf])
    if use_kstack:
        # for n9k only
        pieces.append('use-kstack')
    cmd = ' '.join(pieces)

    # Server address is needed downstream for authentication.
    used_server = self.get_server(source, destination)

    return super().copyfile(source=source, destination=destination,
        timeout_seconds=timeout_seconds, cmd=cmd, used_server=used_server,
        vrf=vrf, *args, **kwargs)
def dir(self, target, timeout_seconds=300, *args, **kwargs):
    """ List the files at the top level of a device directory.

    Subdirectories are not recursed into; every entry is returned as a
    full URL of the form "<scheme>:/<name>".

    Parameters
    ----------
    target : `str`
        The directory whose details are to be retrieved.
    timeout_seconds : `int`
        The number of seconds to wait before aborting the operation.

    Returns
    -------
    `list` : File name URLs found directly under `target`.

    Raises
    ------
    AttributeError
        device object not passed in the function call
    Exception
        Parser encountered an issue

    Examples
    --------
    >>> from pyats.utils.fileutils import FileUtils
    >>> fu_device = FileUtils.from_device(device)
    >>> fu_device.dir(target='bootflash:',
    ...     timeout_seconds=300, device=device)
    ['bootflash:/virtual-instance.conf', 'bootflash:/memleak.tcl', ...]
    """
    # Run `dir` on the device and parse it with the Dir parser.
    parsed = super().parsed_dir(target, timeout_seconds,
                                Dir, *args, **kwargs)
    # Rebuild each bare file name into a "<scheme>:/<name>" URL.
    prefix = self.parse_url(target).scheme + ":/"
    return [prefix + name for name in parsed['files']]
def stat(self, target, timeout_seconds=300, *args, **kwargs):
    """ Retrieve details (size, permissions, index, last-modified date)
    for a single file.

    Parameters
    ----------
    target : `str`
        The URL of the file whose details are to be retrieved.
    timeout_seconds : `int`
        The number of seconds to wait before aborting the operation.

    Returns
    -------
    `dict` : Details for the requested file, e.g.
        {'last_modified_date': 'Mar 20 2018 10:26:01 +00:00',
         'size': '104260', 'permissions': '-rw-', 'index': '69705'}

    Raises
    ------
    AttributeError
        device object not passed in the function call
    Exception
        Parser encountered an issue

    Examples
    --------
    >>> from pyats.utils.fileutils import FileUtils
    >>> fu_device = FileUtils.from_device(device)
    >>> details = fu_device.stat(target='flash:memleak.tcl',
    ...     timeout_seconds=300, device=device)
    >>> details['size']
    '104260'
    """
    # Parse the whole directory listing, then pick out the one entry
    # whose path matches the requested target.
    listing = super().stat(target, timeout_seconds, Dir, *args, **kwargs)
    requested = self.parse_url(target)
    return listing['files'][requested.path]
def deletefile(self, target, timeout_seconds=300, *args, **kwargs):
    """ Delete a file from the device.

    Parameters
    ----------
    target : `str`
        The URL of the file to delete.
    timeout_seconds : `int`
        The number of seconds to wait before aborting the operation.

    Returns
    -------
    None

    Raises
    ------
    Exception
        When a device object is not present or device execution
        encountered an unexpected behavior.

    Examples
    --------
    >>> from pyats.utils.fileutils import FileUtils
    >>> fu_device = FileUtils.from_device(device)
    >>> fu_device.deletefile(
    ...     target='flash:memleak_bckp.tcl',
    ...     timeout_seconds=300, device=device)
    """
    # The generic base-class implementation already issues the right CLI.
    super().deletefile(target, timeout_seconds, *args, **kwargs)
def renamefile(self, source, destination, timeout_seconds=300, *args,
               **kwargs):
    """ Rename a file on the device.

    Parameters
    ----------
    source : `str`
        The URL of the file to be renamed.
    destination : `str`
        The URL of the new file name.
    timeout_seconds : `int`
        Maximum allowed amount of time for the operation.

    Returns
    -------
    None

    Raises
    ------
    Exception
        When a device object is not present or device execution
        encountered an unexpected behavior.

    Examples
    --------
    >>> from pyats.utils.fileutils import FileUtils
    >>> fu_device = FileUtils.from_device(device)
    >>> fu_device.renamefile(target='bootflash:memleak.tcl',
    ...     destination='memleak_backup.tcl',
    ...     timeout_seconds=300, device=device)
    """
    # NXOS uses `move`, e.g.: move bootflash:memleak.tcl memleak_j.tcl
    cmd = ' '.join(('move', source, destination))
    super().renamefile(source, destination, timeout_seconds, cmd,
                       *args, **kwargs)
def chmod(self, target, mode, timeout_seconds=300, *args, **kwargs):
    """ Change file permissions (not implemented for NXOS).

    Parameters
    ----------
    target : `str`
        The URL of the file whose permissions are to be changed.
    mode : `int`
        Same format as `os.chmod`.
    timeout_seconds : `int`
        Maximum allowed amount of time for the operation.

    Raises
    ------
    NotImplementedError
        Always; NXOS exposes no chmod equivalent.
    """
    message = "The fileutils module {} " \
        "does not implement chmod.".format(self.__module__)
    raise NotImplementedError(message)
def validateserver(self, target, timeout_seconds=300,
                   vrf='management', *args, **kwargs):
    ''' Verify that the given server information is valid.

    Confirms that the device can reach the server by redirecting
    `show clock` output to a file at `target` via the transfer
    protocol, after which the file is removed.

    Parameters
    ----------
    target (`str`): File path including the protocol,
        server and file location.
    timeout_seconds (`str`):
        The number of seconds to wait before aborting the operation.
    vrf (`str`):
        Vrf value to be used during execution. Default is `management`

    Returns
    -------
    `None`

    Raises
    ------
    Exception: If the command from the device to server is unreachable
        or the protocol used doesn't support remote checks.

    Examples
    --------
    >>> from pyats.utils.fileutils import FileUtils
    >>> fu_device = FileUtils.from_device(device)
    >>> fu_device.validateserver(
    ...     target='ftp://10.1.7.250//auto/tftp-ssr/show_clock',
    ...     timeout_seconds=300, device=device)
    '''
    # Server address is needed downstream for authentication.
    server = self.get_server(target)
    # e.g.: show clock > tftp://10.1.0.213//auto/ftp-ssr/show_clock vrf management
    cmd = "show clock > {} vrf {}".format(target, vrf)
    super().validateserver(cmd=cmd, target=target,
        timeout_seconds=timeout_seconds, used_server=server, *args,
        **kwargs)
def copyconfiguration(self, source, destination, timeout_seconds=300,
vrf='management', *args, **kwargs):
""" Copy configuration to/from device
Copy configuration on the device or between locations supported on the
device and on the server.
Parameters
----------
source: `str`
Full path to the copy 'from' location
destination: `str`
Full path to the copy 'to' location
timeout_seconds: `str`
The number of seconds to wait before aborting the operation
vrf: `str`
Vrf to be used during copy operation
Returns
-------
`None`
Raises
------
Exception
When a device object is not present or device execution
encountered an unexpected behavior.
| |
<reponame>SFGLab/ChromoLooping<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: zparteka
"""
from peak_stats.reader.peaks import Image, Group, Peak
from scipy.spatial import ConvexHull
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
# Histograms global options (shared by all ImgStats histogram plots)
Figsize = (30, 20)      # figure size in inches (width, height)
LabelFontsize = 35      # axis-label font size
TitleFontsize = 40      # title font size
TickFontsize = 30       # tick-label font size
Facecolor = "g"         # histogram bar color (matplotlib shorthand: green)
Alpha = 0.75            # histogram bar transparency
Dpi = 300               # resolution for saved PNG figures
# 3D plots global options
Color = "red"           # marker color for 3D scatter plots
class ImgStats:
    """Whole-image statistics over localization groups and peaks.

    Aggregates counts, per-group average sigmas (x/y/z, in nm — the
    pixel-to-nm conversion happens in GroupStats), and photon tallies,
    and provides histogram plot helpers. Each plot helper either shows
    the figure or, when ``save=True``, writes a PNG whose name is built
    from the ``outdir`` prefix.
    """

    def __init__(self, image: Image):
        self.group_count = image.group_count()
        self.peak_count = image.peak_count()
        # [per-group avg sigma X list, ... Y list, ... Z list]
        self.avg_group_sigma_xyz = self.add_groups_sigma(image)
        self.avg_peaks = self.avg_peaks_per_group(image)
        self.photons = self.photons_in_image(image=image)
        self.photons_per_group = self.average_photons_per_group(image)

    @staticmethod
    def add_groups_sigma(image: Image):
        """Collect per-group average sigma for x, y and z.

        Returns [sigma_x_list, sigma_y_list, sigma_z_list], one entry
        per group in the image.
        """
        avg_groups_sigma_z = []
        avg_groups_sigma_x = []
        avg_groups_sigma_y = []
        for group in image.groups:
            new_group_stat = GroupStats(group)
            avg_groups_sigma_x.append(new_group_stat.group_avg_sigma_x_pos_full)
            avg_groups_sigma_y.append(new_group_stat.group_avg_sigma_y_pos_full)
            avg_groups_sigma_z.append(new_group_stat.group_avg_sigma_z)
        return [avg_groups_sigma_x, avg_groups_sigma_y, avg_groups_sigma_z]

    @staticmethod
    def add_peak_sigma(image: Image):
        """Return per-peak sigmas as [[sigma_x...], [sigma_y...], [sigma_z...]]."""
        sigma_x = []
        sigma_y = []
        sigma_z = []
        for group in image.groups:
            group_stats = GroupStats(group=group)
            sigma_x += group_stats.list_peak_sigma_x(group=group)
            sigma_y += group_stats.list_peak_sigma_y(group=group)
            sigma_z += group_stats.list_peak_sigma_z(group=group)
        return [sigma_x, sigma_y, sigma_z]

    @staticmethod
    def peaks_per_group(image: Image):
        """Return a list with the number of peaks in each group."""
        peaks_in_spots = []
        for spot in image.groups:
            peaks_in_spots.append(len(spot))
        return peaks_in_spots

    def avg_peaks_per_group(self, image: Image):
        """Mean number of peaks per group across the image."""
        peaks = self.peaks_per_group(image)
        avg = sum(peaks) / len(peaks)
        return avg

    def average_sigma_x(self):
        """Mean of the per-group average sigma X values (nm)."""
        average_sigma_x = sum(self.avg_group_sigma_xyz[0]) / len(self.avg_group_sigma_xyz[0])
        return average_sigma_x

    def average_sigma_y(self):
        """Mean of the per-group average sigma Y values (nm)."""
        average_sigma_y = sum(self.avg_group_sigma_xyz[1]) / len(self.avg_group_sigma_xyz[1])
        return average_sigma_y

    def average_sigma_z(self):
        """Mean of the per-group average sigma Z values (nm)."""
        average_sigma_z = sum(self.avg_group_sigma_xyz[2]) / len(self.avg_group_sigma_xyz[2])
        return average_sigma_z

    def average_sigma(self):
        """Mean of the x, y and z averages (simple average of the three)."""
        average = (sum(self.avg_group_sigma_xyz[2]) / len(self.avg_group_sigma_xyz[2]) + sum(
            self.avg_group_sigma_xyz[1]) / len(self.avg_group_sigma_xyz[1]) + sum(self.avg_group_sigma_xyz[0]) / len(
            self.avg_group_sigma_xyz[0])) / 3
        return average

    def plot_sigma_x_hist(self, save=False, outdir=None):
        """Histogram of per-group average sigma X."""
        plt.figure(figsize=Figsize)
        plt.hist(self.avg_group_sigma_xyz[0], 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Groups', fontsize=LabelFontsize)
        plt.xlabel('Sigma X', fontsize=LabelFontsize)
        plt.title('Sigma X', fontsize=TitleFontsize)
        plt.tick_params(axis='both', which='major', labelsize=TickFontsize)
        plt.grid(True)
        if save:
            outfile = outdir + "_sigma_X"
            plt.savefig(outfile, dpi=Dpi, format="png")
        else:
            plt.show()
        plt.close()

    def plot_sigma_y_hist(self, save=False, outdir=None):
        """Histogram of per-group average sigma Y."""
        plt.figure(figsize=Figsize)
        plt.hist(self.avg_group_sigma_xyz[1], 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Groups', fontsize=LabelFontsize)
        plt.xlabel('Sigma Y', fontsize=LabelFontsize)
        plt.title('Sigma Y', fontsize=TitleFontsize)
        plt.tick_params(axis='both', which='major', labelsize=TickFontsize)
        plt.grid(True)
        if save:
            outfile = outdir + "_sigma_Y"
            plt.savefig(outfile, dpi=Dpi, format="png")
        else:
            plt.show()
        plt.close()

    def plot_sigma_z_hist(self, save=False, outdir=None):
        """Histogram of per-group average sigma Z."""
        plt.figure(figsize=Figsize)
        plt.hist(self.avg_group_sigma_xyz[2], 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Groups', fontsize=LabelFontsize)
        plt.xlabel('Sigma Z', fontsize=LabelFontsize)
        plt.title('Sigma Z', fontsize=TitleFontsize)
        plt.tick_params(axis='both', which='major', labelsize=TickFontsize)
        plt.grid(True)
        if save:
            outfile = outdir + "_sigma_Z"
            plt.savefig(outfile, dpi=Dpi, format="png")
        else:
            plt.show()
        plt.close()

    def plot_peak_sigma_x(self, x_sigma, save=False, outdir=None, draw_sigma=15):
        """Histogram of per-peak sigma X; draw_sigma marks a threshold line."""
        plt.figure(figsize=Figsize)
        plt.hist(x_sigma, 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Peaks', fontsize=LabelFontsize)
        plt.xlabel('Sigma X', fontsize=LabelFontsize)
        plt.title('Sigma X', fontsize=TitleFontsize)
        plt.tick_params(axis='both', which='major', labelsize=TickFontsize)
        plt.grid(True)
        if draw_sigma:
            plt.axvline(x=draw_sigma, color="red")
        if save:
            outfile = outdir + "_sigma_X_peaks_g"
            plt.savefig(outfile, dpi=Dpi, format="png")
        else:
            plt.show()
        plt.close()

    def plot_peak_sigma_y_(self, y_sigma, save=False, outdir=None, draw_sigma=15):
        """Histogram of per-peak sigma Y; draw_sigma marks a threshold line."""
        plt.figure(figsize=Figsize)
        plt.hist(y_sigma, 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Peaks', fontsize=LabelFontsize)
        plt.xlabel('Sigma Y', fontsize=LabelFontsize)
        plt.title('Sigma Y', fontsize=TitleFontsize)
        plt.tick_params(axis='both', which='major', labelsize=TickFontsize)
        plt.grid(True)
        if draw_sigma:
            plt.axvline(x=draw_sigma, color="red")
        if save:
            outfile = outdir + "_sigma_Y_peaks_g"
            plt.savefig(outfile, dpi=Dpi, format="png")
        else:
            plt.show()
        plt.close()

    def plot_peak_sigma_z(self, sigma_z, save=False, outdir=None, draw_sigma=15):
        """Histogram of per-peak sigma Z; draw_sigma marks a threshold line."""
        plt.figure(figsize=Figsize)
        plt.hist(sigma_z, 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Peaks', fontsize=LabelFontsize)
        plt.xlabel('Sigma Z', fontsize=LabelFontsize)
        plt.title('Sigma Z', fontsize=TitleFontsize)
        plt.tick_params(axis='both', which='major', labelsize=TickFontsize)
        plt.grid(True)
        if draw_sigma:
            plt.axvline(x=draw_sigma, color="red")
        if save:
            outfile = outdir + "_sigma_Z_peaks_g"
            plt.savefig(outfile, dpi=Dpi, format="png")
        else:
            plt.show()
        plt.close()

    def plot_average_peak_sigma(self, sigma_z, sigma_y, sigma_x, save=False, outdir=None, draw_sigma=15):
        """Histogram of the per-peak mean of (sigma_x, sigma_y, sigma_z).

        The three input lists must be index-aligned (same peak order).
        NOTE: this plot intentionally uses its own (larger) font sizes and
        a lower save dpi than the module-level defaults.
        """
        # Element-wise mean of the three sigma lists.
        avg = [0] * len(sigma_x)
        for i in range(len(sigma_x)):
            avg[i] += sigma_x[i]
            avg[i] += sigma_y[i]
            avg[i] += sigma_z[i]
            avg[i] /= 3
        plt.figure(figsize=Figsize)
        plt.hist(avg, 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Peaks', fontsize=70)
        plt.xlabel('Average sigma', fontsize=70)
        plt.title('Average Peak Sigma', fontsize=100)
        plt.tick_params(axis='both', which='major', labelsize=50)
        plt.grid(True)
        if draw_sigma:
            plt.axvline(x=draw_sigma, color="red")
        if save:
            outfile = outdir + "_average_peaks_g_v2"
            plt.savefig(outfile, dpi=100, format="png")
        else:
            plt.show()
        plt.close()

    def plot_average_sigma(self, save=False, outdir=None):
        """Histogram of the per-group average of the x/y/z sigma lists."""
        plt.figure(figsize=Figsize)
        data = np.array(self.avg_group_sigma_xyz)
        # axis=0 averages x, y and z for each group.
        avg = np.average(data, axis=0)
        plt.hist(avg, 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Groups', fontsize=LabelFontsize)
        plt.xlabel('Average Sigma', fontsize=LabelFontsize)
        plt.title('Groups Average Sigma', fontsize=TitleFontsize)
        plt.tick_params(axis='both', which='major', labelsize=TickFontsize)
        plt.grid(True)
        if save:
            outfile = outdir + "_sigma_avg"
            plt.savefig(outfile, dpi=Dpi, format="png")
        else:
            plt.show()
        plt.close()

    def plot_peaks_per_group_histogram(self, image: Image, save=False, outdir=None):
        """Histogram of the number of peaks found in each group."""
        plt.figure(figsize=Figsize)
        data = self.peaks_per_group(image=image)
        plt.hist(data, 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Groups', fontsize=LabelFontsize)
        plt.xlabel('Number of Peaks', fontsize=LabelFontsize)
        plt.title('Peaks per Group histogram', fontsize=TitleFontsize)
        plt.tick_params(axis='both', which='major', labelsize=TickFontsize)
        plt.minorticks_on()
        plt.grid(True)
        if save:
            outfile = outdir + "_peaks_per_group"
            plt.savefig(outfile, dpi=Dpi, format="png")
        else:
            plt.show()
        plt.close()

    def plot_photons_per_group(self, save=False, outdir=None):
        """Histogram of the average photon count per peak in each group."""
        plt.figure(figsize=Figsize)
        plt.hist(self.photons_per_group, 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Groups', fontsize=LabelFontsize)
        plt.xlabel('Average Number of Photons', fontsize=LabelFontsize)
        plt.title('Average number of photons per peak in group', fontsize=TitleFontsize)
        plt.tick_params(axis='both', which='major', labelsize=TickFontsize)
        plt.grid(True)
        if save:
            outfile = outdir + "_photons_per_group"
            # BUG FIX: keyword was misspelled `formay`, so the format hint
            # was silently dropped (or rejected, depending on matplotlib).
            plt.savefig(outfile, dpi=Dpi, format="png")
        else:
            plt.show()
        plt.close()

    def plot_photons_per_peaks(self, save=False, outdir=None):
        """Histogram of the photon count of every peak in the image."""
        plt.figure(figsize=Figsize)
        plt.hist(self.photons, 50, facecolor=Facecolor, alpha=Alpha)
        plt.ylabel('Number of Peaks', fontsize=LabelFontsize)
        plt.xlabel('Number of Photons', fontsize=LabelFontsize)
        plt.title('Number of photons per peak', fontsize=TitleFontsize)
        plt.tick_params(axis='both', which='major', labelsize=TickFontsize)
        plt.grid(True)
        if save:
            outfile = outdir + "_photons_per_image"
            plt.savefig(outfile, dpi=Dpi, format="png")
        else:
            plt.show()
        plt.close()

    def save_statistics(self, output, image: Image, sigma_threshold):
        """Write a plain-text statistics summary for `image` to `output`."""
        with open(output, 'w') as out:
            peaks = PeakPositions(image=image, sigma_threshold=sigma_threshold)
            groups = GroupPeakStats(image=image)
            # Side effect: populates peaks.hull_volume / peaks.hull_area,
            # which start as None — TODO confirm against PeakPositions.
            peaks.plot_convex_hull(show=False)
            out.write("Group Count: {}".format(self.group_count) + "\n")
            out.write("Peak Count: {}".format(self.peak_count) + "\n")
            out.write("Average peaks per group {}".format(self.peak_count / self.group_count) + "\n")
            out.write("Average sigma X: {} nm".format(self.average_sigma_x()) + "\n")
            out.write("Average sigma Y: {} nm".format(self.average_sigma_y()) + "\n")
            out.write("Average sigma Z: {} nm".format(self.average_sigma_z()) + "\n")
            out.write("Convex hull volume: {}".format(peaks.hull_volume) + "\n")
            out.write("Convex hull area: {}".format(peaks.hull_area) + "\n")
            out.write("Average number of photons per peak: {}".format(sum(self.photons) / len(self.photons)) + "\n")
            out.write("Average number of photons per group peak: {}".format(groups.average_photons()) + "\n")
            out.write("Average sigma X per group peaks: {} nm".format(groups.averege_sigma_x()) + "\n")
            out.write("Average sigma Y per group peaks: {} nm".format(groups.averege_sigma_y()) + "\n")
            out.write("Average sigma Z per group peaks: {} nm".format(groups.averege_sigma_z()))

    def print_statistics(self, image: Image, sigma_threshold=None):
        """Print the same statistics summary as save_statistics to stdout."""
        peaks = PeakPositions(image=image, sigma_threshold=sigma_threshold)
        groups = GroupPeakStats(image=image)
        # Side effect: populates peaks.hull_volume / peaks.hull_area.
        peaks.plot_convex_hull(show=False)
        print("Group Count: {}".format(self.group_count))
        print("Peak Count: {}".format(self.peak_count))
        print("Average peaks per group {}".format(self.peak_count / self.group_count))
        print("Average sigma X: {} nm".format(self.average_sigma_x()))
        print("Average sigma Y: {} nm".format(self.average_sigma_y()))
        print("Average sigma Z: {} nm".format(self.average_sigma_z()))
        print("Convex hull volume: {}".format(peaks.hull_volume))
        print("Convex hull area: {}".format(peaks.hull_area))
        print("Average number of photons per peak: {}".format(sum(self.photons) / len(self.photons)))
        print("Average number of photons per group peak: {}".format(groups.average_photons()))
        print("Average sigma X per group peaks: {} nm".format(groups.averege_sigma_x()))
        print("Average sigma Y per group peaks: {} nm".format(groups.averege_sigma_y()))
        print("Average sigma Z per group peaks: {} nm".format(groups.averege_sigma_z()))

    @staticmethod
    def photons_in_image(image: Image):
        """Return a list of the number of photons in each peak of the image."""
        photons_per_peak = []
        for spot in image.groups:
            photons_per_peak += GroupStats.photons(group=spot)
        return photons_per_peak

    @staticmethod
    def average_photons_per_group(image: Image):
        """Return a list of the average number of photons per peak, one per group."""
        photons_per_spot = []
        for spot in image.groups:
            avg_photons = sum(GroupStats.photons(group=spot)) / len(spot)
            photons_per_spot.append(avg_photons)
        return photons_per_spot
class GroupStats:
    """Per-group statistics: peak count, average sigmas and photon counts.

    Sigma X/Y values are stored in the source data in pixels and are
    converted to nanometres using `pixel_size` (default 133 nm/px);
    Sigma Z is already in nanometres.
    """

    def __init__(self, group: Group):
        self.peak_count = len(group.peaks)
        self.group_avg_sigma_x_pos_full = self.calculate_avg_sigma_x(group)
        self.group_avg_sigma_y_pos_full = self.calculate_avg_sigma_y(group)
        self.group_avg_sigma_z = self.calculate_avg_sigma_z(group)

    def list_peak_sigma_x(self, group: Group, pixel_size=133):
        """Sigma X of every peak in `group`, converted from pixels to nm."""
        return [float(peak.data["Sigma X Pos Full"]) * pixel_size
                for peak in group.peaks]

    def list_peak_sigma_y(self, group: Group, pixel_size=133):
        """Sigma Y of every peak in `group`, converted from pixels to nm."""
        return [float(peak.data["Sigma Y Pos Full"]) * pixel_size
                for peak in group.peaks]

    def list_peak_sigma_z(self, group: Group):
        """Sigma Z of every peak in `group` (already in nm)."""
        return [float(peak.data["Sigma Z"]) for peak in group.peaks]

    @staticmethod
    def calculate_avg_sigma_x(group: Group, pixel_size=133):
        """Mean sigma X over the group's peaks, in nm."""
        total = sum(float(peak.data["Sigma X Pos Full"]) * pixel_size
                    for peak in group.peaks)
        return total / len(group.peaks)

    @staticmethod
    def calculate_avg_sigma_y(group: Group, pixel_size=133):
        """Mean sigma Y over the group's peaks, in nm."""
        total = sum(float(peak.data["Sigma Y Pos Full"]) * pixel_size
                    for peak in group.peaks)
        return total / len(group.peaks)

    @staticmethod
    def calculate_avg_sigma_z(group: Group):
        """Mean sigma Z over the group's peaks, in nm."""
        total = sum(float(peak.data["Sigma Z"]) for peak in group.peaks)
        return total / len(group.peaks)

    @staticmethod
    def photons(group: Group):
        """Return the list of photon counts, one entry per peak in `group`."""
        return [peak.data["6 N Photons"] for peak in group.peaks]
class PeakPositions:
def __init__(self, image: Image, sigma_threshold=1000, minimize=False):
self.peaks_positions = self.image_peak_positions(image, sigma_threshold)
self.parse_peak_positions_to_nm()
if minimize:
self.minimize_xy()
self.hull_volume = None
self.hull_area = None
@staticmethod
def single_peak_position(peak: Peak, sigma_threshold, pixel_size=133):
peak_x = peak.data["X Position"]
peak_y = peak.data["Y Position"]
peak_z = peak.data["Unwrapped Z"]
peak_position = [peak_x, peak_y, peak_z]
if sigma_threshold:
if peak.data["Sigma X Pos Full"] * pixel_size < sigma_threshold and peak.data[
"Sigma Y Pos Full"] * pixel_size < sigma_threshold and peak.data["Sigma Z"] < sigma_threshold:
return peak_position
else:
return None
return peak_position
def group_peak_positions(self, group: Group, sigma_threshold):
spot_peaks_positions = []
for peak in group.peaks:
peak_positions = self.single_peak_position(peak, sigma_threshold)
if peak_positions:
spot_peaks_positions.append(peak_positions)
return spot_peaks_positions
def image_peak_positions(self, image: Image, sigma_threshold):
image_peaks_positions = []
for spot in image.groups:
spot_peaks_positions = self.group_peak_positions(spot, sigma_threshold)
image_peaks_positions += spot_peaks_positions
return np.array(image_peaks_positions)
def parse_peak_positions_to_nm(self, pixel_size=133):
"""X/Y positions in ASCII file are in pixels and Z position is in nm. """
self.peaks_positions[:, 0] | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#author: wowdd1
#mail: <EMAIL>
#data: 2014.12.07
from spider import *
sys.path.append("..")
from utils import Utils
class BaikeSpider(Spider):
def __init__(self):
    # Inherit the shared spider machinery (request/db helpers, counters).
    Spider.__init__(self)
    # Records produced by this spider are filed under subject "rank"
    # with "baike" as the source/school tag used in output file names.
    self.subject = 'rank'
    self.school = "baike"
def processNobelprize(self):
    """Scrape nobelprize.org laureate lists (one page per prize category)
    and write one db record per award year. The db file is only replaced
    when the number of scraped records differs from the existing file."""
    subjects = ['physics', 'chemistry', 'medicine', 'literature', 'peace', 'economic-sciences']
    for subject in subjects:
        file_name = self.get_file_name(self.subject + "/nobel/" + subject, 'nobel')
        # Line count of the existing db file, used below to decide
        # whether an upgrade (file swap) is needed.
        file_lines = self.countFileLineNum(file_name)
        f = self.open_db(file_name + ".tmp")
        self.count = 0
        r = requests.get('http://www.nobelprize.org/nobel_prizes/' + subject + '/laureates/')
        soup = BeautifulSoup(r.text)
        # Each 'by_year' div holds one award year with its laureates.
        for div in soup.find_all('div', class_='by_year'):
            if div.h3 == None or div.h6 == None or div.p == None:
                continue
            # The year is the last whitespace-separated token of the h3 text.
            year = div.h3.text.strip()
            year = year[year.rfind(' ') :].strip()
            print year
            # Re-parse just this div so find_all below stays inside it.
            soup2 = BeautifulSoup(div.prettify())
            author = ""
            desc = ""
            # Laureate names are links whose href points at an html page.
            for a in soup2.find_all('a'):
                if a['href'].find('html') == -1:
                    continue
                author += a.text.strip() + ', '
            # Paragraphs hold the prize motivation text.
            for p in soup2.find_all('p'):
                if p.text.strip() == "":
                    continue
                desc += p.text.strip().replace('\n', '').replace('"', '') + ", "
            # Drop the trailing ", " from both accumulators.
            author = author[0 : len(author) - 2]
            desc = desc[0 : len(desc) - 2]
            #print div.h6.text.strip().replace(' and', ',')
            #print div.p.text.strip().replace('\n', '').replace('"', '')
            self.count += 1
            self.write_db(f, 'nobel-' + subject + '-' + year, year + ' ' + author, 'http://www.nobelprize.org' + div.a['href'], 'winner:' + author + " description:" + desc)
        self.close_db(f)
        # Swap in the .tmp file only if the record count actually changed.
        if file_lines != self.count and self.count > 0:
            self.do_upgrade_db(file_name)
            print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
        else:
            self.cancel_upgrade(file_name)
            print "no need upgrade\n"
def processFieldsMedal(self, url):
    """Scrape a Fields Medal winners table (one <tr> per winner row) from
    `url` and write one db record per award year, accumulating multiple
    winners of the same year into a single record."""
    r = requests.get(url)
    soup = BeautifulSoup(r.text)
    # Accumulators for the record currently being assembled; they are
    # flushed to the db whenever a new year row is encountered.
    desc = ''
    winner = ''
    year = ''
    file_name = self.get_file_name(self.subject + "/fieldsmedal", self.school)
    file_lines = self.countFileLineNum(file_name)
    f = self.open_db(file_name + ".tmp")
    self.count = 0
    for tr in soup.find_all('tr'):
        # Positional parse: cell texts end up at fixed indices after split.
        data = tr.text.split('\n')
        id = data[1].strip()
        # A cell mentioning 'University' marks the end of the winners table.
        if tr.td != None and tr.td.text.find('University') != -1:
            break
        soup2 = BeautifulSoup(tr.prettify())
        # A numeric first cell means this row starts a new award year.
        if id.isdigit():
            if data[3] == 'Awarded for' or data[len(data) - 2] == 'Presented by':
                continue
            # Flush the previous year's accumulated record, if any.
            if desc != '':
                print winner[0 : len(winner) - 2]
                print desc[0 : len(desc) - 2]
                print ''
                self.count += 1
                self.write_db(f, "fieldsmedal-" + year, year + ' ' + winner[0 : len(winner) - 2], '', 'winner:' + winner[0 : len(winner) - 2] + ' description:' + desc[0 : len(desc) - 2])
                winner = ''
                desc = ''
                year = ''
            #print data[1] + ' ' + data[3]
            year = id
            winner += data[3] + ', '
            desc += data[3] + ':' + data[len(data) - 2].replace('\n', '').replace('"', '') + ' '
            continue
        # Continuation row: an additional winner for the current year
        # (recognized by a linked cell carrying a 'title' attribute).
        if (tr.td != None and tr.td.a != None and tr.td.a.has_key('title')):
            #print data[1]
            if data[1] == 'Awarded for' or tr.text.find('Presented by') != -1:
                continue
            winner += data[1] + ', '
            desc += data[1] + ':' + data[len(data) - 2].replace('\n', '').replace('"', '') + ' '
    # Flush the final year's record left in the accumulators.
    if desc != '':
        self.count += 1
        self.write_db(f, "fieldsmedal-" + year, year + ' ' + winner[0 : len(winner) - 2], '', 'winner:' + winner[0 : len(winner) - 2] + ' description:' + desc[0 : len(desc) - 2])
    self.close_db(f)
    # Swap in the .tmp file only if the record count actually changed.
    if file_lines != self.count and self.count > 0:
        self.do_upgrade_db(file_name)
        print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
    else:
        self.cancel_upgrade(file_name)
        print "no need upgrade\n"
def processWolfPrizeMathematics(self, url):
    """Scrape a Wolf Prize in Mathematics winners table from `url` and
    write one db record per award year, merging multiple winners of the
    same year into one record."""
    r = requests.get(url)
    soup = BeautifulSoup(r.text)
    # Accumulators for the record being assembled; flushed when a row
    # starting a new year is seen.
    desc = ""
    year = ''
    winner = ''
    file_name = self.get_file_name(self.subject + "/wolfprize-mathematics", self.school)
    file_lines = self.countFileLineNum(file_name)
    f = self.open_db(file_name + ".tmp")
    self.count = 0
    for tr in soup.find_all('tr'):
        # Positional parse of the row's cell texts.
        data = tr.text.split('\n')
        #print data
        # Skip header rows and years with no award.
        if data[2].find('No award') != -1 or data[1].find('Year') != -1 or data[1].find('Citation') != -1:
            continue
        # An empty first cell marks the end of the table.
        if data[1] == '':
            break
        # First cell starting with '1'/'2' is a year (19xx/20xx): new record.
        if data[1].startswith('1') or data[1].startswith('2'):
            # Flush the previous year's accumulated record, if any.
            if desc != '':
                winner = winner[0 : len(winner) - 2]
                desc = desc.strip()
                print year + ' ' + winner
                print desc
                self.count += 1
                self.write_db(f, 'wolfprize-math-' + year, year + ' ' + winner, '', 'winner:' + winner + ' description:' + desc)
                year = ''
                winner = ''
                desc = ''
            year = data[1].strip()
            winner += data[2].strip() + ', '
            desc += data[2].strip() + ':' + data[len(data) - 2].strip() + ' '
        else:
            # Continuation row: extra co-winner for the current year.
            winner += data[1].strip() + ', '
            desc += data[2].strip() + ':' + data[len(data) - 2].strip() + ' '
    # Flush the final record left in the accumulators.
    if desc != '':
        winner = winner[0 : len(winner) - 2]
        desc = desc.strip()
        print year + ' ' + winner
        print desc
        self.count += 1
        self.write_db(f, 'wolfprize-math-' + year, year + ' ' + winner, '', 'winner:' + winner + ' description:' + desc)
    self.close_db(f)
    # Swap in the .tmp file only if the record count actually changed.
    if file_lines != self.count and self.count > 0:
        self.do_upgrade_db(file_name)
        print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
    else:
        self.cancel_upgrade(file_name)
        print "no need upgrade\n"
def processBreakthroughPrize(self):
    """Scrape breakthroughprize.org laureates. The site is organized by
    category id (1=Physics, 2=Life Sciences, 3=Mathematics) and prize
    code (P1..P4); for every (category, prize) pair the available years
    are discovered first, then one db record per year is written."""
    # Category id -> human-readable category name (used in file names).
    data = {'1' : 'Fundamental-Physics',\
    '2' : 'Life-Sciences',\
    '3' : 'Mathematics'}
    for k, v in data.items():
        # Prize codes available for this category.
        data2 = None
        if k == '1':
            data2 = {'P4' : 'Special-Breakthrough-Prize',\
            'P2' : 'New-Horizons-Prize',\
            'P3' : 'Physics-Frontiers-Prize',\
            'P1' : 'Breakthrough-Prize'}
        elif k == '2':
            data2 = { 'P1' : 'Breakthrough-Prize'}
        elif k == '3':
            data2 = { 'P1' : 'Breakthrough-Prize',\
            'P2' : 'New-Horizons-Prize'}
        # v is rebuilt per prize code, so remember the bare category name.
        oldv = v
        for k2, v2 in data2.items():
            v = oldv
            v = v + '-' + v2
            r = requests.get('https://breakthroughprize.org/Laureates/' + k + '/' + k2)
            soup = BeautifulSoup(r.text)
            # The year filter list is the <ul class="filter"> whose first
            # entry starts with '20' (a year like 20xx).
            ul = None
            for u in soup.find_all('ul', class_='filter'):
                if u.li.text.strip().startswith('20'):
                    ul = u
                    break
            soup2 = BeautifulSoup(ul.prettify())
            years = []
            for li in soup2.find_all('li'):
                years.append(li.text.strip())
            file_name = self.get_file_name(self.subject + "/breakthroughprize/" + v.lower(), 'breakthroughprize')
            file_lines = self.countFileLineNum(file_name)
            f = self.open_db(file_name + ".tmp")
            self.count = 0
            # One laureates page per discovered year.
            for year in years:
                url = 'https://breakthroughprize.org/Laureates/' + k +'/' + k2 + '/Y' + year
                r = requests.get(url)
                soup = BeautifulSoup(r.text)
                # Laureates live in the first 'people' list with populated items.
                ul = None
                for u in soup.find_all('ul', class_='people'):
                    if u.li != None and u.li.span != None:
                        ul = u
                        break
                soup2 = BeautifulSoup(ul.prettify())
                winner = ''
                for li in soup2.find_all('li'):
                    #print li.prettify()
                    title = li.span.a.text.strip()
                    # Strip trailing "and the ..." boilerplate from names.
                    if title.find('and the') != -1:
                        title = title[0 : title.find('and the')]
                    winner += title.strip() + ', '
                # Drop the trailing ", " separator.
                winner = winner[0 : len(winner) - 2]
                print winner
                self.count += 1
                self.write_db(f, 'breakthroughprize-' + v.lower() + '-' + year, year + ' ' + winner, url, 'winner:' + winner)
            self.close_db(f)
            # Swap in the .tmp file only if the record count actually changed.
            if file_lines != self.count and self.count > 0:
                self.do_upgrade_db(file_name)
                print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
            else:
                self.cancel_upgrade(file_name)
                print "no need upgrade\n"
    def processBaikeData(self, url):
        """Scrape a baike-style page of programmer entries and refresh the
        local "programmer" db file when the entry count changed.

        Args:
            url: page to fetch; entries are space-separated fields inside <p>
                 blocks whose first 60 chars contain "<NAME>".
        """
        r = requests.get(url)
        soup = BeautifulSoup(r.text)
        file_name = self.get_file_name(self.subject + "/programmer", self.school)
        file_lines = self.countFileLineNum(file_name)
        # Write into a .tmp db first; swapped in only if counts changed.
        f = self.open_db(file_name + ".tmp")
        self.count = 0
        p_all = soup.find_all("p")
        for p in p_all:
            if p.prettify()[0:60].find("<NAME>") != -1:
                # Strip markup, then treat each line as "# Name Description"
                # with single-space separators.
                for line in p.prettify().replace('<br>', '').replace('</br>', '').replace('<br/>', '').replace('<p>', '').replace('</p>', '').split("\n"):
                    line = line.strip()
                    if line != "" and line != "# Name Description":
                        # pos_1..pos_3 mark the first three space separators:
                        # [0:pos_1]=id, [pos_1+1:pos_3]=name, rest=description.
                        pos_1 = line.find(" ")
                        pos_2 = line.find(" ", pos_1 + 1)
                        pos_3 = line.find(" ", pos_2 + 1)
                        print line[0 : pos_1] + " " + line[pos_1 + 1 : pos_3] + " " + line[pos_3 + 1 :]
                        self.write_db(f, line[0 : pos_1], line[pos_1 + 1 : pos_3], "", 'description:' + line[pos_3 + 1 :])
                        self.count += 1
        self.close_db(f)
        if file_lines != self.count and self.count > 0:
            self.do_upgrade_db(file_name)
            print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
        else:
            self.cancel_upgrade(file_name)
            print "no need upgrade\n"
def processWikiTuringData(self, url):
file_name = self.get_file_name(self.subject + "/Turing-Award", self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
r = requests.get(url)
soup = BeautifulSoup(r.text)
for tr in soup.find_all("tr"):
if tr.th != None and tr.text.strip()[0:1] == "1" or tr.text.strip()[0:1] == "2":
i = 0
link = "http://en.wikipedia.org" + str(tr.td.a["href"])
year = ""
title = ""
remark = ""
for line in tr.text.strip().split("\n"):
i += 1
#print '---' + str(i) + " " + line
if i == 1:
year = line
continue
if len(line) < 50:
title += " " + line
continue
if line.startswith("For") or len(line) > 50:
#if i != 3:
print year + " " + title + " " + link
remark = 'winner:' + title.strip().replace(' | |
the
# drifts in the data streams. This drift handling is done in real time. The data received from this event could be used
# for an even more accurate drift adjustment in the post processing. Callbacks will receive a TimeSynchronizationData
# object or a dictionary with values if as_dictionary is True.
# See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
# <CodeExample>time_synchronization_data.py</CodeExample>
EYETRACKER_TIME_SYNCHRONIZATION_DATA = "eyetracker_time_synchronization_data"

# Maps each notification-type string to the class used to wrap the raw
# notification dict when a subscriber did not request plain dictionaries
# (see EyeTracker.__notification_callback).
_available_notification_subscriptions =\
    {EYETRACKER_NOTIFICATION_CONNECTION_LOST: ConnectionLostData,
     EYETRACKER_NOTIFICATION_CONNECTION_RESTORED: ConnectionRestoredData,
     EYETRACKER_NOTIFICATION_CALIBRATION_MODE_ENTERED: CalibrationModeEnteredData,
     EYETRACKER_NOTIFICATION_CALIBRATION_MODE_LEFT: CalibrationModeLeftData,
     EYETRACKER_NOTIFICATION_CALIBRATION_CHANGED: CalibrationChangedData,
     EYETRACKER_NOTIFICATION_TRACK_BOX_CHANGED: TrackBoxChangedData,
     EYETRACKER_NOTIFICATION_DISPLAY_AREA_CHANGED: DisplayAreaChangedData,
     EYETRACKER_NOTIFICATION_GAZE_OUTPUT_FREQUENCY_CHANGED: GazeOutputFrequencyChangedData,
     EYETRACKER_NOTIFICATION_EYE_TRACKING_MODE_CHANGED: EyeTrackingModeChangedData,
     EYETRACKER_NOTIFICATION_DEVICE_FAULTS: DeviceFaultsData,
     EYETRACKER_NOTIFICATION_DEVICE_WARNINGS: DeviceWarningsData
     }
# The order of these numbers have to be the same as in the enum CallbackType in py_callbacks.h
# Each entry maps a subscription-type string to its native type index, a
# human-readable stream name, and the class used to wrap stream data for
# subscribers that did not request plain dictionaries.
_subscription_types = {EYETRACKER_GAZE_DATA:
                       {"type_index": 1,
                        "stream_name": "gaze data",
                        "data_class": GazeData},
                       EYETRACKER_EXTERNAL_SIGNAL:
                       {"type_index": 2,
                        "stream_name": "external signal",
                        "data_class": ExternalSignalData},
                       EYETRACKER_TIME_SYNCHRONIZATION_DATA:
                       {"type_index": 3,
                        "stream_name": "time synchronization data",
                        "data_class": TimeSynchronizationData},
                       EYETRACKER_STREAM_ERRORS:
                       {"type_index": 4,
                        "stream_name": "",
                        "data_class": StreamErrorData},
                       _EYETRACKER_NOTIFICATIONS:
                       {"type_index": 5,
                        "stream_name":
                        "notifications",
                        "data_class": dict},
                       EYETRACKER_EYE_IMAGES:
                       {"type_index": 6,
                        "stream_name": "eye images",
                        "data_class": EyeImageData},
                       EYETRACKER_HMD_GAZE_DATA:
                       {"type_index": 7,
                        "stream_name": "HMD gaze data",
                        "data_class": HMDGazeData},
                       EYETRACKER_USER_POSITION_GUIDE:
                       {"type_index": 8,
                        "stream_name": "user position guide",
                        "data_class": UserPositionGuide}
                       }
def __log_callback(user_callback, as_dictionary, data_dict):
    # Deliver one log entry to the user's callback, either as the raw dict
    # or wrapped in a _LogEntry object, depending on what was requested.
    payload = data_dict if as_dictionary else _LogEntry(data_dict)
    user_callback(payload)
def _logging_subscribe(callback, as_dictionary=False):
    # Type index 0 with an empty stream name addresses the global log stream.
    def _forward(entry):
        __log_callback(callback, as_dictionary, entry)
    interop.subscribe_to(0, "", None, 0, _forward)

def _logging_unsubscribe():
    # Stop receiving entries from the global log stream.
    interop.unsubscribe_from(0, None, 0)
class EyeTracker(object):
    '''Provides methods and properties to manage and get data from an eye tracker.

    EyeTracker objects are either created from an address or returned in a
    tuple from @ref find_all_eyetrackers.  Instances cache static device
    metadata (address, model, serial number, ...) and manage per-stream and
    per-notification callback subscriptions.
    '''
def __init__(self, address):
'''Gets an eye tracker object that has the specified URI.
<CodeExample>create_eyetracker.py</CodeExample>
Args:
address: Address (URI) to the eye tracker.
Raises:
EyeTrackerConnectionFailedError
EyeTrackerInternalError
ValueError
'''
self.__notification_subscription_lock = threading.RLock()
self.__notification_subscriptions = {}
self.__subscription_lock = threading.RLock()
self.__subscriptions = {}
if type(address) is str:
self.__init_from_address(address)
elif isinstance(address, interop.TobiiProEyeTrackerData):
self.__init_from_data(address)
else:
raise ValueError("An EyeTracker must be initialized with a URI.")
    def __del__(self):
        # Best-effort cleanup: drop every live stream subscription so the
        # core library stops delivering data to this dying object.
        with self.__subscription_lock:
            for subscription_type in self.__subscriptions.keys():
                interop.unsubscribe_from(subscription_type, self, self.__core_eyetracker)

    def __init_from_address(self, address):
        # Resolve the URI to a device data record, then populate attributes.
        self.__init_from_data(interop.get_device(address))

    def __init_from_data(self, data):
        # Cache the static device metadata exposed by the read-only
        # properties, plus the opaque core handle used by all interop calls.
        self.__address = data.address
        self.__device_name = data.device_name
        self.__serial_number = data.serial_number
        self.__model = data.model
        self.__firmware_version = data.firmware_version
        self.__runtime_version = data.runtime_version
        self.__device_capabilities = data.device_capabilities
        self.__core_eyetracker = data.core_eyetracker
    def __notification_callback(self, data):
        # Fan a notification out to every callback registered for its type,
        # wrapping the raw dict unless the subscriber asked for dictionaries.
        with self.__notification_subscription_lock:
            for callback, as_dictionary in\
                    self.__notification_subscriptions.get(data["notification_type"], {}).items():
                data_class = dict if as_dictionary else _available_notification_subscriptions[data["notification_type"]]
                callback(data_class(data))

    def __subscription_callback(self, subscription_type, data):
        # Fan stream data out to every callback registered for the stream,
        # wrapping the raw dict in the stream's data class unless the
        # subscriber asked for dictionaries.
        global _subscription_types
        with self.__subscription_lock:
            for callback, as_dictionary in self.__subscriptions.get(subscription_type, {}).items():
                data_class = dict if as_dictionary else _subscription_types[subscription_type]["data_class"]
                callback(data_class(data))
    # --- Read-only device metadata, captured once in __init_from_data. ---

    @property
    def address(self):
        '''Gets the address (URI) of the eye tracker device.
        '''
        return self.__address

    @property
    def device_name(self):
        '''Gets the name of the eye tracker.
        '''
        return self.__device_name

    @property
    def serial_number(self):
        '''Gets the serial number of the eye tracker. All physical eye trackers have a unique serial number.
        '''
        return self.__serial_number

    @property
    def model(self):
        '''Gets the model of the eye tracker.
        '''
        return self.__model

    @property
    def firmware_version(self):
        '''Gets the firmware version of the eye tracker.
        '''
        return self.__firmware_version

    @property
    def runtime_version(self):
        '''Gets the runtime version of the eye tracker.
        '''
        return self.__runtime_version

    @property
    def device_capabilities(self):
        '''Gets a tuple with the capabilities of the device.

        Valid values in the tuple are @ref CAPABILITY_CAN_SET_DISPLAY_AREA, @ref CAPABILITY_HAS_EXTERNAL_SIGNAL and
        @ref CAPABILITY_HAS_EYE_IMAGES.
        '''
        return self.__device_capabilities
def apply_licenses(self, license_key_ring):
'''Sets a key ring of licenses or a single license for unlocking features of the eye tracker.
See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
<CodeExample>apply_licenses.py</CodeExample>
Args:
license_key_ring: List of LicenseKey objects, list of bytes, LicenseKey object or bytes object.
Raises:
EyeTrackerConnectionFailedError
EyeTrackerInternalError
AttributeError
TypeError
Returns:
Tuple of FailedLicense objects for licenses that failed.
Empty tuple if all licenses were successfully applied.
'''
if isinstance(license_key_ring, bytes):
return interop.apply_licenses(self.__core_eyetracker, (license_key_ring,))
elif hasattr(license_key_ring, 'key_string'):
return interop.apply_licenses(self.__core_eyetracker, (license_key_ring.key_string,))
else:
return interop.apply_licenses(self.__core_eyetracker,
tuple([(lambda key: key
if isinstance(key, bytes) else key.key_string)(key)
for key in license_key_ring]))
    # --- License, calibration and gaze-frequency wrappers around interop. ---

    def clear_applied_licenses(self):
        '''Clears any previously applied licenses.

        Raises:
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError
        '''
        return interop.clear_applied_licenses(self.__core_eyetracker)

    def retrieve_calibration_data(self):
        '''Gets the calibration data used currently by the eye tracker.

        This data can be saved to a file for later use.
        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>calibration_data.py</CodeExample>

        Raises:
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError
        '''
        return interop.calibration_retrieve(self.__core_eyetracker)

    def apply_calibration_data(self, calibration_data):
        '''Sets the provided calibration data to the eye tracker, which means it will be active calibration.

        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>calibration_data.py</CodeExample>

        Raises:
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError
            ValueError
        '''
        interop.calibration_apply(self.__core_eyetracker, calibration_data)

    def get_all_gaze_output_frequencies(self):
        '''Gets a list of gaze output frequencies supported by the eye tracker.

        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>gaze_output_frequencies.py</CodeExample>

        Raises:
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError

        Returns:
            Tuple of floats with all gaze output frequencies.
        '''
        return interop.get_all_gaze_output_frequencies(self.__core_eyetracker)

    def get_gaze_output_frequency(self):
        '''Gets the gaze output frequency of the eye tracker.

        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>gaze_output_frequencies.py</CodeExample>

        Raises:
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError

        Returns:
            Float with the current gaze output frequency.
        '''
        return interop.get_gaze_output_frequency(self.__core_eyetracker)

    def set_gaze_output_frequency(self, gaze_output_frequency):
        '''Sets the gaze output frequency of the eye tracker.

        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>gaze_output_frequencies.py</CodeExample>

        Args:
            gaze_output_frequency: The gaze output frequency as a float value.

        Raises:
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError
            ValueError
        '''
        # float() coerces ints/strings up front so bad values raise here.
        return interop.set_gaze_output_frequency(self.__core_eyetracker, float(gaze_output_frequency))
    # --- Eye-tracking mode, track box and display area wrappers. ---

    def get_all_eye_tracking_modes(self):
        '''Gets a tuple of eye tracking modes supported by the eye tracker.

        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>eye_tracking_modes.py</CodeExample>

        Raises:
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError

        Returns:
            Tuple of strings with available eye tracking modes.
        '''
        return interop.get_all_eye_tracking_modes(self.__core_eyetracker)

    def get_eye_tracking_mode(self):
        '''Gets the eye tracking mode of the eye tracker.

        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>eye_tracking_modes.py</CodeExample>

        Raises:
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError

        Returns:
            String with the current eye tracking mode.
        '''
        return interop.get_eye_tracking_mode(self.__core_eyetracker)

    def set_eye_tracking_mode(self, eye_tracking_mode):
        '''Sets the eye tracking mode of the eye tracker.

        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>eye_tracking_modes.py</CodeExample>

        Args:
            eye_tracking_mode: The eye tracking mode as a string.

        Raises:
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError
            ValueError
        '''
        return interop.set_eye_tracking_mode(self.__core_eyetracker, eye_tracking_mode)

    def get_track_box(self):
        '''Gets the track box of the eye tracker.

        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>get_track_box.py</CodeExample>

        Raises:
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError

        Returns:
            Track box in the user coordinate system as a TrackBox object.
        '''
        return interop.get_track_box(self.__core_eyetracker)

    def get_display_area(self):
        ''' Gets the size and corners of the display area.

        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>get_and_set_display_area.py</CodeExample>

        Raises:
            EyeTrackerFeatureNotSupportedError
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError

        Returns:
            Display area in the user coordinate system as a DisplayArea object.
        '''
        return interop.get_display_area(self.__core_eyetracker)

    def set_display_area(self, display_area):
        ''' Sets the display area of the eye tracker.

        It is strongly recommended to use Eye Tracker Manager to calculate the display area coordinates
        as the origin of the User Coordinate System differs between eye tracker models.
        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>get_and_set_display_area.py</CodeExample>

        Args:
            display_area: The eye tracker's desired display_area as a DisplayArea object.

        Raises:
            EyeTrackerFeatureNotSupportedError
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError
        '''
        interop.set_display_area(self.__core_eyetracker, display_area)
    # --- HMD lens configuration and device-name wrappers. ---

    def get_hmd_lens_configuration(self):
        ''' Gets the current lens configuration of the HMD based eye tracker.

        The lens configuration describes how the lenses of the HMD device are positioned.
        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>get_hmd_lens_configuration.py</CodeExample>

        Raises:
            EyeTrackerFeatureNotSupportedError
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError

        Returns:
            Lens configuration as a HMDLensConfiguration object.
        '''
        return interop.get_hmd_lens_configuration(self.__core_eyetracker)

    def set_hmd_lens_configuration(self, lens_configuration):
        ''' Sets the lens configuration of the HMD based eye tracker.

        The lens configuration describes how the lenses of the HMD device are positioned.
        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>set_hmd_lens_configuration.py</CodeExample>

        Args:
            lens_configuration: The eye tracker's desired lens configuration as a HMDLensConfiguration object.

        Raises:
            EyeTrackerFeatureNotSupportedError
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError
        '''
        interop.set_hmd_lens_configuration(self.__core_eyetracker, lens_configuration)

    def set_device_name(self, device_name):
        '''Changes the device name. This is not supported by all eye trackers.

        See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
        <CodeExample>set_device_name.py</CodeExample>

        Args:
            device_name: The eye tracker's desired name.

        Raises:
            EyeTrackerFeatureNotSupportedError
            EyeTrackerConnectionFailedError
            EyeTrackerInternalError
            EyeTrackerLicenseError
        '''
        interop.set_device_name(self.__core_eyetracker, device_name)
        # Re-fetch the device record so the cached device_name property
        # reflects the change.
        self.__init_from_data(interop.get_device_data(self.__core_eyetracker))
def subscribe_to(self, subscription_type, callback, as_dictionary=False):
'''Subscribes to data for the eye tracker.
See @ref find_all_eyetrackers or EyeTracker.__init__ on how to create an EyeTracker object.
You can subscribe to @ref EYETRACKER_EXTERNAL_SIGNAL, | |
# flake8: noqa
import argparse
import os
import json
import sys
import time
from typing import Tuple
import boto3
import dask
import dask.dataframe as dd
import mlflow
import pandas as pd
import ray
import torch
import torch.nn as nn
import torch.optim as optim
from dask_ml.compose import ColumnTransformer
from dask_ml.model_selection import train_test_split
from dask_ml.preprocessing import OneHotEncoder
from dask_ml.preprocessing import StandardScaler
from ray import train
from ray.train import Trainer
from ray.train import TrainingCallback
from ray.train.callbacks import TBXLoggerCallback
from ray.util.dask import ray_dask_get
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
# TODO(amogkam): Upstream this into Ray Train.
class MLflowCallback(TrainingCallback):
    """Ray Train callback that mirrors rank-0 worker results into MLflow.

    Logs the training config as run params, each reported result as metrics,
    and the checkpoint directory as artifacts when training finishes.
    """

    def __init__(self, config):
        # Kept so start_training can log the run's hyperparameters.
        self.config = config

    def handle_result(self, results, **info):
        # For each result that's being reported by ``train.report()``,
        # we get the result from the rank 0 worker (i.e. first worker) and
        # report it to MLflow.
        rank_zero_results = results[0]
        mlflow.log_metrics(rank_zero_results)

    # TODO: fix type hint for logdir
    def start_training(self, logdir, **info):
        mlflow.start_run(run_name=str(logdir.name))
        # BUG FIX: previously logged the bare name `config`, which is not
        # defined in this scope; the callback's own config was intended.
        mlflow.log_params(self.config)
        # TODO: Update TrainCallback to provide logdir in finish_training.
        self.logdir = logdir

    def finish_training(self, error: bool = False, **info):
        # Save the Trainer checkpoints as artifacts to mlflow.
        mlflow.log_artifacts(self.logdir)
def read_dask_dataframe(path: str) -> "dask.DataFrame":
    """Load a parquet dataset with Ray Datasets, shuffle it, and hand it to
    Dask as a dataframe with a fresh integer index.

    Args:
        path: Parquet file or directory (local or s3) readable by Ray.
    """
    print(f"reading data from {path}")
    # _spread_resource_prefix="node:" spreads the read/shuffle tasks across
    # cluster nodes instead of packing them onto one.
    dataset = ray.data.read_parquet(path, _spread_resource_prefix="node:")
    print(f"total data size: {dataset.size_bytes()}")
    return dataset.random_shuffle(
        _spread_resource_prefix="node:").to_dask().reset_index()
class DataPreprocessor:
    """Fits preprocessing state (a one-hot column transformer and a scaler)
    on the training data and reuses that state to transform inference data.

    Call preprocess_train_data() once before preprocess_inference_data().
    """

    def __init__(self):
        # Both are fitted during the first training pass and required
        # (asserted) before inference.
        self.column_transformer = None
        self.scaler = None

    def preprocess_train_data(self, df: "dask.DataFrame"
                              ) -> Tuple["dask.DataFrame", "dask.DataFrame"]:
        """Fit the transformers on df and return (train_df, test_df)."""
        return self._preprocess(df, False)

    def preprocess_inference_data(
            self,
            df: "dask.DataFrame") -> Tuple["dask.DataFrame", "dask.DataFrame"]:
        """Transform df with the already-fitted transformers."""
        return self._preprocess(df, True)[0]

    def _preprocess(self, df: "dask.DataFrame", inferencing: bool
                    ) -> Tuple["dask.DataFrame", "dask.DataFrame"]:
        """Shared pipeline: drop nulls, derive features, one-hot encode the
        `fruit` column, and standardize; fits state unless inferencing."""
        df = df.loc[:, df.columns != "index"]
        # remove nulls and/or NaNs scalably with dask
        print(f"step1: drop nulls from rows")
        df = df.dropna(subset=["nullable_feature"])
        print(f"step2: creating new_col and updatingfeature_1")
        df["new_col"] = (
            df["feature_1"] - 2 * df["feature_2"] + df["feature_3"]) / 3.
        df["feature_1"] = 2. * df["feature_1"] + 0.1
        # TODO: this doesn't work with more than 1 parquet file
        # df['mean_by_fruit'] = df.groupby('fruit')['feature_1'].transform('mean')
        print(f"step3: one-hot encoding fruit")
        df = df.astype({"fruit": "category"})
        df = df.categorize()
        # BUG FIX: dask's persist() returns a new persisted collection; the
        # previous bare `df.persist()` discarded the result and was a no-op.
        df = df.persist()
        if inferencing:
            assert self.column_transformer is not None
            df_fruits = self.column_transformer.transform(df)
        else:
            assert self.column_transformer is None
            self.column_transformer = ColumnTransformer(
                [("one-hot", OneHotEncoder(sparse=False), ["fruit"])])
            df_fruits = self.column_transformer.fit_transform(df)
        df_data = df.loc[:, (df.columns != "label") & (df.columns != "fruit")]
        df_data = dd.concat([df_data, df_fruits], axis=1)
        assert df_data.isnull().sum().sum().compute(
        ) == 0, "There are nulls or Nans in the data!"
        if inferencing:
            print(f"step4: standardrize inference dataset")
            assert self.scaler is not None
            df_data_inference = self.scaler.transform(df_data)
            return df_data_inference, None
        else:
            print(f"step4: standardrize train dataset")
            df_labels = df.loc[:, df.columns == "label"]
            df_data_train, df_data_test, df_label_train, df_label_test = train_test_split(
                df_data, df_labels)
            # BUG FIX: as above, bind the persisted collection.
            df_data_train = df_data_train.persist()
            assert self.scaler is None
            self.scaler = StandardScaler(
            )  # this just turns col values to z-scores
            df_data_train = self.scaler.fit_transform(df_data_train)
            df_data_test = self.scaler.transform(df_data_test)
            df_train = dd.concat([df_data_train, df_label_train], axis=1)
            df_test = dd.concat([df_data_test, df_label_test], axis=1)
            return df_train, df_test
def inference(dataset, model_cls: type, batch_size: int, result_path: str,
              use_gpu: bool):
    """Run batch inference over a Ray dataset and write predictions to
    parquet.

    Args:
        dataset: Ray dataset of preprocessed rows.
        model_cls: callable actor class instantiated once per worker.
        batch_size: rows per scoring batch.
        result_path: parquet output destination.
        use_gpu: reserve one GPU per inference actor when True.
    """
    print("inferencing...")
    num_gpus = 1 if use_gpu else 0
    # num_cpus=0 presumably so actor placement is driven by the GPU
    # reservation alone — confirm against the deployment config.
    dataset.map_batches(model_cls, compute="actors", batch_size=batch_size, num_gpus=num_gpus, num_cpus=0). \
        write_parquet(result_path)
"""
TODO: Define neural network code in pytorch
P0:
1) can take arguments to change size of net arbitrarily so we can stress test against distributed training on cluster
2) has a network (nn.module?), optimizer, and loss function for binary classification
3) has some semblence of regularization (ie: via dropout) so that this artificially gigantic net doesn't just overfit horrendously
4) works well with pytorch dataset we'll create from Ray data .to_torch_dataset()
P1:
1) also tracks AUC for training, testing sets and records to tensorboard to
"""
class Net(nn.Module):
    """Configurable fully-connected binary classifier.

    Architecture: Linear(n_features, num_hidden) + ReLU, then `n_layers`
    hidden Linear(num_hidden, num_hidden) + ReLU blocks (with Dropout after
    every `dropout_every`-th block), then a single-logit output layer for
    use with BCEWithLogitsLoss.

    Args:
        n_layers: number of hidden blocks.
        n_features: input feature count.
        num_hidden: hidden layer width.
        dropout_every: apply dropout after layers where i % dropout_every == 0.
        drop_prob: dropout probability.
    """

    def __init__(self, n_layers, n_features, num_hidden, dropout_every,
                 drop_prob):
        super().__init__()
        self.n_layers = n_layers
        self.dropout_every = dropout_every
        self.drop_prob = drop_prob
        self.fc_input = nn.Linear(n_features, num_hidden)
        self.relu_input = nn.ReLU()
        # Assigning an nn.Module to an attribute registers it as a submodule,
        # so the previous duplicate add_module() calls were redundant.  The
        # dropout module is only created for the layers where forward()
        # actually applies it; the old code registered an (unused) dropout
        # for every layer.  Dropout has no parameters, so state_dict is
        # unchanged.
        for i in range(self.n_layers):
            setattr(self, f"fc_{i}", nn.Linear(num_hidden, num_hidden))
            setattr(self, f"relu_{i}", nn.ReLU())
            if i % self.dropout_every == 0:
                setattr(self, f"drop_{i}", nn.Dropout(p=self.drop_prob))
        self.fc_output = nn.Linear(num_hidden, 1)

    def forward(self, x):
        x = self.fc_input(x)
        x = self.relu_input(x)
        for i in range(self.n_layers):
            x = getattr(self, f"fc_{i}")(x)
            x = getattr(self, f"relu_{i}")(x)
            if i % self.dropout_every == 0:
                x = getattr(self, f"drop_{i}")(x)
        # Raw logit; pair with BCEWithLogitsLoss / sigmoid thresholding.
        x = self.fc_output(x)
        return x
"""
TODO: training loop for NN
P0 Requirements:
1) should iterate through epochs, inner loop per batch
2) should keep running total of accuracy, loss (training & test) and record those to tensorboard
3) should perform windowing / shuffling per epoch
P1:
1) use Ray Tune for tuning / checkpointing
"""
def train_epoch(dataset, model, device, criterion, optimizer):
    """Run one optimization pass over ``dataset``.

    Returns:
        Tuple (running_loss, num_correct, num_total) accumulated over all
        batches, where correctness is sigmoid(logit) > 0.5 vs. the label.
    """
    running_loss, num_correct, num_total = 0.0, 0, 0
    for i, (inputs, labels) in enumerate(dataset):
        inputs, labels = inputs.to(device), labels.to(device)
        # Standard step: clear grads, forward, backward, update.
        optimizer.zero_grad()
        outputs = model(inputs.float())
        loss = criterion(outputs, labels.float())
        loss.backward()
        optimizer.step()
        # Track thresholded-prediction accuracy and the loss running total.
        batch_predictions = (torch.sigmoid(outputs) > 0.5).int()
        num_correct += (batch_predictions == labels).sum().item()
        num_total += len(outputs)
        running_loss += loss.item()
        if i % 100 == 0:
            print(f"training batch [{i}] loss: {loss.item()}")
    return (running_loss, num_correct, num_total)
def test_epoch(dataset, model, device, criterion):
num_correct = 0
num_total = 0
running_loss = 0.0
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataset):
inputs = inputs.to(device)
labels = labels.to(device)
# Forward + backward + optimize
outputs = model(inputs.float())
loss = criterion(outputs, labels.float())
# how are we doing?
predictions = (torch.sigmoid(outputs) > 0.5).int()
num_correct += (predictions == labels).sum().item()
num_total += len(outputs)
# Save loss to plot
running_loss += loss.item()
if i % 100 == 0:
print(f"testing batch [{i}] loss: {loss.item()}")
return (running_loss, num_correct, num_total)
def train_func(config):
    """Train a Net on the preprocessed dataset; runs either standalone or as
    a Ray Train worker (config["is_distributed"] = True).

    Returns the trained model: in distributed mode only rank 0 returns it
    (moved to CPU), other ranks return None.
    """
    # Required config keys; "train_dataset"/"test_dataset" are additionally
    # required when not running distributed.
    is_distributed = config.get("is_distributed", False)
    use_gpu = config["use_gpu"]
    num_epochs = config["num_epochs"]
    batch_size = config["batch_size"]
    num_layers = config["num_layers"]
    num_hidden = config["num_hidden"]
    dropout_every = config["dropout_every"]
    dropout_prob = config["dropout_prob"]
    num_features = config["num_features"]
    print("Defining model, loss, and optimizer...")
    # Setup device: one GPU per worker rank when distributed.
    if is_distributed:
        device = torch.device(f"cuda:{train.local_rank()}" if use_gpu
                              and torch.cuda.is_available() else "cpu")
    else:
        device = torch.device("cuda:0" if use_gpu
                              and torch.cuda.is_available() else "cpu")
    print(f"Device: {device}")
    # Setup data: distributed workers get their shard from Ray Train,
    # otherwise the datasets come directly from the config.
    if is_distributed:
        train_dataset_pipeline = train.get_dataset_shard("train_dataset")
        train_dataset_epoch_iterator = train_dataset_pipeline.iter_epochs()
        test_dataset = train.get_dataset_shard("test_dataset")
    else:
        train_dataset_epoch_iterator = config["train_dataset"].iter_epochs()
        test_dataset = config["test_dataset"]
    test_torch_dataset = test_dataset.to_torch(
        label_column="label", batch_size=batch_size)
    # Setup Tensorboard and MLflow.
    if is_distributed:
        # Setup is done through Callback.
        pass
    else:
        writer = SummaryWriter()
        mlflow.start_run()
        mlflow_config = config.copy()
        # Datasets are not loggable as MLflow params; drop them.
        mlflow_config.pop("test_dataset")
        mlflow_config.pop("train_dataset")
        mlflow.log_params(mlflow_config)
    net = Net(
        n_layers=num_layers,
        n_features=num_features,
        num_hidden=num_hidden,
        dropout_every=dropout_every,
        drop_prob=dropout_prob,
    ).to(device)
    print(net.parameters)
    if is_distributed:
        net = DistributedDataParallel(net)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(net.parameters(), weight_decay=0.0001)
    print("Starting training...")
    for epoch in range(num_epochs):
        # One (re-shuffled) epoch of training data from the pipeline.
        train_dataset = next(train_dataset_epoch_iterator)
        train_torch_dataset = train_dataset.to_torch(
            label_column="label", batch_size=batch_size)
        train_running_loss, train_num_correct, train_num_total = train_epoch(
            train_torch_dataset, net, device, criterion, optimizer)
        train_acc = train_num_correct / train_num_total
        print(
            f"epoch [{epoch + 1}]: training accuracy: {train_num_correct} / {train_num_total} = {train_acc:.4f}"
        )
        test_running_loss, test_num_correct, test_num_total = test_epoch(
            test_torch_dataset, net, device, criterion)
        test_acc = test_num_correct / test_num_total
        print(
            f"epoch [{epoch + 1}]: testing accuracy: {test_num_correct} / {test_num_total} = {test_acc:.4f}"
        )
        # Record and log stats: through Ray Train callbacks when distributed,
        # directly to Tensorboard/MLflow otherwise.
        if is_distributed:
            train.report(
                train_acc=train_acc,
                train_loss=train_running_loss,
                test_acc=test_acc,
                test_loss=test_running_loss)
        else:
            writer.add_scalar("Accuracy/train", train_acc, epoch)
            writer.add_scalar("Loss/train", train_running_loss, epoch)
            writer.add_scalar("Accuracy/test", test_acc, epoch)
            writer.add_scalar("Loss/test", test_running_loss, epoch)
            writer.flush()
            mlflow.log_metrics({
                "train_acc": train_acc,
                "train_loss": train_running_loss,
                "test_acc": test_acc,
                "test_loss": test_running_loss
            })
        # Checkpoint model.
        if is_distributed:
            import copy
            # Deep-copy before .cpu() so the live training model (possibly on
            # GPU, wrapped in DDP) is not disturbed mid-training.
            model_copy = copy.deepcopy(net.module)
            train.save_checkpoint(
                model_state_dict=model_copy.cpu().state_dict())
        else:
            torch.save(net.state_dict(), f"models/model-epoch-{epoch}.torch")
    # Shutdown Tensorboard and MLflow.
    if is_distributed:
        pass
    else:
        writer.close()
        # mlflow.end_run()
    if is_distributed:
        if train.world_rank() == 0:
            # Only rank 0 hands the (unwrapped, CPU) model back to the driver.
            return net.module.cpu()
        else:
            return None
    else:
        return net
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--use-s3",
action="store_true",
default=False,
help="Use data from s3 for testing.")
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Finish quickly for testing.")
parser.add_argument(
"--address",
required=False,
type=str,
help="The address to use for Ray. `auto` if running through `ray submit"
)
parser.add_argument(
"--num-workers",
default=1,
type=int,
help="If > 1, number of Ray workers to use for distributed training")
parser.add_argument(
"--large-dataset",
action="store_true",
default=False,
help="Use 500GB dataset")
parser.add_argument(
"--use-gpu",
action="store_true",
default=False,
help="Use GPU for training.")
parser.add_argument(
"--mlflow-register-model",
action="store_true",
help="Whether to use mlflow model registry. If set, a local MLflow "
"tracking server is expected to have already been started.")
args = parser.parse_args()
smoke_test = args.smoke_test
address = args.address
num_workers = args.num_workers
use_gpu = args.use_gpu
use_s3 = args.use_s3
large_dataset = args.large_dataset
if large_dataset:
assert use_s3, "--large-dataset requires --use-s3 to be set."
start_time = time.time()
ray.init(address=address)
dask.config.set(scheduler=ray_dask_get)
# Setup MLflow.
# By default, all metrics & artifacts for each run will be saved to disk
# in ./mlruns directory. Uncomment the below lines if you want to change
# the URI | |
<reponame>satroutr/poppy
# coding= utf-8
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ddt
import uuid
from nose.plugins import attrib
from tests.api import providers
from tests.api.utils import config
@ddt.ddt
class TestAuthorizationService(providers.TestProviderBase):
"""Security Tests for authorization vulnerabilities
These test cases cover authorization checks for service functions.
They check whether it is possible to create/patch/list/delete services
without valid tokens or no token at all. It also checks whether it is
possible to create/patch/list/delete services for one user using another
user's valid token. The supposed responses should be 401 errors.
"""
    def setUp(self):
        """Setup for the tests"""
        super(TestAuthorizationService, self).setUp()
        self.auth_config = config.AuthConfig()
        # These authorization tests are only meaningful with auth enabled.
        if self.auth_config.auth_enabled is False:
            self.skipTest(
                'Auth is currently disabled in configuration')
        self.service_url = ''
        # Random service name keeps runs independent of leftover services.
        self.service_name = str(uuid.uuid1())
        self.flavor_id = self.test_flavor
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_create_service_no_token(self, test_data):
"""Check creating a service without token."""
domain_list = test_data['domain_list']
for item in domain_list:
item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
origin_list = test_data['origin_list']
caching_list = test_data['caching_list']
flavor_id = self.flavor_id
# create header without token
headers = {"X-Auth-Token": ""}
kwargs = {"headers": headers}
resp = self.client.create_service(service_name=self.service_name,
domain_list=domain_list,
origin_list=origin_list,
caching_list=caching_list,
flavor_id=flavor_id,
requestslib_kwargs=kwargs)
self.assertTrue(resp.status_code == 401)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
else:
self.service_url = ''
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_create_service_other_user_token(self, test_data):
    """Creating a service with another user's token must return 401."""
    # Valid token, but belonging to a different user than the project.
    headers = {"X-Auth-Token": self.alt_user_client.auth_token,
               "X-Project-Id": self.client.project_id}
    kwargs = {"headers": headers}
    domain_list = test_data['domain_list']
    for item in domain_list:
        # Randomize the domain so repeated runs never collide.
        item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
    origin_list = test_data['origin_list']
    caching_list = test_data['caching_list']
    resp = self.client.create_service(service_name=self.service_name,
                                      domain_list=domain_list,
                                      origin_list=origin_list,
                                      caching_list=caching_list,
                                      flavor_id=self.flavor_id,
                                      requestslib_kwargs=kwargs)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 401)
    # Remember the location (if any) so cleanup can find the service.
    self.service_url = resp.headers.get('location', '')
@attrib.attr('security')
def test_authorization_list_services_other_user_token(self):
    """Listing services with another user's token must return 401."""
    self.service_url = ''
    # Valid token, but belonging to a different user than the project.
    headers = {"X-Auth-Token": self.alt_user_client.auth_token,
               "X-Project-Id": self.client.project_id}
    kwargs = {"headers": headers}
    resp = self.client.list_services(requestslib_kwargs=kwargs)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 401)
@attrib.attr('security')
def test_authorization_list_service_no_token(self):
    """Listing all services with an empty auth token must return 401."""
    self.service_url = ''
    # Empty token simulates an unauthenticated request.
    headers = {"X-Auth-Token": ""}
    kwargs = {"headers": headers}
    resp = self.client.list_services(requestslib_kwargs=kwargs)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 401)
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_get_service_other_user_token(self, test_data):
    """Getting a service with another user's token must return 401."""
    headers = {"X-Auth-Token": self.alt_user_client.auth_token,
               "X-Project-Id": self.client.project_id}
    kwargs = {"headers": headers}
    domain_list = test_data['domain_list']
    for item in domain_list:
        # Randomize the domain so repeated runs never collide.
        item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
    origin_list = test_data['origin_list']
    caching_list = test_data['caching_list']
    # First create the service with valid credentials.
    resp = self.client.create_service(service_name=self.service_name,
                                      domain_list=domain_list,
                                      origin_list=origin_list,
                                      caching_list=caching_list,
                                      flavor_id=self.flavor_id)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 202)
    self.service_url = resp.headers.get('location', '')
    # Sanity check: the owner can read the service...
    resp = self.client.get_service(location=self.service_url)
    self.assertEqual(resp.status_code, 200)
    # ...but another user's token must be rejected.
    resp = self.client.get_service(location=self.service_url,
                                   requestslib_kwargs=kwargs)
    self.assertEqual(resp.status_code, 401)
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_get_service_no_token(self, test_data):
    """Getting a service with an empty auth token must return 401."""
    # Empty token simulates an unauthenticated request.
    headers = {"X-Auth-Token": ""}
    kwargs = {"headers": headers}
    domain_list = test_data['domain_list']
    for item in domain_list:
        # Randomize the domain so repeated runs never collide.
        item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
    origin_list = test_data['origin_list']
    caching_list = test_data['caching_list']
    # First create the service with valid credentials.
    resp = self.client.create_service(service_name=self.service_name,
                                      domain_list=domain_list,
                                      origin_list=origin_list,
                                      caching_list=caching_list,
                                      flavor_id=self.flavor_id)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 202)
    self.service_url = resp.headers.get('location', '')
    # Sanity check: the owner can read the service...
    resp = self.client.get_service(location=self.service_url)
    self.assertEqual(resp.status_code, 200)
    # ...but the unauthenticated request must be rejected.
    resp = self.client.get_service(location=self.service_url,
                                   requestslib_kwargs=kwargs)
    self.assertEqual(resp.status_code, 401)
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_delete_service_no_token(self, test_data):
    """Deleting a service with an empty auth token must return 401."""
    # Empty token simulates an unauthenticated request.
    headers = {"X-Auth-Token": ""}
    kwargs = {"headers": headers}
    domain_list = test_data['domain_list']
    for item in domain_list:
        # Randomize the domain so repeated runs never collide.
        item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
    origin_list = test_data['origin_list']
    caching_list = test_data['caching_list']
    # First create the service with valid credentials.
    resp = self.client.create_service(service_name=self.service_name,
                                      domain_list=domain_list,
                                      origin_list=origin_list,
                                      caching_list=caching_list,
                                      flavor_id=self.flavor_id)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 202)
    self.service_url = resp.headers.get('location', '')
    resp = self.client.get_service(location=self.service_url)
    self.assertEqual(resp.status_code, 200)
    if self.service_url != '':
        # The unauthenticated delete must be rejected...
        resp = self.client.delete_service(location=self.service_url,
                                          requestslib_kwargs=kwargs)
        self.assertEqual(resp.status_code, 401)
        # ...then clean up with valid credentials.
        self.client.delete_service(location=self.service_url)
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_patch_service_another_token(self, test_data):
    """Patching a service with another user's token must return 401."""
    headers = {"X-Auth-Token": self.alt_user_client.auth_token,
               "X-Project-Id": self.client.project_id}
    kwargs = {"headers": headers}
    domain_list = test_data['domain_list']
    for item in domain_list:
        # Randomize the domain so repeated runs never collide.
        item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
    origin_list = test_data['origin_list']
    caching_list = test_data['caching_list']
    # JSON-Patch body that appends one new (unique) domain.
    domain_name = "api-test.replacemereplaceme%s.com" % str(uuid.uuid1())
    test_patch_data = [{"op": "add",
                        "path": "/domains/-",
                        "value": {"domain": "%s" % (domain_name)}}]
    # First create the service with valid credentials.
    resp = self.client.create_service(service_name=self.service_name,
                                      domain_list=domain_list,
                                      origin_list=origin_list,
                                      caching_list=caching_list,
                                      flavor_id=self.flavor_id)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 202)
    self.service_url = resp.headers.get('location', '')
    if self.service_url != '':
        # Another user's token must not be able to patch the service.
        resp = self.client.patch_service(location=self.service_url,
                                         request_body=test_patch_data,
                                         requestslib_kwargs=kwargs)
        self.assertEqual(resp.status_code, 401)
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_delete_service_other_user_token(self, test_data):
    """Deleting a service with another user's token must return 401."""
    headers = {"X-Auth-Token": self.alt_user_client.auth_token,
               "X-Project-Id": self.client.project_id}
    kwargs = {"headers": headers}
    domain_list = test_data['domain_list']
    for item in domain_list:
        # Randomize the domain so repeated runs never collide.
        item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
    origin_list = test_data['origin_list']
    caching_list = test_data['caching_list']
    # First create the service with valid credentials.
    resp = self.client.create_service(service_name=self.service_name,
                                      domain_list=domain_list,
                                      origin_list=origin_list,
                                      caching_list=caching_list,
                                      flavor_id=self.flavor_id)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 202)
    self.service_url = resp.headers.get('location', '')
    resp = self.client.get_service(location=self.service_url)
    self.assertEqual(resp.status_code, 200)
    if self.service_url != '':
        # NOTE(review): the created service is never deleted with valid
        # credentials afterwards — relies on external cleanup; verify.
        resp = self.client.delete_service(location=self.service_url,
                                          requestslib_kwargs=kwargs)
        self.assertEqual(resp.status_code, 401)
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_delete_service_invalid_token(self, test_data):
    """Deleting a service with an invalid token must return 401."""
    # A syntactically present but bogus token (1000 characters of '1').
    headers = {"X-Auth-Token": "1" * 1000}
    kwargs = {"headers": headers}
    domain_list = test_data['domain_list']
    for item in domain_list:
        # Randomize the domain so repeated runs never collide.
        item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
    origin_list = test_data['origin_list']
    caching_list = test_data['caching_list']
    # First create the service with valid credentials.
    resp = self.client.create_service(service_name=self.service_name,
                                      domain_list=domain_list,
                                      origin_list=origin_list,
                                      caching_list=caching_list,
                                      flavor_id=self.flavor_id)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 202)
    self.service_url = resp.headers.get('location', '')
    resp = self.client.get_service(location=self.service_url)
    self.assertEqual(resp.status_code, 200)
    if self.service_url != '':
        # The delete with an invalid token must be rejected.
        resp = self.client.delete_service(location=self.service_url,
                                          requestslib_kwargs=kwargs)
        self.assertEqual(resp.status_code, 401)
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_get_service_invalid_token(self, test_data):
    """Getting a service with an invalid token must return 401."""
    # A syntactically present but bogus token (1000 characters of '1').
    headers = {"X-Auth-Token": "1" * 1000}
    kwargs = {"headers": headers}
    domain_list = test_data['domain_list']
    for item in domain_list:
        # Randomize the domain so repeated runs never collide.
        item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
    origin_list = test_data['origin_list']
    caching_list = test_data['caching_list']
    # First create the service with valid credentials.
    resp = self.client.create_service(service_name=self.service_name,
                                      domain_list=domain_list,
                                      origin_list=origin_list,
                                      caching_list=caching_list,
                                      flavor_id=self.flavor_id)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 202)
    self.service_url = resp.headers.get('location', '')
    # Sanity check: the owner can read the service...
    resp = self.client.get_service(location=self.service_url)
    self.assertEqual(resp.status_code, 200)
    # ...but the invalid token must be rejected.
    resp = self.client.get_service(location=self.service_url,
                                   requestslib_kwargs=kwargs)
    self.assertEqual(resp.status_code, 401)
@attrib.attr('security')
def test_authorization_list_service_invalid_token(self):
    """Listing all services with an invalid token must return 401."""
    self.service_url = ''
    # A syntactically present but bogus token (1000 characters of '1').
    headers = {"X-Auth-Token": "1" * 1000}
    kwargs = {"headers": headers}
    resp = self.client.list_services(requestslib_kwargs=kwargs)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 401)
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_create_service_invalid_token(self, test_data):
    """Creating a service with an invalid token must return 401."""
    # A syntactically present but bogus token (1000 characters of '1').
    headers = {"X-Auth-Token": "1" * 1000}
    kwargs = {"headers": headers}
    domain_list = test_data['domain_list']
    for item in domain_list:
        # Randomize the domain so repeated runs never collide.
        # CONSISTENCY FIX: every sibling test uses the 'api-test.' prefix;
        # this one was missing the dot.
        item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
    origin_list = test_data['origin_list']
    caching_list = test_data['caching_list']
    resp = self.client.create_service(service_name=self.service_name,
                                      domain_list=domain_list,
                                      origin_list=origin_list,
                                      caching_list=caching_list,
                                      flavor_id=self.flavor_id,
                                      requestslib_kwargs=kwargs)
    # Remember the location (if any) so cleanup can find the service.
    self.service_url = resp.headers.get('location', '')
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 401)
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_patch_service_invalid_token(self, test_data):
    """Patching a service with an invalid token must return 401."""
    # A syntactically present but bogus token (1000 characters of '1').
    headers = {"X-Auth-Token": "1" * 1000}
    kwargs = {"headers": headers}
    domain_list = test_data['domain_list']
    for item in domain_list:
        # Randomize the domain so repeated runs never collide.
        item['domain'] = 'api-test.' + str(uuid.uuid1()) + '.com'
    origin_list = test_data['origin_list']
    caching_list = test_data['caching_list']
    # JSON-Patch body that appends one new (unique) domain.
    domain_name = "api-test.replacemereplaceme%s.com" % str(uuid.uuid1())
    test_patch_data = [{"op": "add",
                        "path": "/domains/-",
                        "value": {"domain": "%s" % (domain_name)}}]
    # First create the service with valid credentials.
    resp = self.client.create_service(service_name=self.service_name,
                                      domain_list=domain_list,
                                      origin_list=origin_list,
                                      caching_list=caching_list,
                                      flavor_id=self.flavor_id)
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(resp.status_code, 202)
    self.service_url = resp.headers.get('location', '')
    resp = self.client.get_service(location=self.service_url)
    self.assertEqual(resp.status_code, 200)
    # The patch with an invalid token must be rejected.
    resp = self.client.patch_service(location=self.service_url,
                                     request_body=test_patch_data,
                                     requestslib_kwargs=kwargs)
    self.assertEqual(resp.status_code, 401)
    # Clean up with valid credentials.
    self.client.delete_service(location=self.service_url)
@attrib.attr('security')
@ddt.file_data('data_create_service_authorization.json')
def test_authorization_patch_service_other_user_token(self, test_data):
"""Check patching service with a valid token from another user."""
# replace the token with another user's token
headers = {"X-Auth-Token": self.alt_user_client.auth_token,
"X-Project-Id": self.client.project_id}
kwargs = {"headers": headers}
domain_list = test_data['domain_list']
for item in domain_list:
item['domain'] = 'api-test.' + | |
<gh_stars>0
# -*- coding: utf-8 -*-
r"""
Interface for a simple data store where the root and subtables are nested
Python :py:class:`dict` objects.
AUTHORS:
- <NAME> (2010-03-01): initial version, 0.1
EXAMPLES::
from neuronpy.util import dictdb
import datetime
the_dict = { \
'sub1' : { \
'key_a' : { \
'key_a_a' : 1, \
'time_stamp' : \
datetime.datetime(2010, 7, 23, 18, 43, 36, 640692), \
'key_a_b' : 'ABCDEFG' }, \
'key_b' : [1,2,3], \
'key_c' : { \
'key_c_a' : 2, \
'key_c_b' : '<KEY>' }}, \
'sub2' : { \
'key_a' : { \
'key_a_a' : 2, \
'time_stamp' : \
datetime.datetime(2010, 8, 23, 18, 43, 36, 640692), \
'key_a_b' : 'XYZPDQ' }, \
'key_b' : [4,5,6], \
'key_c' : { \
'key_c_a' : 1, \
'key_c_b' : 'ABCDEFG' }}, \
'different_sub1' : { \
'different_key_a' : { \
'different_key_a_a' : 3, \
'different_key_a_b' : [1,2,3,4,5] }, \
'different_key_b' : None, \
'different_key_c' : { \
'different_key_c_a' : 2.0, \
'different_key_c_b' : ['a', 'b', 'c'] }} \
}
Nodes ``'sub1'``, ``'sub2'``, and ``'different_sub1'`` refer to trunk
nodes, which may be thought of as records in the database.
Notice that in this dict, subdictionaries can each be of arbitrary depth, and
contain any data type that can be put into a dict. This example shows records
``'sub1'`` and ``'sub2'`` share the same structure, but ``'different_sub1'``
contains different data types.
There are two related functions for retrieving records: :func:`filter_dict` and
:func:`match`. Both approaches use user-defined :py:keyword:`lambda` functions.
The :func:`filter_dict` function is itself a generator function that operates
on ``(key, value, parent_keys)`` tuples. This function therefore allows
any of these values to be ignored and one can search for keys or values
irrespective of their key associations.
The :func:`match` method permits multiple queries where a given key meets some
condition. The key and the
condition as a :py:keyword:`lambda` function are provided in a tuple:
``('key', lambda v: <some_operation_with_v>)`` where *"some_operation_with_v"*
evaluates to ``True`` or ``False``.
"""
# While this software is under the permissive MIT License,
# (http://www.opensource.org/licenses/mit-license.php)
# We ask that you cite the neuronpy package (or tools used in this package)
# in any publications and contact the author with your referenced publication.
#
# Format:
# McTavish, T.S. NeuronPy library, version 0.1, http://bitbucket.org/tommctavish/neuronpy
#
# Copyright (c) 2010 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def filter_dict(the_dict, predicate=lambda k, v, p: True):
    r"""
    Filter a dict by some function on ``(key, value, parent_key)`` tuples.

    Generator yielding the ``(key, value)`` pairs of ``the_dict`` whose value
    is itself a dict containing, at any depth, an entry satisfying
    ``predicate``.  Note that only trunk entries whose value is a dict can be
    yielded; non-dict trunk values are skipped even if they match.

    :param the_dict: The dict to filter.

    :param predicate: A :py:keyword:`lambda` function on ``(k, v, p)``,
        key, value, key_parent tuples. Where ``k`` and ``v`` are their normal,
        ``key-value``s and where ``p`` is a list of sub-dict keys, from the
        trunk of the_dict to the current key, ``k``.

    EXAMPLES:

    Retrieve a subdict that contains any value with ``'ABCDEFG'``. In this
    case, ``k`` and ``p`` in the lambda function are ignored.

    .. code-block:: python

        filtered = dict(filter_dict(the_dict, lambda k, v, p: v=='ABCDEFG'))

    Retrieve a subdict where the ``'time_stamp'`` is greater than 30 days.

    .. code-block:: python

        def date_filter(k, v, p):
            return k == 'time_stamp' and \
                    (datetime.datetime.now() - v).days >= 30

        filtered = dict(filter_dict(the_dict, date_filter))

    If a sub-key is not unique, then we can query the parent keys. In this
    case, ``p`` is a list of keys starting from the first sub-dict to the
    current key. Therefore, ``k == p[-1]``. This example searches for string
    'ABCDEF', but mandates that ``k == 'key_c_b'`` and the parent key is
    ``p == 'key_c'``.

    .. code-block:: python

        def f(k, v, p):
            return k == 'key_c_b' and len(p) >= 2 and p[-2] == 'key_c'

        filtered = dict(filter_dict(the_dict, f))
    """
    # One shared path list, reset per trunk entry, is mutated in place by
    # the recursive helper to track where in the tree we are.
    parent_keys = []
    for k, v in the_dict.iteritems():  # Python 2 dict API
        parent_keys[:] = []
        parent_keys.append(k)
        if isinstance(v, dict) and _filter_dict_sub(predicate, v, parent_keys):
            yield k, v
def _filter_dict_sub(predicate, the_dict, parent_keys):
    """
    Sub-function to filter_dict for recursing.

    Depth-first search of ``the_dict`` for any entry satisfying
    ``predicate``.  ``parent_keys`` is the path of keys from the trunk to
    the current dict; it is mutated in place (append before descending,
    pop on the way back) so ``parent_keys[-1]`` is always the current key.
    Returns True as soon as one match is found anywhere in this subtree.
    """
    for k, v in the_dict.iteritems():  # Python 2 dict API
        parent_keys.append(k)
        # Recurse into nested dicts first; a hit anywhere below counts.
        if isinstance(v, dict) and _filter_dict_sub(predicate, v, parent_keys):
            parent_keys.pop()
            return True
        # The predicate also sees dict-valued entries themselves.
        if predicate(k, v, parent_keys):
            parent_keys.pop()
            return True
        parent_keys.pop()
    return False
def match(the_dict, queries, mode = 'AND', search_type = 0):
r"""
Retrieve the root items of ``the_dict`` where
nested keys match the conditions to some number of queries.
:param the_dict: The :py:class:`dict` to search.
:param queries: A tuple or list of 2- (or preferably 3-) element tuples
with the first element being the nested key to search for and the
second element being the condition on that key's value. If a query cannot
be processed, it will be ignored, but a warning will print.
An optional third element can be specified as the list of parent
keys to help speed the search query and also help guarantee precise
results. By default, the first key found in any nested dict that
matches the key being searched for in the
query (the first element in the tuple) will be the only key evaluated.
The search algorithm will not continue looking for the key in other
nested dicts. Therefore, duplicate keys in nested dicts should be
avoided. If they are used, however, then you can specify the key of
the parent.
.. Note::
It is efficient to specify the parent keys as much as possible
because the query key may be nested quite deep. Using parent keys
permits a more direct lookup of the query key.
:param mode: Is ``'AND'`` by default and can be ``'OR'``. To mix,
``'AND'`` and ``'OR'`` queries, retrieve the sub-dictionary in one
condition and then call this function again passing in that
sub-dictionary with queries for the other condition.
.. Note::
Two or more ``'AND'`` queries prune a copy of the dictionary. The
first query returns a subdictionary that the second query operates
on and so forth. Searches through the subdictionaries will
therefore be faster if the first queries prune more of the original
dict than subsequent queries. Likewise, when mixing ``'AND'`` and
``'OR'`` queries, it is more efficient to process ``'AND'``
queries first.
:param search_type: By default is 0, but can be 1, or 2. Searches are
depth-first. This number specifies what is to happen when a key (and
any parent keys) are found:
- ``0``
Shallow search. When a match is found exit up to the root dict and
do not continue searching the trunk this branch is on.
- ``1``
Do not continue searching this branch.
- ``2``
Continue searching this branch for sub-keys with the same value.
:return: A sub-dict of ``the_dict`` where nested keys match the conditions
to queries. The elements in the returned dict are
copies of ``the_dict``, which remains unaltered.
EXAMPLES:
To search for records where ``'key_a_a' == 2``:
.. code-block:: python
subdict = dictdb.match(the_dict, ('key_a_a',lambda v: v==2))
for key in subdict.keys():
print key
| |
<reponame>UmaTaru/run
from functools import reduce
import torch
from torch._utils import _accumulate
from ..function import Function, InplaceFunction, once_differentiable
from ..variable import Variable
class Index(Function):
    """Autograd function for tensor indexing: ``result = i[index]``."""

    @staticmethod
    def forward(ctx, i, index):
        # Remember the full input size so backward can allocate a zero
        # gradient of the right shape.
        ctx.input_size = i.size()
        ctx.index = index
        result = i.index(ctx.index)
        # Tell the engine the output may share storage with the input.
        ctx.mark_shared_storage((i, result))
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # Scatter the incoming gradient into a zero tensor of the original
        # input size; positions not selected by `index` stay 0.
        grad_input = Variable(grad_output.data.new(ctx.input_size).zero_())
        grad_input[ctx.index] = grad_output
        # None for the non-differentiable `index` argument.
        return grad_input, None
class SetItem(InplaceFunction):
    """In-place assignment ``i[index] = value``; value may be tensor or scalar."""

    @staticmethod
    def forward(ctx, i, index, value):
        # Differentiating through a Variable index is unsupported here.
        assert not isinstance(index, Variable)
        ctx.mark_dirty(i)  # `i` is modified in place
        ctx.index = index
        ctx.tensor_value = torch.is_tensor(value)
        if ctx.tensor_value:
            # Needed to reshape the gathered gradient for `value` in backward.
            ctx.value_size = value.size()
        i._set_index(ctx.index, value)
        return i

    @staticmethod
    def backward(ctx, grad_output):
        grad_input = grad_output.clone()
        # Positions overwritten by `value` receive no gradient w.r.t. `i`.
        grad_input[ctx.index] = 0
        grad_value = None
        if ctx.tensor_value:
            # Gradient w.r.t. a tensor `value` is the gradient at the
            # assigned positions, reshaped back to value's original size.
            grad_value = grad_output[ctx.index].contiguous().view(ctx.value_size)
        return grad_input, None, grad_value
# TODO: how to do NoGrad in new style
class NoGrad(Function):
    """Return a detached copy of the input that stops gradient flow."""

    def forward(self, i):
        result = i.new(i)
        self.mark_non_differentiable(result)
        self.mark_shared_storage((i, result))
        return result

    def backward(self, grad_output):
        # The output is marked non-differentiable, so the engine should
        # never route a gradient through this node.
        assert False, "backward of NoGrad should never be called"

    def _do_forward(self, *args, **kwargs):
        # Old-style Function hook: after forward, flag this node as not
        # requiring grad so the graph is cut here.
        result = super(NoGrad, self)._do_forward(*args, **kwargs)
        self.requires_grad = False
        return result

    __call__ = _do_forward
class Transpose(Function):
    """Swap two dimensions of a tensor; backward swaps them back."""

    @staticmethod
    def forward(ctx, i, dim1, dim2):
        # Record the pair first; the order of these bookkeeping calls is
        # independent of computing the result.
        ctx.dims = (dim1, dim2)
        swapped = i.transpose(dim1, dim2)
        ctx.mark_shared_storage((i, swapped))
        return swapped

    @staticmethod
    def backward(ctx, grad_output):
        d1, d2 = ctx.dims
        # Transposing the same pair again is its own inverse.
        return grad_output.transpose(d1, d2), None, None
class View(Function):
    """Reshape a tensor to `sizes` without changing its data."""

    @staticmethod
    def forward(ctx, i, sizes):
        ctx.new_sizes = sizes
        ctx.old_size = i.size()  # needed to undo the reshape in backward
        result = i.view(*sizes)
        ctx.mark_shared_storage((i, result))  # view aliases the input
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # contiguous() first, because view() requires contiguous memory.
        return grad_output.contiguous().view(ctx.old_size), None
class Expand(Function):
    """Broadcast `i` to `new_size` without copying data."""

    @staticmethod
    def forward(ctx, i, new_size):
        # Leading dims present only in new_size (implicit unsqueezes).
        ctx.num_unsqueezed = len(new_size) - i.dim()
        # Existing dims that were broadcast to a different (larger) size.
        ctx.expanded_dims = [dim for dim, (expanded, original)
                             in enumerate(zip(new_size[ctx.num_unsqueezed:], i.size()))
                             if expanded != original]
        result = i.expand(*new_size)
        ctx.mark_shared_storage((i, result))
        return result

    @staticmethod
    def backward(ctx, grad_output):
        grad_input = grad_output
        # Sum out (and drop) the dimensions that were added in front...
        for i in range(ctx.num_unsqueezed):
            grad_input = grad_input.sum(0).squeeze(0)
        # ...and reduce over each dimension that was broadcast.
        for dim in ctx.expanded_dims:
            grad_input = grad_input.sum(dim)
        return grad_input, None
class Type(Function):
    """Cast a tensor to another type; backward casts the gradient back."""

    @staticmethod
    def forward(ctx, i, dest_type):
        # Keep the source type so the gradient can be converted back.
        source_type = type(i)
        ctx.input_type = source_type
        converted = i.type(dest_type)
        return converted

    @staticmethod
    def backward(ctx, grad_output):
        original_type = ctx.input_type
        return grad_output.type(original_type), None
class CudaTransfer(Function):
@staticmethod
def forward(ctx, i, device_id=None, async=False):
ctx.source_device = -1 if not i.is_cuda else i.get_device()
ctx.source_was_cuda = i.is_cuda
if device_id:
return i.cuda(device_id, async=async)
else:
return i.cuda(async=async)
@staticmethod
def backward(ctx, grad_output):
if ctx.source_device != -1:
return grad_output.cuda(ctx.source_device), None, None
elif ctx.source_was_cuda:
return grad_output, None, None
else:
return grad_output.cpu(), None, None
class Permute(Function):
    """Reorder tensor dimensions according to `dim_indices`."""

    @staticmethod
    def forward(ctx, input, dim_indices):
        # Build the inverse permutation so backward can undo the reorder.
        ctx.rev_dim_indices = [None for _ in range(len(dim_indices))]
        for i, dim_idx in enumerate(dim_indices):
            ctx.rev_dim_indices[dim_idx] = i
        result = input.permute(*dim_indices)
        ctx.mark_shared_storage((input, result))  # permute returns a view
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # Applying the inverse permutation restores the original layout.
        return grad_output.permute(*ctx.rev_dim_indices), None
class IndexAdd(InplaceFunction):
    """``tensor1.index_add_(dim, index, tensor2)``, optionally in place."""

    @staticmethod
    def forward(ctx, tensor1, dim, index, tensor2, inplace=False):
        # The integer index tensor (input 2) is not differentiable.
        assert not ctx.needs_input_grad[2]
        ctx.dim = dim
        if ctx.needs_input_grad[3]:
            # Only keep the index around if tensor2 needs a gradient.
            ctx.save_for_backward(index)
        if not inplace:
            tensor1 = tensor1.clone()
        else:
            ctx.mark_dirty(tensor1)
        return tensor1.index_add_(ctx.dim, index, tensor2)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        grad_tensor1 = grad_tensor2 = None
        if ctx.needs_input_grad[0]:
            # Addition passes the gradient straight through to tensor1.
            grad_tensor1 = grad_output
        if ctx.needs_input_grad[3]:
            index, = ctx.saved_tensors
            # tensor2 only influenced the indexed slices.
            grad_tensor2 = grad_output.index_select(ctx.dim, index)
        return grad_tensor1, None, None, grad_tensor2, None
class IndexCopy(InplaceFunction):
    """``tensor1.index_copy_(dim, index, tensor2)``, optionally in place."""

    @staticmethod
    def forward(ctx, tensor1, dim, index, tensor2, inplace=False):
        # The integer index tensor (input 2) is not differentiable.
        assert not ctx.needs_input_grad[2]
        ctx.dim = dim
        if any(ctx.needs_input_grad):
            ctx.save_for_backward(index)
        if not inplace:
            tensor1 = tensor1.clone()
        else:
            ctx.mark_dirty(tensor1)
        return tensor1.index_copy_(dim, index, tensor2)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        grad_tensor1 = grad_tensor2 = None
        if any(ctx.needs_input_grad):
            index, = ctx.saved_tensors
            if ctx.needs_input_grad[0]:
                # Copied-over positions contribute nothing to tensor1's grad.
                grad_tensor1 = grad_output.clone().index_fill_(ctx.dim, index, 0)
            # BUG FIX: tensor2 is input 3, not 2 (input 2 is the index,
            # asserted non-differentiable in forward). The old check
            # `needs_input_grad[2]` could never be true, so tensor2 never
            # received a gradient. Mirrors IndexAdd.backward.
            if ctx.needs_input_grad[3]:
                grad_tensor2 = grad_output.index_select(ctx.dim, index)
        return grad_tensor1, None, None, grad_tensor2, None
class IndexFill(InplaceFunction):
    """``tensor.index_fill_(dim, index, value)`` with a scalar `value`."""

    @staticmethod
    def forward(ctx, tensor, dim, index, value, inplace=False):
        ctx.dim = dim
        # The integer index tensor (input 2) is not differentiable.
        assert not ctx.needs_input_grad[2]
        if ctx.needs_input_grad[0]:
            ctx.save_for_backward(index)
        if not inplace:
            tensor = tensor.clone()
        else:
            ctx.mark_dirty(tensor)
        return tensor.index_fill_(dim, index, value)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        grad_tensor = None
        if ctx.needs_input_grad[0]:
            index, = ctx.saved_tensors
            # Filled positions no longer depend on the input: zero them out.
            grad_tensor = grad_output.clone().index_fill_(ctx.dim, index, 0)
        return grad_tensor, None, None, None, None
class IndexSelect(Function):
    """``tensor.index_select(dim, index)``."""

    @staticmethod
    def forward(ctx, tensor, dim, index):
        ctx.dim = dim
        # The integer index tensor (input 2) is not differentiable.
        assert not ctx.needs_input_grad[2]
        if ctx.needs_input_grad[0]:
            ctx.save_for_backward(index)
        ctx.input_size = tensor.size()
        return tensor.index_select(dim, index)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        grad_tensor = None
        if ctx.needs_input_grad[0]:
            index, = ctx.saved_tensors
            # index_add_ (not copy) accumulates in case `index` repeats rows.
            grad_tensor = grad_output.new(*ctx.input_size).zero_()
            grad_tensor.index_add_(ctx.dim, index, grad_output)
        return grad_tensor, None, None
class Concat(Function):
    """``torch.cat(inputs, dim)``; backward splits the gradient back apart."""

    @staticmethod
    def forward(ctx, dim, *inputs):
        ctx.dim = dim
        # Per-input sizes along `dim`, used to slice the gradient.
        ctx.input_sizes = [i.size(dim) for i in inputs]
        return torch.cat(inputs, dim)

    @staticmethod
    def backward(ctx, grad_output):
        # Leading None is for the `dim` argument; each narrow() recovers
        # the slice of the gradient belonging to one input.
        return (None,) + tuple(grad_output.narrow(ctx.dim, end - size, size) for size, end
                               in zip(ctx.input_sizes, _accumulate(ctx.input_sizes)))
# TODO: deprecate this
class Resize(Function):
    """Reshape to `sizes`; the total element count must be preserved."""

    @staticmethod
    def forward(ctx, tensor, sizes):
        ctx.sizes = sizes
        # Total number of elements requested, for the sanity checks below.
        ctx.numel = reduce(lambda x, y: x * y, sizes, 1)
        if tensor.numel() != ctx.numel:
            raise RuntimeError(("requested resize to {} ({} elements in total), "
                                "but the given tensor has a size of {} ({} elements). "
                                "autograd's resize can only change the shape of a given "
                                "tensor, while preserving the number of elements. ").format(
                'x'.join(map(str, sizes)), ctx.numel,
                'x'.join(map(str, tensor.size())), tensor.numel()))
        ctx.input_sizes = tensor.size()
        if tensor.is_contiguous():
            # Contiguous input: the result can alias the input's storage.
            result = tensor.new(tensor).contiguous().view(*sizes)
            ctx.mark_shared_storage((tensor, result))
            return result
        else:
            # Non-contiguous input: contiguous() makes a copy first.
            return tensor.contiguous().view(*sizes)

    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.numel() == ctx.numel
        return grad_output.contiguous().view(ctx.input_sizes), None
class Clone(Function):
    """Identity that materializes a fresh copy; gradients pass through."""

    @staticmethod
    def forward(ctx, input):
        copied = input.clone()
        return copied

    @staticmethod
    def backward(ctx, grad_output):
        # Copying is the identity w.r.t. values, so the grad is unchanged.
        return grad_output
class Squeeze(Function):
    """Remove size-1 dimensions — all of them, or only `dim` if given."""

    @staticmethod
    def forward(ctx, input, dim=None):
        ctx.dim = dim
        ctx.input_size = input.size()  # backward restores the squeezed dims
        if dim is not None:
            result = input.squeeze(dim)
        else:
            result = input.squeeze()
        ctx.mark_shared_storage((input, result))  # squeeze returns a view
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # Viewing back to the original size reinserts the squeezed dims.
        return grad_output.contiguous().view(ctx.input_size), None
class Unsqueeze(Function):
    """Insert a singleton dimension at `dim`; backward squeezes it back out."""

    @staticmethod
    def forward(ctx, input, dim):
        ctx.dim = dim
        expanded = input.unsqueeze(dim)
        ctx.mark_shared_storage((input, expanded))
        return expanded

    @staticmethod
    def backward(ctx, grad_output):
        squeeze_dim = ctx.dim
        return grad_output.squeeze(squeeze_dim), None
class MaskedCopy(InplaceFunction):
    """``tensor1.masked_copy_(mask, tensor2)``, optionally in place."""

    @staticmethod
    def forward(ctx, tensor1, mask, tensor2, inplace=False):
        assert not ctx.needs_input_grad[1], "MaskedCopy can't differentiate the mask"
        if not inplace:
            tensor1 = tensor1.clone()
        else:
            ctx.mark_dirty(tensor1)
        ctx.save_for_backward(mask)
        return tensor1.masked_copy_(mask, tensor2)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        mask, = ctx.saved_tensors
        grad_tensor1 = grad_tensor2 = None
        if ctx.needs_input_grad[0]:
            # Masked positions were overwritten: no grad flows to tensor1.
            grad_tensor1 = grad_output.clone().masked_fill_(mask, 0)
        if ctx.needs_input_grad[2]:
            # tensor2's elements landed exactly at the masked positions.
            grad_tensor2 = grad_output.masked_select(mask)
        return grad_tensor1, None, grad_tensor2, None
class MaskedFill(InplaceFunction):
    """``tensor.masked_fill_(mask, value)`` with a scalar `value`."""

    @staticmethod
    def forward(ctx, tensor, mask, value, inplace=False):
        assert not ctx.needs_input_grad[1], "MaskedFill can't differentiate the mask"
        if not inplace:
            tensor = tensor.clone()
        else:
            ctx.mark_dirty(tensor)
        ctx.save_for_backward(mask)
        return tensor.masked_fill_(mask, value)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        mask, = ctx.saved_tensors
        grad_tensor = None
        if ctx.needs_input_grad[0]:
            # Filled positions no longer depend on the input: zero them out.
            grad_tensor = grad_output.clone().masked_fill_(mask, 0)
        return grad_tensor, None, None, None
class MaskedSelect(Function):
    """``tensor.masked_select(mask)`` — returns a 1-D tensor of masked values."""

    @staticmethod
    def forward(ctx, tensor, mask):
        assert not ctx.needs_input_grad[1], "MaskedSelect can't differentiate the mask"
        ctx.input_size = tensor.size()  # backward needs the original shape
        ctx.save_for_backward(mask)
        return tensor.masked_select(mask)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        mask, = ctx.saved_tensors
        grad_tensor = None
        if ctx.needs_input_grad[0]:
            # Scatter the 1-D gradient back to the masked positions;
            # everything else stays zero.
            grad_tensor = grad_output.new(ctx.input_size).zero_()
            grad_tensor.masked_copy_(mask, grad_output)
        return grad_tensor, None
class _MultiSelectionFunction(Function):
    """Shared base for selection ops (Sort, Topk) returning (values, indices).

    Subclasses must set ``ctx.num_flags`` to the number of extra non-tensor
    forward arguments, so backward can pad its return with that many Nones.
    """

    @staticmethod
    def forward(ctx, input, dim, return_indices, args):
        # Dispatch to the tensor method named after the subclass
        # (e.g. Sort -> input.sort, Topk -> input.topk).
        fn = getattr(input, ctx._forward_cls.__name__.lower())
        ctx.return_indices = return_indices
        ctx.input_size = input.size()
        ctx.dim = dim
        output, indices = fn(*args)
        if return_indices:
            ctx.save_for_backward(indices)
            ctx.mark_non_differentiable(indices)
            return output, indices
        else:
            # Indices are not exposed to the graph; stash them directly.
            ctx.indices = indices
            return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output, grad_indices=None):
        # Scatter each output gradient back to the position it came from.
        grad_input = grad_output.new(ctx.input_size).zero_()
        if ctx.return_indices:
            indices, = ctx.saved_tensors
        else:
            indices = ctx.indices
        # A None dim means the op defaulted to the last dimension.
        dim = ctx.dim if ctx.dim is not None else grad_output.dim() - 1
        return (grad_input.scatter_(dim, indices, grad_output),) + (None,) * ctx.num_flags
class Sort(_MultiSelectionFunction):
    """``input.sort(dim, descending)`` via the multi-selection base class."""

    @staticmethod
    def forward(ctx, input, dim=None, descending=False, return_indices=True):
        # Default to the last dimension when none is given.
        ctx.dim = dim if dim is not None else input.dim() - 1
        args = (ctx.dim, descending)
        # Three non-tensor inputs (dim, descending, return_indices) need
        # None gradients in backward.
        ctx.num_flags = 3
        return _MultiSelectionFunction.forward(ctx, input, dim, return_indices, args)
class Topk(_MultiSelectionFunction):
    """``input.topk(k, dim, largest, sort)`` via the multi-selection base class."""

    @staticmethod
    def forward(ctx, input, k, dim=None, largest=True, sort=True, return_indices=True):
        # Default to the last dimension when none is given.
        ctx.dim = dim if dim is not None else input.dim() - 1
        args = (k, ctx.dim, largest, sort)
        # Five non-tensor inputs (k, dim, largest, sort, return_indices)
        # need None gradients in backward.
        ctx.num_flags = 5
        return _MultiSelectionFunction.forward(ctx, input, dim, return_indices, args)
class Chunk(Function):
    """Differentiable ``chunk``: split a tensor into ``num_chunks`` pieces
    along ``dim``."""

    @staticmethod
    def forward(ctx, i, num_chunks, dim=0):
        ctx.dim = dim
        pieces = i.chunk(num_chunks, dim)
        # The chunks are views into the input's storage.
        ctx.mark_shared_storage(*((i, piece) for piece in pieces))
        return pieces

    @staticmethod
    def backward(ctx, *grad_output):
        # Concatenating the per-chunk gradients reassembles the gradient of
        # the original (unsplit) input.
        return torch.cat(grad_output, ctx.dim), None, None
class Gather(Function):
    """Differentiable ``gather`` along ``dim`` with an integer ``index``
    tensor."""

    @staticmethod
    def forward(ctx, input, dim, index):
        assert not ctx.needs_input_grad[2], "Gather can't differentiate the index"
        # Keep the dense shape and index for the backward scatter.
        ctx.input_size = input.size()
        ctx.save_for_backward(index)
        ctx.dim = dim
        return input.gather(dim, index)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (index,) = ctx.saved_tensors
        # scatter_ is the adjoint of gather: each gradient entry is routed
        # back to the input position it was read from.
        grad_input = grad_output.new(ctx.input_size).zero_()
        grad_input.scatter_(ctx.dim, index, grad_output)
        return grad_input, None, None
class Scatter(InplaceFunction):
    """Differentiable ``scatter_``; writes ``source`` into ``input`` at
    ``index`` along ``dim``, optionally in place."""

    @staticmethod
    def forward(ctx, input, dim, index, source, inplace=False):
        assert not ctx.needs_input_grad[2], "Scatter can't differentiate the index"
        ctx.dim = dim
        if inplace:
            ctx.mark_dirty(input)
            target = input
        else:
            # Work on a copy so the caller's tensor is untouched.
            target = input.clone()
        ctx.save_for_backward(index)
        return target.scatter_(ctx.dim, index, source)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (index,) = ctx.saved_tensors
        grad_input = grad_source = None
        if ctx.needs_input_grad[0]:
            # Slots overwritten by ``source`` contribute no gradient to the
            # original input; zero them.
            grad_input = grad_output.clone()
            grad_input.scatter_(ctx.dim, index, 0)
        if ctx.needs_input_grad[3]:
            # ``source`` receives the gradient at the positions it wrote.
            grad_source = grad_output.gather(ctx.dim, index)
        return grad_input, None, None, grad_source, None
class Repeat(Function):
    """Differentiable ``Tensor.repeat``: tile the input along each
    dimension according to ``repeats``."""

    @staticmethod
    def forward(ctx, input, repeats):
        ctx.repeats = repeats
        return input.repeat(repeats)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        # Every tile saw the same input values, so the gradients of all
        # copies along each repeated dimension are summed.
        grad_input = grad_output
        for dim, count in enumerate(ctx.repeats):
            if count == 1:
                continue
            grad_input = sum(grad_input.chunk(count, dim))
        return grad_input, None
class Cumsum(Function):
def __init__(self, dim):
super(Cumsum, self).__init__()
self.dim = dim
def forward(self, input):
return torch.cumsum(input, dim=self.dim)
def backward(self, grad_output):
grad_input = torch.cumsum(-grad_output, dim=self.dim)
| |
could be found for computing the components
in the Coordinate frame (V, (d/du,d/dv))
Since the identity map is a special element, its components cannot be
changed::
sage: id = M.tangent_identity_field()
sage: id.add_comp(e)[0,1] = u*v
Traceback (most recent call last):
...
AssertionError: the components of the identity map cannot be changed
"""
if self._is_identity:
raise AssertionError("the components of the identity map cannot be "
"changed")
return TensorField._set_comp_unsafe(self, basis=basis)
def add_comp(self, basis=None):
r"""
Return the components of ``self`` w.r.t. a given module basis for
assignment, keeping the components w.r.t. other bases.
To delete the components w.r.t. other bases, use the method
:meth:`set_comp` instead.
INPUT:
- ``basis`` -- (default: ``None``) basis in which the components are
defined; if none is provided, the components are assumed to refer to
the module's default basis
.. WARNING::
If the automorphism field has already components in other bases, it
is the user's responsibility to make sure that the components
to be added are consistent with them.
OUTPUT:
- components in the given basis, as an instance of the
class :class:`~sage.tensor.modules.comp.Components`;
if such components did not exist previously, they are created
EXAMPLES::
sage: M = Manifold(2, 'M') # the 2-dimensional sphere S^2
sage: U = M.open_subset('U') # complement of the North pole
sage: c_xy.<x,y> = U.chart() # stereographic coordinates from the North pole
sage: V = M.open_subset('V') # complement of the South pole
sage: c_uv.<u,v> = V.chart() # stereographic coordinates from the South pole
sage: M.declare_union(U,V) # S^2 is the union of U and V
sage: e_uv = c_uv.frame()
sage: a= M.automorphism_field(name='a')
sage: a.add_comp(e_uv)
2-indices components w.r.t. Coordinate frame (V, (d/du,d/dv))
sage: a.add_comp(e_uv)[0,0] = u+v
sage: a.add_comp(e_uv)[1,1] = u+v
sage: a.display(e_uv)
a = (u + v) d/du*du + (u + v) d/dv*dv
Setting the components in a new frame::
sage: e = V.vector_frame('e')
sage: a.add_comp(e)
2-indices components w.r.t. Vector frame (V, (e_0,e_1))
sage: a.add_comp(e)[0,1] = u*v
sage: a.add_comp(e)[1,0] = u*v
sage: a.display(e)
a = u*v e_0*e^1 + u*v e_1*e^0
The components with respect to ``e_uv`` are kept::
sage: a.display(e_uv)
a = (u + v) d/du*du + (u + v) d/dv*dv
Since the identity map is a special element, its components cannot be
changed::
sage: id = M.tangent_identity_field()
sage: id.add_comp(e)[0,1] = u*v
Traceback (most recent call last):
...
AssertionError: the components of the identity map cannot be changed
"""
if self._is_identity:
raise AssertionError("the components of the identity map cannot be "
"changed")
return TensorField._add_comp_unsafe(self, basis=basis)
def _new_instance(self):
r"""
Create an instance of the same class as ``self`` on the same
vector field module.
TESTS::
sage: M = Manifold(5, 'M')
sage: a = M.automorphism_field(name='a')
sage: a._new_instance()
Field of tangent-space automorphisms on the 5-dimensional
differentiable manifold M
sage: a._new_instance().parent() is a.parent()
True
"""
return type(self)(self._vmodule)
    def __call__(self, *arg):
        r"""
        Redefinition of
        :meth:`~sage.manifolds.differentiable.tensorfield.TensorField.__call__`
        to allow for a proper treatment of the identity map and of the call
        with a single argument

        TESTS:

        Field of identity maps on the 2-sphere::

            sage: M = Manifold(2, 'M') # the 2-dimensional sphere S^2
            sage: U = M.open_subset('U') # complement of the North pole
            sage: c_xy.<x,y> = U.chart() # stereographic coordinates from the North pole
            sage: V = M.open_subset('V') # complement of the South pole
            sage: c_uv.<u,v> = V.chart() # stereographic coordinates from the South pole
            sage: M.declare_union(U,V) # S^2 is the union of U and V
            sage: xy_to_uv = c_xy.transition_map(c_uv, (x/(x^2+y^2), y/(x^2+y^2)),
            ....:                 intersection_name='W', restrictions1= x^2+y^2!=0,
            ....:                 restrictions2= u^2+v^2!=0)
            sage: uv_to_xy = xy_to_uv.inverse()
            sage: e_xy = c_xy.frame(); e_uv = c_uv.frame()
            sage: w = M.vector_field({e_xy: [3, 1]}, name='w')
            sage: w.add_comp_by_continuation(e_uv, U.intersection(V), c_uv)
            sage: z = M.one_form({e_xy: [-y, x]}, name='z')
            sage: z.add_comp_by_continuation(e_uv, U.intersection(V), c_uv)
            sage: Id = M.tangent_identity_field()
            sage: s = Id(w); s
            Vector field w on the 2-dimensional differentiable manifold M
            sage: s == w
            True
            sage: s = Id(z, w); s
            Scalar field z(w) on the 2-dimensional differentiable manifold M
            sage: s == z(w)
            True

        Field of automorphisms on the 2-sphere::

            sage: a = M.automorphism_field({e_xy: [[-1, 0], [0, 1]]}, name='a')
            sage: a.add_comp_by_continuation(e_uv, U.intersection(V), c_uv)

        Call with a single argument::

            sage: s = a(w); s
            Vector field a(w) on the 2-dimensional differentiable manifold M
            sage: s.display(e_xy)
            a(w) = -3 d/dx + d/dy
            sage: s.display(e_uv)
            a(w) = (3*u^2 - 2*u*v - 3*v^2) d/du + (u^2 + 6*u*v - v^2) d/dv
            sage: s.restrict(U) == a.restrict(U)(w.restrict(U))
            True
            sage: s.restrict(V) == a.restrict(V)(w.restrict(V))
            True
            sage: s.restrict(U) == a(w.restrict(U))
            True
            sage: s.restrict(U) == a.restrict(U)(w)
            True

        Call with two arguments::

            sage: s = a(z, w); s
            Scalar field a(z,w) on the 2-dimensional differentiable manifold M
            sage: s.display()
            a(z,w): M --> R
            on U: (x, y) |--> x + 3*y
            on V: (u, v) |--> (u + 3*v)/(u^2 + v^2)
            sage: s.restrict(U) == a.restrict(U)(z.restrict(U), w.restrict(U))
            True
            sage: s.restrict(V) == a.restrict(V)(z.restrict(V), w.restrict(V))
            True
            sage: s.restrict(U) == a(z.restrict(U), w.restrict(U))
            True
            sage: s.restrict(U) == a(z, w.restrict(U))
            True
        """
        if self._is_identity:
            if len(arg) == 1:
                # The identity map acting as such, on a vector field:
                vector = arg[0]
                if vector._tensor_type != (1,0):
                    raise TypeError("the argument must be a vector field")
                dom = self._domain.intersection(vector._domain)
                # Identity: the result is the vector itself, restricted to
                # the common domain.
                return vector.restrict(dom)
            elif len(arg) == 2:
                # self acting as a type-(1,1) tensor on a pair
                # (1-form, vector field), returning a scalar field:
                oneform = arg[0]
                vector = arg[1]
                dom = self._domain.intersection(
                                  oneform._domain).intersection(vector._domain)
                # For the identity, a(z, w) reduces to z(w).
                return oneform.restrict(dom)(vector.restrict(dom))
            else:
                raise TypeError("wrong number of arguments")
        # Generic case
        if len(arg) == 1:
            # The field of automorphisms acting on a vector field:
            vector = arg[0]
            if vector._tensor_type != (1,0):
                raise TypeError("the argument must be a vector field")
            dom = self._domain.intersection(vector._domain)
            vector_dom = vector.restrict(dom)
            if dom != self._domain:
                # Delegate to the restriction of self on the common domain.
                return self.restrict(dom)(vector_dom)
            resu = dom.vector_field()
            # Compose the result's name/LaTeX name as "a(w)" when both
            # operands are named.
            if self._name is not None and vector._name is not None:
                resu._name = self._name + "(" + vector._name + ")"
            if self._latex_name is not None and vector._latex_name is not None:
                resu._latex_name = self._latex_name + r"\left(" + \
                                  vector._latex_name + r"\right)"
            # Assemble the result piecewise from the known restrictions.
            for sdom, automorph in self._restrictions.items():
                resu._restrictions[sdom] = automorph(vector_dom.restrict(sdom))
            return resu
        # Case of 2 arguments:
        return TensorField.__call__(self, *arg)
#### MultiplicativeGroupElement methods ####
def __invert__(self):
r"""
Return the inverse automorphism of ``self``.
EXAMPLES:
Inverse of a field of tangent-space automorphisms on a
non-parallelizable 2-dimensional manifold::
sage: M = Manifold(2, 'M')
sage: U = M.open_subset('U') ; V = M.open_subset('V')
sage: M.declare_union(U,V) # M is the union of U and V
sage: W = U.intersection(V)
sage: c_xy.<x,y> = U.chart() ; c_uv.<u,v> = V.chart()
sage: transf = c_xy.transition_map(c_uv, (x+y, x-y),
....: intersection_name='W', restrictions1= x>0, restrictions2= u+v>0)
sage: inv = transf.inverse()
sage: eU = c_xy.frame() ; eV = c_uv.frame()
sage: a = M.automorphism_field({eU: [[1,x], [0,2]]}, name='a')
sage: a.add_comp_by_continuation(eV, W, c_uv)
sage: ia = a.inverse() ; ia
Field of tangent-space automorphisms a^(-1) on the 2-dimensional
differentiable manifold M
sage: a[eU,:], ia[eU,:]
(
[1 x] [ 1 -1/2*x]
[0 2], [ 0 1/2]
)
sage: a[eV,:], ia[eV,:]
(
[ 1/4*u + 1/4*v + 3/2 -1/4*u - 1/4*v - 1/2]
[ 1/4*u + 1/4*v - 1/2 -1/4*u - 1/4*v + 3/2],
[-1/8*u - 1/8*v + 3/4 1/8*u + 1/8*v + 1/4]
[-1/8*u - 1/8*v + 1/4 1/8*u + 1/8*v + 3/4]
)
Let us check that ia is indeed the inverse of a::
sage: s = a.contract(ia)
sage: s[eU,:], s[eV,:]
(
[1 0] [1 0]
[0 1], [0 1]
)
sage: s = ia.contract(a)
sage: s[eU,:], s[eV,:]
(
[1 0] [1 0]
[0 1], [0 1]
)
The result is cached::
sage: a.inverse() is ia
True
Instead of ``inverse()``, one can use the power minus one to get the
inverse::
sage: ia is a^(-1)
True
or the operator ``~``::
sage: ia is ~a
True
"""
if self._is_identity:
return self
if self._inverse is None:
from sage.tensor.modules.format_utilities import is_atomic
if self._name is None:
inv_name = None
else:
if is_atomic(self._name, ['*']):
inv_name = self._name + '^(-1)'
else:
inv_name = '(' + self._name + ')^(-1)'
if self._latex_name is None:
inv_latex_name = None
else:
if is_atomic(self._latex_name, ['\\circ', '\\otimes']):
inv_latex_name = self._latex_name + r'^{-1}'
else:
inv_latex_name = r'\left(' + self._latex_name + | |
G T", None),
("Sat, 10 Oct 2009 13:47:21 GM ", None),
# Replace numeric elements with stuff that contains A-Z
("Fri, Burp Oct 2009 13:47:21 GMT", None),
("Fri, 10 Tabalqplar 2009 13:47:21 GMT", None),
("Sat, 10 Oct Fruit 13:47:21 GMT", None),
("Sat, 10 Oct 2009 13:47:21 Fruits", None),
# Weekday
(", Dec 31 00:00:00 2003", None),
("T, Dec 31 00:00:00 2003", None),
("Tu, Dec 31 00:00:00 2003", None),
("Hi, Dec 31 00:00:00 2003", None),
("Heretounforeseen, Dec 31 00:00:00 2003", None),
("Wednesday2, Dec 31 00:00:00 2003", None),
("Mon\x00frobs, Dec 31 00:00:00 2003", None),
("Mon\x10day, Dec 31 00:00:00 2003", None),
# Day of month
("Fri, Oct 2009 13:47:21 GMT", None),
("Fri, 110 Oct 2009 13:47:21 GMT", None),
("Fri, 0 Oct 2009 13:47:21 GMT", None),
("Fri, 00 Oct 2009 13:47:21 GMT", None),
("Fri, 0 Oct 2009 13:47:21 GMT", None),
("Fri, 0 Oct 2009 13:47:21 GMT", None),
("Fri, 00 Oct 2009 13:47:21 GMT", None),
("Fri, 33 Oct 2009 13:47:21 GMT", None),
("Fri, 40 Oct 2009 13:47:21 GMT", None),
("Fri, A2 Oct 2009 13:47:21 GMT", None),
("Fri, 2\x00 Oct 2009 13:47:21 GMT", None),
("Fri, \t3 Oct 2009 13:47:21 GMT", None),
("Fri, 3\t Oct 2009 13:47:21 GMT", None),
# Month
("Fri, 10 2009 13:47:21 GMT", None),
("Fri, 10 O 2009 13:47:21 GMT", None),
("Fri, 10 Oc 2009 13:47:21 GMT", None),
("Sat, 10 Octuarial 2009 13:47:21 GMT", None),
("Sat, 10 Octuary 2009 13:47:21 GMT", None),
("Sat, 10 Octubre 2009 13:47:21 GMT", None),
# Year
("Sat, 10 Oct 009 13:47:21 GMT", None),
("Sat, 10 Oct 200 13:47:21 GMT", None),
("Sat, 10 Oct 209 13:47:21 GMT", None),
("Sat, 10 Oct 20 9 13:47:21 GMT", None),
# Hour
("Sat, 10 Oct 2009 25:47:21 GMT", None),
("Sat, 10 Oct 2009 1@:47:21 GMT", None),
# Minute
("Sat, 10 Oct 2009 13:71:21 GMT", None),
("Sat, 10 Oct 2009 13:61:21 GMT", None),
("Sat, 10 Oct 2009 13:60:21 GMT", None),
("Sat, 10 Oct 2009 24:01:00 GMT", None),
# Second
("Sat, 10 Oct 2009 13:47 GMT", "Sat, 10 Oct 2009 13:47:00 GMT"),
("Sat, 10 Oct 2009 13:47:00 GMT", "Sat, 10 Oct 2009 13:47:00 GMT"),
("Sat, 10 Oct 2009 24:00:01 GMT", None),
# Some reasonable cases (ignore weekday)
("Mon Dec 24 16:32:39 1977 GMT", "Sat, 24 Dec 1977 16:32:39 GMT"),
("Sat, 7 Dec 1991 13:56:05 GMT", "Sat, 07 Dec 1991 13:56:05 GMT"),
("Saturday, 8-Mar-2012 21:35:09 GMT", "Thu, 08 Mar 2012 21:35:09 GMT"),
("Sun, 1-Feb-1998 00:00:00 GMT", "Sun, 01 Feb 1998 00:00:00 GMT"),
("Thursday, 01-Jan-1983 01:01:01 GMT",
"Sat, 01 Jan 1983 01:01:01 GMT"),
("Tue, 15-Nov-1973 22:23:24 GMT", "Thu, 15 Nov 1973 22:23:24 GMT"),
("Wed, 09 Dec 1999 23:59:59 GMT", "Thu, 09 Dec 1999 23:59:59 GMT"),
("Mon, 12-May-05 20:25:03 GMT", "Thu, 12 May 2005 20:25:03 GMT"),
("Thursday, 01-Jan-12 09:00:00 GMT", "Sun, 01 Jan 2012 09:00:00 GMT"),
# starts like asctime, but flips the time and year - nonsense
("Wed Mar 12 2007 08:25:07 GMT", None),
# starts like RFC 1123, but flips the time and year - nonsense
("Thu, 31 Dec 23:55:55 2107 GMT", None),
('Fri, 21-May-2004 10:40:51 GMT', "Fri, 21 May 2004 10:40:51 GMT"),
# extra 2-digit year exercises
("Sat, 10 Oct 11 13:47:21 GMT", "Mon, 10 Oct 2011 13:47:21 GMT"),
("Sat, 10 Oct 09 13:47:22 GMT", "Sat, 10 Oct 2009 13:47:22 GMT"),
("Sat, 10 Oct 93 13:47:23 GMT", "Sun, 10 Oct 1993 13:47:23 GMT"),
("Sat, 10 Oct 85 13:47:24 GMT", "Thu, 10 Oct 1985 13:47:24 GMT"),
("Sat, 10 Oct 70 13:47:25 GMT", "Sat, 10 Oct 1970 13:47:25 GMT"),
("Sat, 10 Oct 69 13:47:26 GMT", "Thu, 10 Oct 2069 13:47:26 GMT"),
# dealing with 3-digit year is incredibly tedious, will do as needed
("Sat, 10 Oct 969 13:47:26 GMT", None),
("Sat, 10 Oct 9 13:47:26 GMT", None),
("Fri, 10 Oct 19691 13:47:26 GMT", None),
]
def change(string, position, new_value):
    """Return *string* with the character at *position* replaced by
    *new_value* (which may be empty or longer than one character)."""
    prefix, suffix = string[:position], string[position + 1:]
    return prefix + new_value + suffix
# Fuzzing: start from one canonical, known-good RFC 1123 date string and
# corrupt it one position at a time; each mutation appends a (input,
# expected-render) pair to ``cases``.
original = "Sat, 10 Oct 2009 13:47:21 GMT"
# Stuff garbage in every position - none of these characters should
# ever be allowed in a date string.
# not included because pytest chokes: "¿�␦"
bad_chars = "/<>()\\*$#&=;\x00\b\f\n\r\"\'`?"
for pos in range(0, len(original)):
    for bad_char in bad_chars:
        cases.append((change(original, pos, bad_char), None))
# Invalidate each letter
letter_positions = [i for (i, c) in enumerate(original) \
        if re.match("[A-Za-z]", c)]
for pos in letter_positions:
    cases.append((change(original, pos, 'q'), None))
    cases.append((change(original, pos, '0'), None))
    cases.append((change(original, pos, '-'), None))
    cases.append((change(original, pos, ''), None))
    # But do tolerate case changes.
    c = original[pos]
    if c.isupper():
        c = c.lower()
    else:
        c = c.upper()
    # A case-flipped letter must still parse back to the canonical form.
    cases.append((change(original, pos, c), original))
# Invalidate each digit
digit_positions = [i for (i, c) in enumerate(original) \
        if c in "0123456789"]
for pos in digit_positions:
    c = original[pos]
    cases.append((change(original, pos, 'q'), None))
    cases.append((change(original, pos, '-' + c), None))
    cases.append((change(original, pos, '+' + c), None))
# Invalidate each space
space_positions = [i for (i, c) in enumerate(original) \
        if c in " \t\n\r"]
for pos in space_positions:
    cases.append((change(original, pos, 'x'), None))
    cases.append((change(original, pos, '\t'), None))
    cases.append((change(original, pos, ' '), None))
    cases.append((change(original, pos, ''), None))
# Invalidate each colon
colon_positions = [i for (i, c) in enumerate(original) \
        if c == ":"]
for pos in colon_positions:
    cases.append((change(original, pos, 'z'), None))
    cases.append((change(original, pos, '0'), None))
    cases.append((change(original, pos, ' '), None))
    cases.append((change(original, pos, ''), None))
# Run every accumulated case: parsing then re-rendering must reproduce
# exactly the expected string (or None for rejected inputs).
for data, ideal in cases:
    actual = render_date(parse_date(data))
    assert actual == ideal
def runner(function):
    """Generate a function which collects the result/exception from another
    function, for easier assertions.
    """
    def run(*args, **kwargs):
        "Invoke the wrapped function, trapping any exception it raises"
        result, error = None, None
        try:
            result = function(*args, **kwargs)
        except Exception as exc:
            error = exc
        # Yield the exception when one occurred, the plain result otherwise.
        return error or result
    return run
# Define cases for testing parsing and rendering.
# Format: input, kwargs, expected parse_request result, expected parse_response
# result.
HEADER_CASES = [
# cases with nothing that can be parsed out result in
# InvalidCookieError. unless ignore_bad_cookies=True, then they give an
# empty Cookies().
("", {},
InvalidCookieError,
InvalidCookieError),
('a', {},
InvalidCookieError,
InvalidCookieError),
(" ", {},
InvalidCookieError,
InvalidCookieError),
(";;;;;", {},
InvalidCookieError,
InvalidCookieError),
("qwejrkqlwjere", {},
InvalidCookieError,
InvalidCookieError),
# vacuous headers should give invalid
('Cookie: ', {},
InvalidCookieError,
InvalidCookieError),
('Set-Cookie: ', {},
InvalidCookieError,
InvalidCookieError),
# Single pair should work the same as request or response
("foo=bar", {},
Cookies(foo='bar'),
Cookies(foo='bar')),
("SID=242d96421d4e", {},
Cookies(SID='242d96421d4e'),
Cookies(SID='242d96421d4e')),
# Two pairs on SAME line should work with request, fail with response.
# if ignore_bad_attributes, response should not raise.
# and ignore_bad_attributes behavior should be default
("a=b; c=dx", {'ignore_bad_attributes': True},
Cookies(a='b', c='dx'),
Cookies(a='b')),
("a=b; c=d", {'ignore_bad_attributes': False},
Cookies(a='b', c='d'),
InvalidCookieAttributeError),
('g=h;j=k', {},
Cookies(g='h', j='k'),
Cookies(g='h')),
# tolerance: response shouldn't barf on unrecognized attr by default,
# but request should recognize as malformed
('a=b; brains', {},
InvalidCookieError,
Cookies(a='b')),
# tolerance: should strip quotes and spaces
('A="BBB"', {},
Cookies(A='BBB'),
Cookies(A='BBB'),
),
('A= "BBB" ', {},
Cookies(A='BBB'),
Cookies(A='BBB'),
),
# tolerance: should ignore dumb trailing ;
('foo=bar;', {},
Cookies(foo='bar'),
Cookies(foo='bar'),
),
('A="BBB";', {},
Cookies(A='BBB'),
Cookies(A='BBB'),
),
('A= "BBB" ;', {},
Cookies(A='BBB'),
Cookies(A='BBB'),
),
# empty value
("lang=; Expires=Sun, 06 Nov 1994 08:49:37 GMT", {},
InvalidCookieError,
Cookies(
Cookie('lang', '',
expires=parse_date(
"Sun, 06 Nov 1994 08:49:37 GMT")))),
# normal examples of varying complexity
("frob=varvels; Expires=Wed, 09 Jun 2021 10:18:14 GMT", {},
InvalidCookieError,
Cookies(
Cookie('frob', 'varvels',
expires=parse_date(
"Wed, 09 Jun 2021 10:18:14 GMT"
)))),
("lang=en-US; Expires=Wed, 03 Jun 2019 10:18:14 GMT", {},
InvalidCookieError,
Cookies(
Cookie('lang', 'en-US',
expires=parse_date(
"Wed, 03 Jun 2019 10:18:14 GMT"
)))),
# easily interpretable as multiple request cookies!
("CID=39b4d9be4d42; Path=/; Domain=example.com", {},
Cookies(CID="39b4d9be4d42", Path='/', Domain='example.com'),
Cookies(Cookie('CID', '39b4d9be4d42', path='/',
domain='example.com'))),
("lang=en-US; Path=/; Domain=example.com", {},
Cookies(lang='en-US', Path='/', Domain='example.com'),
Cookies(Cookie('lang', 'en-US',
path='/', domain='example.com'))),
("foo=bar; path=/; expires=Mon, 04-Dec-2001 12:43:00 GMT", {},
InvalidCookieError,
Cookies(
Cookie('foo', 'bar', path='/',
expires=parse_date("Mon, 04-Dec-2001 12:43:00 GMT")
))),
("SID=0fae49; Path=/; Secure; HttpOnly", {},
InvalidCookieError,
Cookies(Cookie('SID', '0fae49',
path='/', secure=True, httponly=True))),
('TMID=DQAAXKEaeo_aYp; Domain=mail.nauk.com; '
'Path=/accounts; Expires=Wed, 13-Jan-2021 22:23:01 GMT; '
'Secure; HttpOnly', {},
InvalidCookieError,
Cookies(
Cookie('TMID', 'DQAAXKEaeo_aYp',
domain='mail.nauk.com',
path='/accounts', secure=True, httponly=True,
expires=parse_date("Wed, 13-Jan-2021 22:23:01 GMT")
))),
("test=some_value; expires=Sat, 01-Jan-2000 00:00:00 GMT; "
"path=/;", {},
InvalidCookieError,
Cookies(
Cookie('test', 'some_value', path='/',
expires=parse_date('Sat, 01 Jan | |
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: Simplified BSD
import os
import os.path as op
from pathlib import Path
import sys
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import pytest
import matplotlib.pyplot as plt
from matplotlib.colors import Colormap
from mne import (make_field_map, pick_channels_evoked, read_evokeds,
read_trans, read_dipole, SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, make_sphere_model, use_coil_def,
setup_volume_source_space, read_forward_solution,
VolVectorSourceEstimate, convert_forward_solution,
compute_source_morph, MixedSourceEstimate)
from mne.io import (read_raw_ctf, read_raw_bti, read_raw_kit, read_info,
read_raw_nirx)
from mne.io._digitization import write_dig
from mne.io.pick import pick_info
from mne.io.constants import FIFF
from mne.viz import (plot_sparse_source_estimates, plot_source_estimates,
snapshot_brain_montage, plot_head_positions,
plot_alignment, plot_volume_source_estimates,
plot_sensors_connectivity, plot_brain_colorbar,
link_brains, mne_analyze_colormap)
from mne.viz._3d import _process_clim, _linearize_map, _get_map_ticks
from mne.viz.utils import _fake_click
from mne.utils import (requires_pysurfer, run_tests_if_main,
requires_nibabel, requires_dipy,
traits_test, requires_version, catch_logging,
run_subprocess, modified_env)
from mne.datasets import testing
from mne.source_space import read_source_spaces
from mne.bem import read_bem_solution, read_bem_surfaces
# Paths into the mne-testing-data tree.  ``download=False`` means the data
# may be absent; tests using these paths are gated behind
# ``testing.requires_testing_data``.
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
trans_fname = op.join(data_dir, 'MEG', 'sample',
                      'sample_audvis_trunc-trans.fif')
src_fname = op.join(data_dir, 'subjects', 'sample', 'bem',
                    'sample-oct-6-src.fif')
dip_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
ctf_fname = op.join(data_dir, 'CTF', 'testdata_ctf.ds')
nirx_fname = op.join(data_dir, 'NIRx', 'nirx_15_2_recording_w_short')
# Small fixture files shipped inside the mne package itself.
io_dir = op.join(op.abspath(op.dirname(__file__)), '..', '..', 'io')
base_dir = op.join(io_dir, 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
fwd_fname = op.join(data_dir, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-vol-7-fwd.fif')
fwd_fname2 = op.join(data_dir, 'MEG', 'sample',
                     'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
# NOTE: ``base_dir`` is deliberately rebound here to the BTi fixtures dir.
base_dir = op.join(io_dir, 'bti', 'tests', 'data')
pdf_fname = op.join(base_dir, 'test_pdf_linux')
config_fname = op.join(base_dir, 'test_config_linux')
hs_fname = op.join(base_dir, 'test_hs_linux')
sqd_fname = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
# Custom coil definition (used via ``use_coil_def`` below); the '#' lines
# are part of the coil-definition file format, not Python comments.
coil_3d = """# custom cube coil def
1 9999 1 8 3e-03 0.000e+00 "QuSpin ZFOPM 3mm cube"
0.1250 -0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000
"""
def test_plot_head_positions():
    """Smoke-test head-position plotting and its input validation."""
    info = read_info(evoked_fname)
    pos = np.random.RandomState(0).randn(4, 10)
    pos[:, 0] = np.arange(len(pos))
    dest = (0., 0., 0.04)
    with pytest.warns(None):  # old matplotlib versions emit a warning
        plot_head_positions(pos)
        plot_head_positions(pos, mode='field', info=info, destination=dest)
        # A list of position arrays is accepted as well.
        plot_head_positions([pos, pos])
        # Malformed inputs must be rejected.
        for bad in ((['pos'],), (pos[:, :9],), (pos, 'foo')):
            pytest.raises(ValueError, plot_head_positions, *bad)
        with pytest.raises(ValueError, match='shape'):
            plot_head_positions(pos, axes=1.)
    plt.close('all')
@testing.requires_testing_data
@requires_pysurfer
@traits_test
def test_plot_sparse_source_estimates(renderer_interactive):
    """Test plotting of (sparse) source estimates."""
    sample_src = read_source_spaces(src_fname)
    # dense version
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_size = stc_data.size
    # NOTE(review): the index draw uses the unseeded *global* RNG while the
    # values use RandomState(0), so the chosen indices are not reproducible
    # across runs -- presumably acceptable for a smoke test; confirm.
    stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
        np.random.RandomState(0).rand(stc_data.size // 20)
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    colormap = 'mne_analyze'
    plot_source_estimates(stc, 'sample', colormap=colormap,
                          background=(1, 1, 0),
                          subjects_dir=subjects_dir, colorbar=True,
                          clim='auto')
    pytest.raises(TypeError, plot_source_estimates, stc, 'sample',
                  figure='foo', hemi='both', clim='auto',
                  subjects_dir=subjects_dir)
    # now do sparse version
    vertices = sample_src[0]['vertno']
    inds = [111, 333]
    stc_data = np.zeros((len(inds), n_time))
    stc_data[0, 1] = 1.
    stc_data[1, 4] = 2.
    # Use builtin ``int``: the ``np.int`` alias is deprecated (NumPy 1.20)
    # and removed in NumPy 1.24; it was identical to ``int`` anyway.
    vertices = [vertices[inds], np.empty(0, dtype=int)]
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    surf = plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
                                        opacity=0.5, high_resolution=False)
    if renderer_interactive._get_3d_backend() == 'mayavi':
        import mayavi  # noqa: F401 analysis:ignore
        assert isinstance(surf, mayavi.modules.surface.Surface)
@testing.requires_testing_data
@traits_test
def test_plot_evoked_field(renderer):
    """Smoke-test evoked field-map plotting for MEG-only and all channels."""
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    # Subsample channels to keep the test fast.
    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])
    for ch_type in ('meg', None):
        with pytest.warns(RuntimeWarning, match='projection'):
            maps = make_field_map(evoked, trans_fname, subject='sample',
                                  subjects_dir=subjects_dir, n_jobs=1,
                                  ch_type=ch_type)
        fig = evoked.plot_field(maps, time=0.1)
    if renderer._get_3d_backend() == 'mayavi':
        import mayavi  # noqa: F401 analysis:ignore
        assert isinstance(fig, mayavi.core.scene.Scene)
@pytest.mark.slowtest # can be slow on OSX
@testing.requires_testing_data
@traits_test
def test_plot_alignment(tmpdir, renderer):
"""Test plotting of -trans.fif files and MEG sensor layouts."""
# generate fiducials file for testing
tempdir = str(tmpdir)
fiducials_path = op.join(tempdir, 'fiducials.fif')
fid = [{'coord_frame': 5, 'ident': 1, 'kind': 1,
'r': [-0.08061612, -0.02908875, -0.04131077]},
{'coord_frame': 5, 'ident': 2, 'kind': 1,
'r': [0.00146763, 0.08506715, -0.03483611]},
{'coord_frame': 5, 'ident': 3, 'kind': 1,
'r': [0.08436285, -0.02850276, -0.04127743]}]
write_dig(fiducials_path, fid, 5)
renderer.backend._close_all()
evoked = read_evokeds(evoked_fname)[0]
sample_src = read_source_spaces(src_fname)
bti = read_raw_bti(pdf_fname, config_fname, hs_fname, convert=True,
preload=False).info
infos = dict(
Neuromag=evoked.info,
CTF=read_raw_ctf(ctf_fname).info,
BTi=bti,
KIT=read_raw_kit(sqd_fname).info,
)
for system, info in infos.items():
meg = ['helmet', 'sensors']
if system == 'KIT':
meg.append('ref')
fig = plot_alignment(info, read_trans(trans_fname), subject='sample',
subjects_dir=subjects_dir, meg=meg)
rend = renderer.backend._Renderer(fig=fig)
rend.close()
# KIT ref sensor coil def is defined
renderer.backend._close_all()
info = infos['Neuromag']
pytest.raises(TypeError, plot_alignment, 'foo', trans_fname,
subject='sample', subjects_dir=subjects_dir)
pytest.raises(OSError, plot_alignment, info, trans_fname,
subject='sample', subjects_dir=subjects_dir, src='foo')
pytest.raises(ValueError, plot_alignment, info, trans_fname,
subject='fsaverage', subjects_dir=subjects_dir,
src=sample_src)
sample_src.plot(subjects_dir=subjects_dir, head=True, skull=True,
brain='white')
renderer.backend._close_all()
# no-head version
renderer.backend._close_all()
# all coord frames
pytest.raises(ValueError, plot_alignment, info)
plot_alignment(info, surfaces=[])
for coord_frame in ('meg', 'head', 'mri'):
fig = plot_alignment(info, meg=['helmet', 'sensors'], dig=True,
coord_frame=coord_frame, trans=Path(trans_fname),
subject='sample', mri_fiducials=fiducials_path,
subjects_dir=subjects_dir, src=src_fname)
renderer.backend._close_all()
# EEG only with strange options
evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
evoked_eeg_ecog_seeg.info['projs'] = [] # "remove" avg proj
evoked_eeg_ecog_seeg.set_channel_types({'EEG 001': 'ecog',
'EEG 002': 'seeg'})
with pytest.warns(RuntimeWarning, match='Cannot plot MEG'):
plot_alignment(evoked_eeg_ecog_seeg.info, subject='sample',
trans=trans_fname, subjects_dir=subjects_dir,
surfaces=['white', 'outer_skin', 'outer_skull'],
meg=['helmet', 'sensors'],
eeg=['original', 'projected'], ecog=True, seeg=True)
renderer.backend._close_all()
sphere = make_sphere_model(info=evoked.info, r0='auto', head_radius='auto')
bem_sol = read_bem_solution(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif'))
bem_surfs = read_bem_surfaces(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem.fif'))
sample_src[0]['coord_frame'] = 4 # hack for coverage
plot_alignment(info, subject='sample', eeg='projected',
meg='helmet', bem=sphere, dig=True,
surfaces=['brain', 'inner_skull', 'outer_skull',
'outer_skin'])
plot_alignment(info, trans_fname, subject='sample', meg='helmet',
subjects_dir=subjects_dir, eeg='projected', bem=sphere,
surfaces=['head', 'brain'], src=sample_src)
assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
for surf in bem_sol['surfs'])
plot_alignment(info, trans_fname, subject='sample', meg=[],
subjects_dir=subjects_dir, bem=bem_sol, eeg=True,
surfaces=['head', 'inflated', 'outer_skull', 'inner_skull'])
assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
for surf in bem_sol['surfs'])
plot_alignment(info, trans_fname, subject='sample',
meg=True, subjects_dir=subjects_dir,
surfaces=['head', 'inner_skull'], bem=bem_surfs)
# single-layer BEM can still plot head surface
assert bem_surfs[-1]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
bem_sol_homog = read_bem_solution(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-bem-sol.fif'))
for use_bem in (bem_surfs[-1:], bem_sol_homog):
with catch_logging() as log:
plot_alignment(info, trans_fname, subject='sample',
meg=True, subjects_dir=subjects_dir,
surfaces=['head', 'inner_skull'], bem=use_bem,
verbose=True)
log = log.getvalue()
assert 'not find the surface for head in the provided BEM model' in log
# sphere model
sphere = make_sphere_model('auto', 'auto', evoked.info)
src = setup_volume_source_space(sphere=sphere)
plot_alignment(info, eeg='projected', meg='helmet', bem=sphere,
src=src, dig=True, surfaces=['brain', 'inner_skull',
'outer_skull', 'outer_skin'])
sphere = make_sphere_model('auto', None, evoked.info) # one layer
# no info is permitted
fig = plot_alignment(trans=trans_fname, subject='sample', meg=False,
coord_frame='mri', subjects_dir=subjects_dir,
surfaces=['brain'], bem=sphere, show_axes=True)
renderer.backend._close_all()
if renderer._get_3d_backend() == 'mayavi':
import mayavi # noqa: F401 analysis:ignore
assert isinstance(fig, mayavi.core.scene.Scene)
# 3D coil with no defined draw (ConvexHull)
info_cube = pick_info(info, [0])
info['dig'] = None
info_cube['chs'][0]['coil_type'] = 9999
with pytest.raises(RuntimeError, match='coil definition not found'):
plot_alignment(info_cube, meg='sensors', surfaces=())
coil_def_fname = op.join(tempdir, 'temp')
with open(coil_def_fname, 'w') as fid:
fid.write(coil_3d)
with use_coil_def(coil_def_fname):
plot_alignment(info_cube, meg='sensors', surfaces=(), dig=True)
# one layer bem with skull surfaces:
with pytest.raises(ValueError, match='sphere conductor model must have'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=['brain', 'head', 'inner_skull'], bem=sphere)
# wrong eeg value:
with pytest.raises(ValueError, match='Invalid value for the .eeg'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir, eeg='foo')
# wrong meg value:
with pytest.raises(ValueError, match='Invalid value for the .meg'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir, meg='bar')
# multiple brain surfaces:
with pytest.raises(ValueError, match='Only one brain surface can be plot'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=['white', 'pial'])
with pytest.raises(TypeError, match='all entries in surfaces must be'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=[1])
with pytest.raises(ValueError, match='Unknown surface type'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=['foo'])
fwd_fname = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fwd = read_forward_solution(fwd_fname)
plot_alignment(subject='sample', subjects_dir=subjects_dir,
trans=trans_fname, fwd=fwd,
surfaces='white', coord_frame='head')
fwd = convert_forward_solution(fwd, force_fixed=True)
plot_alignment(subject='sample', subjects_dir=subjects_dir,
trans=trans_fname, fwd=fwd,
surfaces='white', coord_frame='head')
# fNIRS
info = read_raw_nirx(nirx_fname).info
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True)
log = log.getvalue()
assert '26 fnirs pairs' in log
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
fnirs='channels')
log = log.getvalue()
assert '26 fnirs locations' in log
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
fnirs='pairs')
log = log.getvalue()
assert '26 fnirs pairs' in log
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
fnirs='sources')
log = log.getvalue()
assert '26 fnirs sources' in log
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
fnirs='detectors')
log = log.getvalue()
assert '26 fnirs detectors' in log
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
fnirs=['channels', 'pairs'])
log = log.getvalue()
assert '26 fnirs pairs' in log
assert '26 fnirs locations' in log
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
fnirs=['pairs', 'sources', 'detectors'])
log = log.getvalue()
assert '26 fnirs pairs' in log
assert '26 fnirs sources' in log
assert '26 fnirs detectors' in log
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
fnirs=['channels', 'pairs', 'sources', 'detectors'])
log = log.getvalue()
assert '26 fnirs pairs' in log
assert '26 fnirs locations' in log
assert '26 fnirs sources' in log
assert '26 fnirs detectors' in log
renderer.backend._close_all()
@pytest.mark.slowtest # can be slow on OSX
@testing.requires_testing_data
@requires_pysurfer
@traits_test
def test_process_clim_plot(renderer_interactive):
"""Test functionality for determining control | |
or pitch radius """
return self.min_radius - self.h_parameter / 4
@property
def external_thread_core_radius(self) -> float:
""" The radius of an internal thread object used to size an appropriate hole """
if self.hollow:
value = self.major_diameter / 2 - 7 * self.h_parameter / 8
else:
value = None
return value
def __init__(
self,
major_diameter: float,
pitch: float,
length: float,
hand: Literal["right", "left"] = "right",
hollow: bool = False,
simple: bool = False,
thread_angle: Optional[float] = 60.0, # Default to ISO standard
):
self.major_diameter = major_diameter
self.pitch = pitch
self.length = length
self.hollow = hollow
self.simple = simple
self.thread_angle = thread_angle
if hand not in ["right", "left"]:
raise ValueError(f'hand must be one of "right" or "left" not {hand}')
self.hand = hand
super().__init__()
    def thread_profile(self) -> cq.Workplane:
        """
        Generate a 2D profile of a single external thread based on this diagram:
        https://en.wikipedia.org/wiki/ISO_metric_screw_thread#/media/File:ISO_and_UTS_Thread_Dimensions.svg
        """
        # Note: starting the thread profile at the origin will result in inconsistent results when
        # sweeping and extracting the outer edges
        thread_profile = (
            cq.Workplane("XZ")
            .moveTo(self.thread_radius / 2, 0)
            .lineTo(self.min_radius - self.h_parameter / 12, 0)
            # Rounded thread root: spline up to the flank, tangent vertically at
            # the start and at the flank angle at the end
            .spline(
                [(self.min_radius, self.pitch / 8)],
                tangents=[
                    (0, 1, 0),
                    (
                        sin(radians(90 - self.thread_angle / 2)),
                        cos(radians(90 - self.thread_angle / 2)),
                    ),
                ],
                includeCurrent=True,
            )
            # Straight flank up to the flat crest; the crest spans
            # pitch * [7/16, 9/16] at the major radius
            .lineTo(self.major_diameter / 2, 7 * self.pitch / 16)
            .lineTo(self.major_diameter / 2, 9 * self.pitch / 16)
            .lineTo(self.min_radius, 7 * self.pitch / 8)
            # Mirror-image rounded root on the trailing side of the tooth
            .spline(
                [(self.min_radius - self.h_parameter / 12, self.pitch)],
                tangents=[
                    (
                        -sin(radians(90 - self.thread_angle / 2)),
                        cos(radians(90 - self.thread_angle / 2)),
                    ),
                    (0, 1, 0),
                ],
                includeCurrent=True,
            )
            .lineTo(self.thread_radius / 2, self.pitch)
            .close()
        )
        return thread_profile
def prepare_revolve_wires(self, thread_wire) -> Tuple:
if self.hollow:
inner_wires = [
cq.Wire.makeCircle(
radius=self.major_diameter / 2 - 7 * self.h_parameter / 8,
center=cq.Vector(0, 0, 0),
normal=cq.Vector(0, 0, 1),
)
]
else:
inner_wires = []
return (thread_wire, inner_wires)
class InternalThread(Thread):
    """ Create a thread object used in a nut """

    @property
    def thread_radius(self) -> float:
        """ The center of the thread radius or pitch radius """
        # Internal threads sit 3/4 H outward from the minor radius
        return self.min_radius + 3 * self.h_parameter / 4

    @property
    def internal_thread_socket_radius(self) -> float:
        """ The radius of an internal thread object used to size an appropriate hole """
        return self.major_diameter / 2 + 3 * self.h_parameter / 4

    def __init__(
        self,
        major_diameter: float,
        pitch: float,
        length: float,
        hand: Literal["right", "left"] = "right",
        simple: bool = False,
        thread_angle: Optional[float] = 60.0,  # Default to ISO standard
    ):
        # Store geometric parameters; the Thread base-class __init__ builds
        # the solid from them.
        self.major_diameter = major_diameter
        self.pitch = pitch
        self.length = length
        self.simple = simple
        self.thread_angle = thread_angle
        if hand not in ["right", "left"]:
            raise ValueError(f'hand must be one of "right" or "left" not {hand}')
        self.hand = hand
        super().__init__()

    def thread_profile(self) -> cq.Workplane:
        """
        Generate a 2D profile of a single internal thread based on this diagram:
        https://en.wikipedia.org/wiki/ISO_metric_screw_thread#/media/File:ISO_and_UTS_Thread_Dimensions.svg
        """
        thread_profile = (
            cq.Workplane("XZ")
            .moveTo(self.thread_radius / 2, 0)
            .lineTo(self.min_radius, 0)
            .lineTo(self.min_radius, self.pitch / 8)
            # Flank up to the crest at the major radius
            .lineTo(self.major_diameter / 2, 7 * self.pitch / 16)
            # Rounded crest: spline whose end tangents match the flank angle
            # on both sides of the tooth
            .spline(
                [(self.major_diameter / 2, 9 * self.pitch / 16)],
                tangents=[
                    (
                        sin(radians(90 - self.thread_angle / 2)),
                        cos(radians(90 - self.thread_angle / 2)),
                    ),
                    (
                        -sin(radians(90 - self.thread_angle / 2)),
                        cos(radians(90 - self.thread_angle / 2)),
                    ),
                ],
                includeCurrent=True,
            )
            .lineTo(self.min_radius, 7 * self.pitch / 8)
            .lineTo(self.min_radius, self.pitch)
            .lineTo(self.thread_radius / 2, self.pitch)
            .close()
        )
        return thread_profile

    def prepare_revolve_wires(self, thread_wire) -> Tuple:
        # The nut material is the annulus between this outer circle and the
        # thread profile wire, which becomes the inner boundary.
        outer_wire = cq.Wire.makeCircle(
            radius=self.major_diameter / 2 + 3 * self.h_parameter / 4,
            center=cq.Vector(0, 0, 0),
            normal=cq.Vector(0, 0, 1),
        )
        return (outer_wire, [thread_wire])
class Nut(ABC):
    """Base class used to create standard or custom threaded nuts.

    Subclasses provide ``metric_parameters`` / ``imperial_parameters`` lookup
    tables (plain class-level dicts) and a ``make_nut_body`` implementation.

    Fixes: the abstract placeholders previously *returned* NotImplementedError
    instead of raising it; the size-list annotations said ``-> str`` for list
    returns; cq annotations are quoted so the class does not require cadquery
    at definition time.
    """

    @property
    @classmethod
    @abstractmethod
    def metric_parameters(cls):
        """Each derived class must provide a metric_parameters dictionary"""
        # NOTE(review): property+classmethod stacking is non-functional as a
        # descriptor; it only marks this abstract. Subclasses override it with
        # a plain dict (see HexNut/SquareNut).
        raise NotImplementedError

    @property
    @classmethod
    @abstractmethod
    def imperial_parameters(cls):
        """Each derived class must provide an imperial_parameters dictionary"""
        raise NotImplementedError

    @classmethod
    def metric_sizes(cls) -> list:
        """Return a list of the standard metric nut sizes"""
        return list(cls.metric_parameters)

    @classmethod
    def imperial_sizes(cls) -> list:
        """Return a list of the standard imperial nut sizes"""
        return list(cls.imperial_parameters)

    def __init__(self):
        # Standard-size nuts are constructed with a ``size`` string which is
        # parsed here; custom nuts assign width/thickness/thread_* directly
        # before calling this.
        if hasattr(self, "size"):
            self.extract_nut_parameters()
        self.cq_object = self.make_nut()

    def extract_nut_parameters(self):
        """Parse the nut size string into thread_diameter, thread_pitch, width and thickness.

        Raises:
            ValueError: if ``self.size`` is not a known metric or imperial size.
        """
        if self.size in self.metric_parameters:
            nut_data = self.metric_parameters[self.size]
            self.width = nut_data["Width"]
            self.thickness = nut_data["Height"]
            # Metric sizes look like "M3-0.5": diameter after the 'M', pitch
            # after the '-'
            size_parts = self.size.split("-")
            self.thread_diameter = float(size_parts[0][1:])
            self.thread_pitch = float(size_parts[1])
        elif self.size in self.imperial_parameters:
            nut_data = self.imperial_parameters[self.size]
            self.width = nut_data["Width"]
            self.thickness = nut_data["Height"]
            (self.thread_diameter, self.thread_pitch) = decode_imperial_size(self.size)
        else:
            raise ValueError(
                f"Invalid nut size {self.size} - must be one of:"
                f"{list(self.metric_parameters.keys())+list(self.imperial_parameters.keys())}"
            )

    def make_nut_body(self, internal_thread_socket_radius) -> "cq.Workplane":
        """ Replaced by derived class implementation """

    def make_nut(self) -> "cq.Solid":
        """Create an arbitrary sized nut: body fused with an internal thread."""
        thread = InternalThread(
            major_diameter=self.thread_diameter,
            pitch=self.thread_pitch,
            length=self.thickness,
            hand=self.hand,
            simple=self.simple,
        )
        # glue=True is valid here because body and thread only touch, which
        # makes the union much faster
        nut = (
            self.make_nut_body(thread.internal_thread_socket_radius)
            .union(thread.cq_object, glue=True)
            .val()
        )
        return nut
class HexNut(Nut):
    """Create a hex nut from a standard size string or explicit dimensions."""

    metric_parameters = evaluate_parameter_dict(
        read_fastener_parameters_from_csv("metric_hex_parameters.csv"), units="metric",
    )
    imperial_parameters = evaluate_parameter_dict(
        read_fastener_parameters_from_csv("imperial_hex_parameters.csv"),
        units="imperial",
    )

    @overload
    def __init__(
        self, size: str, hand: Literal["right", "left"] = "right", simple: bool = False,
    ):
        ...

    @overload
    def __init__(
        self,
        width: Optional[float] = None,
        thread_diameter: Optional[float] = None,
        thread_pitch: Optional[float] = None,
        thickness: Optional[float] = None,
        hand: Literal["right", "left"] = "right",
        simple: bool = False,
    ):
        ...

    # BUG FIX: removed the @cache decorator. Every new instance is a distinct
    # cache key, so the cache could never produce a hit; it only pinned each
    # instance (and its kwargs) in memory for the life of the process.
    def __init__(self, **kwargs):
        """Set defaults, copy keyword arguments onto the instance, then build."""
        self.hand = "right"
        self.simple = False
        for key, value in kwargs.items():
            setattr(self, key, value)
        super().__init__()

    def make_nut_body(self, internal_thread_socket_radius) -> cq.Workplane:
        """ Create a hex nut body with chamferred top and bottom """
        # Distance across the tips of the hex
        hex_diameter = self.width / cos(pi / 6)
        # Chamfer between the hex tips and flats
        chamfer_size = (hex_diameter - self.width) / 2
        nut_body = (
            cq.Workplane("XY")
            .circle(hex_diameter / 2)  # Create a circle that contains the hexagon
            .circle(internal_thread_socket_radius)  # .. with a hole in the center
            .extrude(self.thickness)
            .edges(cq.selectors.RadiusNthSelector(1))
            .chamfer(chamfer_size / 2, chamfer_size)  # Chamfer the circular edges
            .intersect(
                cq.Workplane("XY").polygon(6, hex_diameter).extrude(self.thickness)
            )
        )
        return nut_body
class SquareNut(Nut):
    """Create a square nut from a standard size string or explicit dimensions.

    NOTE(review): reuses the hex-nut CSV tables for Width/Height values -
    confirm this is intentional rather than a placeholder.
    """

    metric_parameters = evaluate_parameter_dict(
        read_fastener_parameters_from_csv("metric_hex_parameters.csv"), units="metric",
    )
    imperial_parameters = evaluate_parameter_dict(
        read_fastener_parameters_from_csv("imperial_hex_parameters.csv"),
        units="imperial",
    )

    @overload
    def __init__(
        self, size: str, hand: Literal["right", "left"] = "right", simple: bool = False,
    ):
        ...

    @overload
    def __init__(
        self,
        width: Optional[float] = None,
        thread_diameter: Optional[float] = None,
        thread_pitch: Optional[float] = None,
        thickness: Optional[float] = None,
        hand: Literal["right", "left"] = "right",
        simple: bool = False,
    ):
        ...

    # BUG FIX: removed the @cache decorator. Every new instance is a distinct
    # cache key, so the cache could never produce a hit; it only pinned each
    # instance (and its kwargs) in memory for the life of the process.
    def __init__(self, **kwargs):
        """Set defaults, copy keyword arguments onto the instance, then build."""
        self.hand = "right"
        self.simple = False
        for key, value in kwargs.items():
            setattr(self, key, value)
        super().__init__()

    def make_nut_body(self, internal_thread_socket_radius) -> cq.Workplane:
        """Create a plain square prism body with a central thread socket."""
        nut_body = (
            cq.Workplane("XY")
            .rect(self.width, self.width)
            .circle(internal_thread_socket_radius)
            .extrude(self.thickness)
        )
        return nut_body
class Screw(ABC):
""" Base class for a set of threaded screws or bolts """
@property
def head(self):
""" A cadquery Solid thread as defined by class attributes """
return self.make_head()
@property
def shank(self):
""" A cadquery Solid thread as defined by class attributes """
return ExternalThread(
major_diameter=self.thread_diameter,
pitch=self.thread_pitch,
length=self.thread_length,
hand=self.hand,
simple=self.simple,
).make_shank(body_length=self.body_length)
@property
@classmethod
@abstractmethod
def metric_parameters(cls):
""" Each derived class must provide a metric_parameters dictionary """
return NotImplementedError
@property
@classmethod
@abstractmethod
def imperial_parameters(cls):
""" Each derived class must provide an imperial_parameters dictionary """
return NotImplementedError
    @classmethod
    def metric_sizes(cls) -> list:
        """ Return a list of the standard screw sizes """
        # Keys of the class-level parameter table (annotation corrected from
        # `-> str`: this returns a list of size strings).
        return list(cls.metric_parameters.keys())
    @classmethod
    def imperial_sizes(cls) -> list:
        """ Return a list of the standard screw sizes """
        # Keys of the class-level parameter table (annotation corrected from
        # `-> str`: this returns a list of size strings).
        return list(cls.imperial_parameters.keys())
@property
def cq_object(self):
""" A cadquery Solid thread as defined by class attributes """
# Class 'NotImplementedError' has no 'union' member isn't valid as
# make_head() is an abstractmethod defined in a derived class
return self.head.union(self.shank, glue=True).val()
def __init__(self):
""" Must be executed after __init__ in the derived class where instance variables
are assigned. Extract key parameters for standard sized screws """
if not hasattr(self, "length"):
raise AttributeError(
"the attribute 'length' must be set in the derived class of Screw"
)
length = self.length
if hasattr(self, "size"):
size_parts = self.size.split("-")
if self.size in self.metric_parameters.keys():
screw_data = self.metric_parameters[self.size]
self.thread_diameter = float(size_parts[0][1:])
self.thread_pitch = float(size_parts[1])
elif self.size in self.imperial_parameters.keys():
screw_data = self.imperial_parameters[self.size]
(self.thread_diameter, self.thread_pitch) = decode_imperial_size(
self.size
)
else:
raise ValueError(
f"Invalid socket head cap screw size {self.size}, must be one of:"
f"{list(self.metric_parameters.keys())}"
f"{list(self.imperial_parameters.keys())}"
)
for key in screw_data.keys():
setattr(self, key.lower(), screw_data[key.title()])
if not | |
devicefarm)
if directconnect is not None:
pulumi.set(__self__, "directconnect", directconnect)
if dlm is not None:
pulumi.set(__self__, "dlm", dlm)
if dms is not None:
pulumi.set(__self__, "dms", dms)
if docdb is not None:
pulumi.set(__self__, "docdb", docdb)
if ds is not None:
pulumi.set(__self__, "ds", ds)
if dynamodb is not None:
pulumi.set(__self__, "dynamodb", dynamodb)
if ec2 is not None:
pulumi.set(__self__, "ec2", ec2)
if ecr is not None:
pulumi.set(__self__, "ecr", ecr)
if ecrpublic is not None:
pulumi.set(__self__, "ecrpublic", ecrpublic)
if ecs is not None:
pulumi.set(__self__, "ecs", ecs)
if efs is not None:
pulumi.set(__self__, "efs", efs)
if eks is not None:
pulumi.set(__self__, "eks", eks)
if elasticache is not None:
pulumi.set(__self__, "elasticache", elasticache)
if elasticbeanstalk is not None:
pulumi.set(__self__, "elasticbeanstalk", elasticbeanstalk)
if elastictranscoder is not None:
pulumi.set(__self__, "elastictranscoder", elastictranscoder)
if elb is not None:
pulumi.set(__self__, "elb", elb)
if emr is not None:
pulumi.set(__self__, "emr", emr)
if emrcontainers is not None:
pulumi.set(__self__, "emrcontainers", emrcontainers)
if es is not None:
pulumi.set(__self__, "es", es)
if firehose is not None:
pulumi.set(__self__, "firehose", firehose)
if fms is not None:
pulumi.set(__self__, "fms", fms)
if forecast is not None:
pulumi.set(__self__, "forecast", forecast)
if fsx is not None:
pulumi.set(__self__, "fsx", fsx)
if gamelift is not None:
pulumi.set(__self__, "gamelift", gamelift)
if glacier is not None:
pulumi.set(__self__, "glacier", glacier)
if globalaccelerator is not None:
pulumi.set(__self__, "globalaccelerator", globalaccelerator)
if glue is not None:
pulumi.set(__self__, "glue", glue)
if greengrass is not None:
pulumi.set(__self__, "greengrass", greengrass)
if guardduty is not None:
pulumi.set(__self__, "guardduty", guardduty)
if iam is not None:
pulumi.set(__self__, "iam", iam)
if identitystore is not None:
pulumi.set(__self__, "identitystore", identitystore)
if imagebuilder is not None:
pulumi.set(__self__, "imagebuilder", imagebuilder)
if inspector is not None:
pulumi.set(__self__, "inspector", inspector)
if iot is not None:
pulumi.set(__self__, "iot", iot)
if iotanalytics is not None:
pulumi.set(__self__, "iotanalytics", iotanalytics)
if iotevents is not None:
pulumi.set(__self__, "iotevents", iotevents)
if kafka is not None:
pulumi.set(__self__, "kafka", kafka)
if kinesis is not None:
pulumi.set(__self__, "kinesis", kinesis)
if kinesisanalytics is not None:
pulumi.set(__self__, "kinesisanalytics", kinesisanalytics)
if kinesisanalyticsv2 is not None:
pulumi.set(__self__, "kinesisanalyticsv2", kinesisanalyticsv2)
if kinesisvideo is not None:
pulumi.set(__self__, "kinesisvideo", kinesisvideo)
if kms is not None:
pulumi.set(__self__, "kms", kms)
if lakeformation is not None:
pulumi.set(__self__, "lakeformation", lakeformation)
if lambda_ is not None:
pulumi.set(__self__, "lambda_", lambda_)
if lexmodels is not None:
pulumi.set(__self__, "lexmodels", lexmodels)
if licensemanager is not None:
pulumi.set(__self__, "licensemanager", licensemanager)
if lightsail is not None:
pulumi.set(__self__, "lightsail", lightsail)
if location is not None:
pulumi.set(__self__, "location", location)
if macie is not None:
pulumi.set(__self__, "macie", macie)
if macie2 is not None:
pulumi.set(__self__, "macie2", macie2)
if managedblockchain is not None:
pulumi.set(__self__, "managedblockchain", managedblockchain)
if marketplacecatalog is not None:
pulumi.set(__self__, "marketplacecatalog", marketplacecatalog)
if mediaconnect is not None:
pulumi.set(__self__, "mediaconnect", mediaconnect)
if mediaconvert is not None:
pulumi.set(__self__, "mediaconvert", mediaconvert)
if medialive is not None:
pulumi.set(__self__, "medialive", medialive)
if mediapackage is not None:
pulumi.set(__self__, "mediapackage", mediapackage)
if mediastore is not None:
pulumi.set(__self__, "mediastore", mediastore)
if mediastoredata is not None:
pulumi.set(__self__, "mediastoredata", mediastoredata)
if memorydb is not None:
pulumi.set(__self__, "memorydb", memorydb)
if mq is not None:
pulumi.set(__self__, "mq", mq)
if mwaa is not None:
pulumi.set(__self__, "mwaa", mwaa)
if neptune is not None:
pulumi.set(__self__, "neptune", neptune)
if networkfirewall is not None:
pulumi.set(__self__, "networkfirewall", networkfirewall)
if networkmanager is not None:
pulumi.set(__self__, "networkmanager", networkmanager)
if opsworks is not None:
pulumi.set(__self__, "opsworks", opsworks)
if organizations is not None:
pulumi.set(__self__, "organizations", organizations)
if outposts is not None:
pulumi.set(__self__, "outposts", outposts)
if personalize is not None:
pulumi.set(__self__, "personalize", personalize)
if pinpoint is not None:
pulumi.set(__self__, "pinpoint", pinpoint)
if pricing is not None:
pulumi.set(__self__, "pricing", pricing)
if qldb is not None:
pulumi.set(__self__, "qldb", qldb)
if quicksight is not None:
pulumi.set(__self__, "quicksight", quicksight)
if ram is not None:
pulumi.set(__self__, "ram", ram)
if rds is not None:
pulumi.set(__self__, "rds", rds)
if redshift is not None:
pulumi.set(__self__, "redshift", redshift)
if resourcegroups is not None:
pulumi.set(__self__, "resourcegroups", resourcegroups)
if resourcegroupstaggingapi is not None:
pulumi.set(__self__, "resourcegroupstaggingapi", resourcegroupstaggingapi)
if route53 is not None:
pulumi.set(__self__, "route53", route53)
if route53domains is not None:
pulumi.set(__self__, "route53domains", route53domains)
if route53recoverycontrolconfig is not None:
pulumi.set(__self__, "route53recoverycontrolconfig", route53recoverycontrolconfig)
if route53recoveryreadiness is not None:
pulumi.set(__self__, "route53recoveryreadiness", route53recoveryreadiness)
if route53resolver is not None:
pulumi.set(__self__, "route53resolver", route53resolver)
if s3 is not None:
pulumi.set(__self__, "s3", s3)
if s3control is not None:
pulumi.set(__self__, "s3control", s3control)
if s3outposts is not None:
pulumi.set(__self__, "s3outposts", s3outposts)
if sagemaker is not None:
pulumi.set(__self__, "sagemaker", sagemaker)
if schemas is not None:
pulumi.set(__self__, "schemas", schemas)
if sdb is not None:
pulumi.set(__self__, "sdb", sdb)
if secretsmanager is not None:
pulumi.set(__self__, "secretsmanager", secretsmanager)
if securityhub is not None:
pulumi.set(__self__, "securityhub", securityhub)
if serverlessrepo is not None:
pulumi.set(__self__, "serverlessrepo", serverlessrepo)
if servicecatalog is not None:
pulumi.set(__self__, "servicecatalog", servicecatalog)
if servicediscovery is not None:
pulumi.set(__self__, "servicediscovery", servicediscovery)
if servicequotas is not None:
pulumi.set(__self__, "servicequotas", servicequotas)
if ses is not None:
pulumi.set(__self__, "ses", ses)
if shield is not None:
pulumi.set(__self__, "shield", shield)
if signer is not None:
pulumi.set(__self__, "signer", signer)
if sns is not None:
pulumi.set(__self__, "sns", sns)
if sqs is not None:
pulumi.set(__self__, "sqs", sqs)
if ssm is not None:
pulumi.set(__self__, "ssm", ssm)
if ssoadmin is not None:
pulumi.set(__self__, "ssoadmin", ssoadmin)
if stepfunctions is not None:
pulumi.set(__self__, "stepfunctions", stepfunctions)
if storagegateway is not None:
pulumi.set(__self__, "storagegateway", storagegateway)
if sts is not None:
pulumi.set(__self__, "sts", sts)
if swf is not None:
pulumi.set(__self__, "swf", swf)
if synthetics is not None:
pulumi.set(__self__, "synthetics", synthetics)
if timestreamwrite is not None:
pulumi.set(__self__, "timestreamwrite", timestreamwrite)
if transfer is not None:
pulumi.set(__self__, "transfer", transfer)
if waf is not None:
pulumi.set(__self__, "waf", waf)
if wafregional is not None:
pulumi.set(__self__, "wafregional", wafregional)
if wafv2 is not None:
pulumi.set(__self__, "wafv2", wafv2)
if worklink is not None:
pulumi.set(__self__, "worklink", worklink)
if workmail is not None:
pulumi.set(__self__, "workmail", workmail)
if workspaces is not None:
pulumi.set(__self__, "workspaces", workspaces)
if xray is not None:
pulumi.set(__self__, "xray", xray)
    @property
    @pulumi.getter
    def accessanalyzer(self) -> Optional[str]:
        """Value configured for ``accessanalyzer``, or None if unset."""
        return pulumi.get(self, "accessanalyzer")
    @property
    @pulumi.getter
    def acm(self) -> Optional[str]:
        """Value configured for ``acm``, or None if unset."""
        return pulumi.get(self, "acm")
    @property
    @pulumi.getter
    def acmpca(self) -> Optional[str]:
        """Value configured for ``acmpca``, or None if unset."""
        return pulumi.get(self, "acmpca")
    @property
    @pulumi.getter
    def amplify(self) -> Optional[str]:
        """Value configured for ``amplify``, or None if unset."""
        return pulumi.get(self, "amplify")
    @property
    @pulumi.getter
    def apigateway(self) -> Optional[str]:
        """Value configured for ``apigateway``, or None if unset."""
        return pulumi.get(self, "apigateway")
    @property
    @pulumi.getter
    def appconfig(self) -> Optional[str]:
        """Value configured for ``appconfig``, or None if unset."""
        return pulumi.get(self, "appconfig")
    @property
    @pulumi.getter
    def applicationautoscaling(self) -> Optional[str]:
        """Value configured for ``applicationautoscaling``, or None if unset."""
        return pulumi.get(self, "applicationautoscaling")
    @property
    @pulumi.getter
    def applicationinsights(self) -> Optional[str]:
        """Value configured for ``applicationinsights``, or None if unset."""
        return pulumi.get(self, "applicationinsights")
    @property
    @pulumi.getter
    def appmesh(self) -> Optional[str]:
        """Value configured for ``appmesh``, or None if unset."""
        return pulumi.get(self, "appmesh")
    @property
    @pulumi.getter
    def apprunner(self) -> Optional[str]:
        """Value configured for ``apprunner``, or None if unset."""
        return pulumi.get(self, "apprunner")
    @property
    @pulumi.getter
    def appstream(self) -> Optional[str]:
        """Value configured for ``appstream``, or None if unset."""
        return pulumi.get(self, "appstream")
    @property
    @pulumi.getter
    def appsync(self) -> Optional[str]:
        """Value configured for ``appsync``, or None if unset."""
        return pulumi.get(self, "appsync")
    @property
    @pulumi.getter
    def athena(self) -> Optional[str]:
        """Value configured for ``athena``, or None if unset."""
        return pulumi.get(self, "athena")
    @property
    @pulumi.getter
    def auditmanager(self) -> Optional[str]:
        """Value configured for ``auditmanager``, or None if unset."""
        return pulumi.get(self, "auditmanager")
    @property
    @pulumi.getter
    def autoscaling(self) -> Optional[str]:
        """Value configured for ``autoscaling``, or None if unset."""
        return pulumi.get(self, "autoscaling")
    @property
    @pulumi.getter
    def autoscalingplans(self) -> Optional[str]:
        """Value configured for ``autoscalingplans``, or None if unset."""
        return pulumi.get(self, "autoscalingplans")
    @property
    @pulumi.getter
    def backup(self) -> Optional[str]:
        """Value configured for ``backup``, or None if unset."""
        return pulumi.get(self, "backup")
    @property
    @pulumi.getter
    def batch(self) -> Optional[str]:
        """Value configured for ``batch``, or None if unset."""
        return pulumi.get(self, "batch")
    @property
    @pulumi.getter
    def budgets(self) -> Optional[str]:
        """Value configured for ``budgets``, or None if unset."""
        return pulumi.get(self, "budgets")
    @property
    @pulumi.getter
    def chime(self) -> Optional[str]:
        """Value configured for ``chime``, or None if unset."""
        return pulumi.get(self, "chime")
    @property
    @pulumi.getter
    def cloud9(self) -> Optional[str]:
        """Value configured for ``cloud9``, or None if unset."""
        return pulumi.get(self, "cloud9")
    @property
    @pulumi.getter
    def cloudcontrolapi(self) -> Optional[str]:
        """Value configured for ``cloudcontrolapi``, or None if unset."""
        return pulumi.get(self, "cloudcontrolapi")
    @property
    @pulumi.getter
    def cloudformation(self) -> Optional[str]:
        """Value configured for ``cloudformation``, or None if unset."""
        return pulumi.get(self, "cloudformation")
    @property
    @pulumi.getter
    def cloudfront(self) -> Optional[str]:
        """Value configured for ``cloudfront``, or None if unset."""
        return pulumi.get(self, "cloudfront")
    @property
    @pulumi.getter
    def cloudhsm(self) -> Optional[str]:
        """Value configured for ``cloudhsm``, or None if unset."""
        return pulumi.get(self, "cloudhsm")
    @property
    @pulumi.getter
    def cloudsearch(self) -> Optional[str]:
        """Value configured for ``cloudsearch``, or None if unset."""
        return pulumi.get(self, "cloudsearch")
    @property
    @pulumi.getter
    def cloudtrail(self) -> Optional[str]:
        """Value configured for ``cloudtrail``, or None if unset."""
        return pulumi.get(self, "cloudtrail")
    @property
    @pulumi.getter
    def cloudwatch(self) -> Optional[str]:
        """Value configured for ``cloudwatch``, or None if unset."""
        return pulumi.get(self, "cloudwatch")
    @property
    @pulumi.getter
    def cloudwatchevents(self) -> Optional[str]:
        """Value configured for ``cloudwatchevents``, or None if unset."""
        return pulumi.get(self, "cloudwatchevents")
    @property
    @pulumi.getter
    def cloudwatchlogs(self) -> Optional[str]:
        """Value configured for ``cloudwatchlogs``, or None if unset."""
        return pulumi.get(self, "cloudwatchlogs")
    @property
    @pulumi.getter
    def codeartifact(self) -> Optional[str]:
        """Value configured for ``codeartifact``, or None if unset."""
        return pulumi.get(self, "codeartifact")
    @property
    @pulumi.getter
    def codebuild(self) -> Optional[str]:
        """Value configured for ``codebuild``, or None if unset."""
        return pulumi.get(self, "codebuild")
    @property
    @pulumi.getter
    def codecommit(self) -> Optional[str]:
        """Value configured for ``codecommit``, or None if unset."""
        return pulumi.get(self, "codecommit")
    @property
    @pulumi.getter
    def codedeploy(self) -> Optional[str]:
        """Value configured for ``codedeploy``, or None if unset."""
        return pulumi.get(self, "codedeploy")
    @property
    @pulumi.getter
    def codepipeline(self) -> Optional[str]:
        """Value configured for ``codepipeline``, or None if unset."""
        return pulumi.get(self, "codepipeline")
    @property
    @pulumi.getter
    def codestarconnections(self) -> Optional[str]:
        """Value configured for ``codestarconnections``, or None if unset."""
        return pulumi.get(self, "codestarconnections")
    @property
    @pulumi.getter
    def cognitoidentity(self) -> Optional[str]:
        """Value configured for ``cognitoidentity``, or None if unset."""
        return pulumi.get(self, "cognitoidentity")
    @property
    @pulumi.getter
    def cognitoidp(self) -> Optional[str]:
        """Value configured for ``cognitoidp``, or None if unset."""
        return pulumi.get(self, "cognitoidp")
    @property
    @pulumi.getter
    def configservice(self) -> Optional[str]:
        """Value configured for ``configservice``, or None if unset."""
        return pulumi.get(self, "configservice")
@property
@pulumi.getter
def connect(self) -> Optional[str]:
return pulumi.get(self, | |
root = build_tree(result['id'], executions_by_parent, execution_descriptions, is_cached_result)
if tree:
print(format_tree(tree[root], root))
try:
num_processed_results = 0
roots = collections.OrderedDict()
for execution_result in dxpy.find_executions(**query):
if args.trees:
if args.classname == 'job':
root = execution_result['describe']['originJob']
else:
root = execution_result['describe']['rootExecution']
if root not in roots:
num_processed_results += 1
else:
num_processed_results += 1
if (num_processed_results > args.num_results):
more_results = True
break
if args.json:
json_output.append(execution_result['describe'])
elif args.trees:
roots[root] = root
if args.classname == 'analysis' and root.startswith('job-'):
# Analyses in trees with jobs at their root found in "dx find analyses" are displayed unrooted,
# and only the last analysis found is displayed.
roots[root] = execution_result['describe']['id']
elif args.brief:
print(execution_result['id'])
elif not args.trees:
print(format_tree({}, get_find_executions_string(execution_result['describe'],
has_children=False,
single_result=True,
show_outputs=args.show_outputs)))
if args.trees:
executions_by_parent, descriptions = collections.defaultdict(list), {}
root_field = 'origin_job' if args.classname == 'job' else 'root_execution'
parent_field = 'masterJob' if args.no_subjobs else 'parentJob'
query = {'classname': args.classname,
'describe': {"io": include_io},
'include_subjobs': False if args.no_subjobs else True,
root_field: list(roots.keys())}
if not args.all_projects:
# If the query doesn't specify a project, the server finds all projects to which the user has explicit
# permissions, but doesn't search through public projects.
# In "all projects" mode, we don't specify a project in the initial query, and so don't need to specify
# one in the follow-up query here (because the initial query can't return any jobs in projects to which
# the user doesn't have explicit permissions).
# When searching in a specific project, we set a project in the query here, in case this is a public
# project and the user doesn't have explicit permissions (otherwise, the follow-up query would return
# empty results).
query['project'] = project
def process_execution_result(execution_result):
execution_desc = execution_result['describe']
parent = execution_desc.get(parent_field) or execution_desc.get('parentAnalysis')
descriptions[execution_result['id']] = execution_desc
if parent:
executions_by_parent[parent].append(execution_result['id'])
# If an analysis with cached children, also insert those
if execution_desc['class'] == 'analysis':
for stage_desc in execution_desc['stages']:
if 'parentAnalysis' in stage_desc['execution'] and stage_desc['execution']['parentAnalysis'] != execution_result['id'] and \
(args.classname != 'analysis' or stage_desc['execution']['class'] == 'analysis'):
# this is a cached stage (with a different parent)
executions_by_parent[execution_result['id']].append(stage_desc['execution']['id'])
if stage_desc['execution']['id'] not in descriptions:
descriptions[stage_desc['execution']['id']] = stage_desc['execution']
# Short-circuit the find_execution API call(s) if there are
# no root executions (and therefore we would have gotten 0
# results anyway)
if len(roots.keys()) > 0:
for execution_result in dxpy.find_executions(**query):
process_execution_result(execution_result)
# ensure roots are sorted by their creation time
sorted_roots = sorted(roots, key=lambda root: -descriptions[roots[root]]['created'])
for root in sorted_roots:
process_tree(descriptions[roots[root]], executions_by_parent, descriptions)
if args.json:
print(json.dumps(json_output, indent=4))
if more_results and get_delimiter() is None and not (args.brief or args.json):
print(fill("* More results not shown; use -n to increase number of results or --created-before to show older results", subsequent_indent=' '))
except:
err_exit()
def find_data(args):
    """CLI handler for ``dx find data``: search for data objects and print them.

    Resolves the project/folder scope from ``args``, performs the search via
    ``dxpy.find_data_objects``, and prints the results as JSON, brief
    ``project:id`` pairs, or ``ls -l``-style lines depending on the output
    flags. Errors are reported through ``err_exit``.
    """
    # --folder deprecated to --path.
    if args.folder is None and args.path is not None:
        args.folder = args.path
    elif args.folder is not None and args.path is not None:
        err_exit(exception=DXParserError('Cannot supply both --folder and --path.'),
                 expected_exceptions=(DXParserError,))
    try_call(process_find_by_property_args, args)
    if args.all_projects:
        # Searching across all projects: drop any project/folder scoping and
        # force recursion.
        args.project = None
        args.folder = None
        args.recurse = True
    elif args.project is None:
        args.project = dxpy.WORKSPACE_ID
    else:
        # Append ':' so the resolver interprets the string as a project.
        if get_last_pos_of_char(':', args.project) == -1:
            args.project = args.project + ':'
        if args.folder is not None and get_last_pos_of_char(':', args.folder) != -1:
            err_exit(exception=DXParserError('Cannot supply both --project and --path PROJECTID:FOLDERPATH.'),
                     expected_exceptions=(DXParserError,))
        args.project, _none, _none = try_call(resolve_existing_path,
                                              args.project, 'project')
    if args.folder is not None and not args.folder.startswith('/'):
        # Relative folder path: resolve it against the current workspace path.
        args.project, args.folder, _none = try_call(resolve_path, args.folder, expected='folder')
    # Choose how much of each object's describe output to request.
    if args.brief:
        describe_input = dict(fields=dict(project=True, id=True))
    elif args.verbose:
        describe_input = True
    else:
        describe_input = dict(fields=get_ls_l_desc_fields())
    try:
        # NOTE(review): `recurse` is forwarded as False when recursion is
        # disabled and None (server-side default) otherwise — confirm this
        # inversion is intentional.
        results = dxpy.find_data_objects(classname=args.classname,
                                         state=args.state,
                                         visibility=args.visibility,
                                         properties=args.properties,
                                         name=args.name,
                                         name_mode='glob',
                                         typename=args.type,
                                         tags=args.tag, link=args.link,
                                         project=args.project,
                                         folder=args.folder,
                                         recurse=(args.recurse if not args.recurse else None),
                                         modified_after=args.mod_after,
                                         modified_before=args.mod_before,
                                         created_after=args.created_after,
                                         created_before=args.created_before,
                                         region=args.region,
                                         describe=describe_input)
        if args.json:
            print(json.dumps(list(results), indent=4))
            return
        if args.brief:
            for result in results:
                print(result['project'] + ':' + result['id'])
        else:
            for result in results:
                if args.verbose:
                    print("")
                    print_data_obj_desc(result["describe"])
                else:
                    print_ls_l_desc(result["describe"], include_folder=True, include_project=args.all_projects)
    except:
        err_exit()
def find_projects(args):
    """CLI handler for ``dx find projects``: search for projects and print them.

    Applies the property and PHI filters from ``args``, queries the API, and
    hands the result stream to ``format_find_results`` for output.
    """
    try_call(process_find_by_property_args, args)
    try_call(process_phi_param, args)
    try:
        # A --public search is forced to VIEW level and drops the
        # explicit-permissions filter; otherwise only explicitly shared
        # projects at the requested minimum level are returned.
        query = dict(name=args.name, name_mode='glob',
                     properties=args.properties, tags=args.tag,
                     level=('VIEW' if args.public else args.level),
                     describe=(not args.brief),
                     explicit_perms=(not args.public if not args.public else None),
                     public=(args.public if args.public else None),
                     created_after=args.created_after,
                     created_before=args.created_before,
                     region=args.region,
                     containsPHI=args.containsPHI)
        results = dxpy.find_projects(**query)
    except:
        err_exit()
    format_find_results(args, results)
def find_apps_result(args):
    """Run the app search described by ``args``.

    Returns a generator of ``{"id": ...}`` dicts when ``--brief`` is set,
    otherwise a list of full results sorted by app name. When
    ``--installed`` is given, results are filtered to installed apps.
    """
    describe_spec = {"fields": {"name": True,
                                "installed": args.installed,
                                "title": not args.brief,
                                "version": not args.brief,
                                "published": args.verbose,
                                "billTo": not args.brief}}
    found = dxpy.find_apps(name=args.name, name_mode='glob', category=args.category,
                           all_versions=args.all,
                           published=(not args.unpublished),
                           billed_to=args.billed_to,
                           created_by=args.creator,
                           developer=args.developer,
                           created_after=args.created_after,
                           created_before=args.created_before,
                           modified_after=args.mod_after,
                           modified_before=args.mod_before,
                           describe=describe_spec)
    if args.installed:
        # Only request the `installed` describe field when filtering by it.
        found = (app for app in found if app['describe']['installed'])
    if args.brief:
        return ({"id": app['id']} for app in found)
    return sorted(found, key=lambda app: app['describe']['name'])
def find_global_workflows_result(args):
    """Run the global-workflow search described by ``args``.

    Returns a generator of ``{"id": ...}`` dicts when ``--brief`` is set,
    otherwise a list of full results sorted by workflow name.
    """
    describe_spec = {"fields": {"name": True,
                                "title": not args.brief,
                                "version": not args.brief,
                                "published": args.verbose,
                                "billTo": not args.brief}}
    found = dxpy.find_global_workflows(name=args.name, name_mode='glob', category=args.category,
                                       all_versions=args.all,
                                       published=(not args.unpublished),
                                       billed_to=args.billed_to,
                                       created_by=args.creator,
                                       developer=args.developer,
                                       created_after=args.created_after,
                                       created_before=args.created_before,
                                       modified_after=args.mod_after,
                                       modified_before=args.mod_before,
                                       describe=describe_spec)
    if args.brief:
        return ({"id": workflow['id']} for workflow in found)
    return sorted(found, key=lambda workflow: workflow['describe']['name'])
def print_find_results(results, args):
    """Print app/global-workflow find results.

    Output format depends on ``args``: full JSON (--json), bare IDs
    (--brief), or one human-readable line per result, with --verbose adding
    the ID and published state.
    """
    def maybe_x(result):
        # Prefix results billed to DNAnexus with the branded "x" marker.
        return DNANEXUS_X() if result['describe']['billTo'] == 'org-dnanexus' else ' '
    if args.json:
        print(json.dumps(list(results), indent=4))
        return
    if args.brief:
        for result in results:
            print(result['id'])
    elif not args.verbose:
        # Default format: "<x> title (name), vVERSION"
        for result in results:
            print(maybe_x(result) + DELIMITER(" ") + result['describe'].get('title', result['describe']['name']) + DELIMITER(' (') + result["describe"]["name"] + DELIMITER("), v") + result["describe"]["version"])
    else:
        # Verbose format additionally shows the ID and published/unpublished.
        for result in results:
            print(maybe_x(result) + DELIMITER(" ") + result["id"] + DELIMITER(" ") + result['describe'].get('title', result['describe']['name']) + DELIMITER(' (') + result["describe"]["name"] + DELIMITER('), v') + result['describe']['version'] + DELIMITER(" (") + ("published" if result["describe"].get("published", 0) > 0 else "unpublished") + DELIMITER(")"))
def find_apps(args):
    """CLI handler for ``dx find apps``: run the search and print the results."""
    try:
        print_find_results(find_apps_result(args), args)
    except:
        err_exit()
def find_global_workflows(args):
    """CLI handler for ``dx find globalworkflows``: run the search and print results."""
    try:
        print_find_results(find_global_workflows_result(args), args)
    except:
        err_exit()
def update_project(args):
    """CLI handler for ``dx update project``: apply updates to a project.

    Prints the updated project's ID with --brief, otherwise the raw JSON
    response from the API.
    """
    input_params = get_update_project_args(args)
    # The resolver expects a ':' to separate projects from folders.
    if args.project_id.find(':') == -1:
        args.project_id += ':'
    project, _none, _none = try_call(resolve_existing_path,
                                     args.project_id, 'project')
    try:
        result = dxpy.api.project_update(object_id=project, input_params=input_params)
        print(result['id'] if args.brief else json.dumps(result))
    except:
        err_exit()
def close(args):
    """CLI handler for ``dx close``: close one or more open data objects.

    Each path is resolved to one or more data objects (honoring --all for
    ambiguous names). When running under dxfuse (``_DX_FUSE`` set), objects
    are closed by writing the ``state`` extended attribute instead of
    calling the API directly. With --wait, blocks until every closed object
    reaches the closed state. Exits with code 3 if any path failed to
    resolve.
    """
    if '_DX_FUSE' in os.environ:
        # Imported lazily: xattr is only needed (and available) under dxfuse.
        from xattr import xattr
    handlers = []
    had_error = False
    for path in args.path:
        # Attempt to resolve name
        try:
            project, _folderpath, entity_results = resolve_existing_path(path,
                                                                         expected='entity',
                                                                         allow_mult=True,
                                                                         all_mult=args.all)
        except:
            project, entity_results = None, None
        if entity_results is None:
            print(fill('Could not resolve "' + path + '" to a name or ID'))
            had_error = True
        else:
            for result in entity_results:
                try:
                    obj = dxpy.get_handler(result['id'], project=project)
                    if '_DX_FUSE' in os.environ:
                        # dxfuse: setting the xattr asks the filesystem to
                        # close the object on our behalf.
                        xattr(path)['state'] = 'closed'
                    else:
                        obj.close()
                    handlers.append(obj)
                except Exception as details:
                    # Best-effort: report the failure and continue with the
                    # remaining objects.
                    print(fill(str(details)))
    if args.wait:
        for handler in handlers:
            handler._wait_on_close()
    if had_error:
        err_exit('', 3)
def wait(args):
    """CLI handler for ``dx wait``: block until executions finish or data
    objects close.

    Each path may be a job/analysis ID (wait for it to finish running) or a
    data object path/ID (wait for it to reach the closed state). With
    --from-file and a single path naming a local file, the actual paths to
    wait on are read from that file, one per line. Exits with code 3 if any
    path could not be resolved.
    """
    had_error = False
    # If only one path was provided, together with the --from-file argument,
    # check to see if it is a local file and if so gather actual paths
    # on which to wait from the contents of the file.
    if args.from_file and len(args.path) == 1 and os.path.isfile(args.path[0]):
        try:
            # `with` ensures the file handle is closed promptly.
            with open(args.path[0]) as path_file:
                args.path = path_file.read().strip().split('\n')
        except IOError as e:
            # BUG FIX: the message template used '{}' placeholders but was
            # combined with the '%' operator, which raises TypeError at
            # runtime; use str.format() instead.
            raise DXCLIError(
                'Could not open {}. The problem was: {}'.format(args.path[0], e))
    for path in args.path:
        if is_job_id(path) or is_analysis_id(path):
            dxexecution = dxpy.get_handler(path)
            print("Waiting for " + path + " to finish running...")
            try_call(dxexecution.wait_on_done)
            print("Done")
        else:
            # Attempt to resolve name
            try:
                project, _folderpath, entity_result = resolve_existing_path(path, expected='entity')
            except:
                project, entity_result = None, None
            if entity_result is None:
                print(fill('Could not resolve ' + path + ' to a data object'))
                had_error = True
            else:
                handler = dxpy.get_handler(entity_result['id'], project=project)
                print("Waiting for " + path + " to close...")
                try_call(handler._wait_on_close)
                print("Done")
    if had_error:
        err_exit('', 3)
def build(args):
sys.argv = ['dx build'] + sys.argv[2:]
def get_mode(src_dir):
    """
    Returns an applet or a workflow mode based on whether
    the source directory contains dxapp.json or dxworkflow.json.
    """
    if not os.path.isdir(src_dir):
        # NOTE(review): this reports through the module-level `parser`, while
        # sibling validation uses `build_parser` — confirm which is intended.
        parser.error("{} is not a directory".format(src_dir))
    if os.path.exists(os.path.join(src_dir, "dxworkflow.json")):
        return "workflow"
    else:
        # Default: anything without a dxworkflow.json is built as an applet.
        return "applet"
def get_validated_source_dir(args):
    """Return the source directory for the build, defaulting to the CWD.

    On Python 2, the cwd-derived path is decoded from the filesystem
    encoding so the rest of the build pipeline works with unicode paths.
    """
    src_dir = args.src_dir
    if src_dir is None:
        src_dir = os.getcwd()
        if USING_PYTHON2:
            src_dir = src_dir.decode(sys.getfilesystemencoding())
    return src_dir
def handle_arg_conflicts(args):
"""
Raises parser error (exit code 3) if there are any conflicts in the specified options.
"""
if args.mode == "app" and args.destination != '.':
build_parser.error("--destination cannot be used when creating an app (only an applet)")
if args.mode == "applet" and args.region:
build_parser.error("--region cannot be used when creating an applet (only an app)")
if args.overwrite and args.archive:
build_parser.error("Options -f/--overwrite and | |
import numpy as np
import copy
# import sklearn
import sklearn as sk
import sklearn.preprocessing
import sklearn.model_selection
import sklearn.ensemble
import sklearn.metrics
# import plotly
import plotly.graph_objs as go
import plotly.figure_factory as ff
# import internal functions
from plotly_scientific_plots.plotly_misc import plotOut
def perc(x):
    """Return the percentage of summed (e.g. truthy) elements in *x*.

    Equivalent to ``np.sum(x) / len(x) * 100``; commonly used to report the
    fraction of positive labels in a boolean vector as a percentage.
    (Converted from a lambda assignment per PEP 8 E731.)
    """
    return np.sum(x) / len(x) * 100
def plotMultiROC(y_true,      # list of true labels
                 y_scores,    # array of scores for each class of shape [n_samples, n_classes]
                 title = 'Multiclass ROC Plot',
                 n_points=100,    # reinterpolates to have exactly N points
                 labels = None,   # list of labels for each class
                 threshdot = None,
                 plot=True,   # 1/0. If 0, returns plotly json object, but doesnt plot
                 ):
    """
    Makes a multiclass ROC plot. Can also be used for binary ROC plot

    Computes one ROC curve per class (one-vs-rest, via sklearn), optionally
    reinterpolates each curve to ``n_points`` samples, and returns a plotly
    figure (or displays it, depending on ``plot`` / ``plotOut``).
    """
    y_true = np.array(y_true)
    y_scores = np.array(y_scores)
    if y_scores.ndim == 1:  # convert to [n_samples, n_classes] even if 1 class
        y_scores = np.atleast_2d(y_scores).T
    N, n_classes = y_scores.shape
    if n_classes == 1:  # needed to avoid inverting when doing binary classification
        # Negate the scores (and the threshold marker) so the single-column
        # binary case is oriented consistently with the multiclass case.
        y_scores *= -1
        if threshdot is not None:
            threshdot *= -1

    # calc ROC curves & AUC
    fpr = dict()
    tpr = dict()
    thresh = dict()
    thresh_txt = dict()
    roc_auc = dict()
    for i in range(n_classes):
        # One-vs-rest ROC for class i.
        fpr[i], tpr[i], thresh[i] = sk.metrics.roc_curve(y_true == i, y_scores[:, i])
        roc_auc[i] = sk.metrics.auc(fpr[i], tpr[i])
        if n_points is not None:
            # Resample the curve onto a uniform TPR grid.
            # NOTE(review): np.searchsorted may return len(tpr[i]) for x == 1,
            # which would index out of bounds — confirm the curve always ends
            # exactly at tpr == 1.
            x = np.linspace(0, 1, n_points)
            indxs = np.searchsorted(tpr[i], x)
            tpr[i] = tpr[i][indxs]
            fpr[i] = fpr[i][indxs]
            thresh[i] = thresh[i][indxs]
        thresh_txt[i] = ['T=%.4f' % t for t in thresh[i]]

    if labels is None:
        labels = ['C%d' % n for n in range(1, n_classes+1)]
    labels = [str(x) for x in labels]  # convert labels to str

    # make traces
    traces = []
    [traces.append(go.Scatter(y=tpr[i], x=fpr[i], name=labels[i] + '. AUC= %.2f' % (roc_auc[i]), text=thresh_txt[i],
                              legendgroup=str(i), line={'width': 1}))
     for i in range(n_classes)]
    # Diagonal chance line for reference.
    traces += [go.Scatter(y=[0, 1], x=[0, 1], name='Random classifier', line={'width': 1, 'dash': 'dot'})]
    if threshdot is not None:
        for i in range(n_classes):
            # Mark the point on each curve closest to the requested threshold.
            c_indx = (np.abs(thresh[i]-threshdot)).argmin()
            traces += [go.Scatter(x=[fpr[i][c_indx]]*2, y=[tpr[i][c_indx]]*2, mode='markers',
                                  name='Threshold', legendgroup=str(i), showlegend=False)]

    # make layout
    layout = go.Layout(title=title,
                       xaxis={'title': 'FPR'},
                       yaxis={'title': 'TPR'},
                       legend=dict(x=1),
                       hovermode='closest',
                       )

    fig = go.Figure(data=traces, layout=layout)

    return plotOut(fig, plot)
def plotMultiPR(y_true,      # list of true labels
                y_scores,    # array of scores for each class of shape [n_samples, n_classes]
                title = 'Multiclass PR Plot',
                n_points=100,    # reinterpolates to have exactly N points
                labels = None,   # list of labels for each class
                threshdot=None,  # whether to plot a dot @ the threshold
                plot=True,   # 1/0. If 0, returns plotly json object, but doesnt plot
                ):
    """
    Makes a multiclass ROC plot

    (Actually a precision-recall plot: one PR curve per class, one-vs-rest,
    via sklearn, optionally resampled to ``n_points`` samples. Returns a
    plotly figure or displays it depending on ``plot``.)
    """
    y_true = np.array(y_true)
    y_scores = np.array(y_scores)
    if y_scores.ndim == 1:  # convert to [n_samples, n_classes] even if 1 class
        y_scores = np.atleast_2d(y_scores).T
    N, n_classes = y_scores.shape
    if n_classes == 1:  # needed to avoid inverting when doing binary classification
        y_scores = -1*y_scores

    # calc ROC curves & AUC
    precision = dict()
    recall = dict()
    pr_auc = dict()
    thresh = dict()
    thresh_txt = dict()
    for i in range(n_classes):
        precision[i], recall[i], thresh[i] = sk.metrics.precision_recall_curve(y_true == i, y_scores[:, i])
        #average_precision[i] = average_precision_score(Y_test[:, i], y_score[:, i])
        #pr_auc[i] = sk.metrics.auc(precision[i], recall[i])
        # NOTE(review): the AUC shown in the legend is a hardcoded placeholder
        # (always 1); the real computation above is commented out.
        pr_auc[i] = 1
        if n_points is not None:
            # Resample the curve along the precision axis.
            x = np.linspace(precision[i][0], precision[i][-1], n_points)
            indxs = np.searchsorted(precision[i], x)
            precision[i] = precision[i][indxs]
            recall[i] = recall[i][indxs]
            # thresh has one fewer element than precision/recall; clip indices.
            thresh[i] = thresh[i][np.clip(indxs, 0, thresh[i].size - 1)]
        thresh_txt[i] = ['T=%.4f' % t for t in thresh[i]]

    if labels is None:
        labels = ['C%d' % n for n in range(1, n_classes+1)]
    labels = [str(x) for x in labels]  # convert to str

    # make traces
    traces = []
    [traces.append(go.Scatter(y=precision[i], x=recall[i], name=labels[i] + '. AUC= %.2f' % (pr_auc[i]),
                              text=thresh_txt[i], legendgroup=str(i), line={'width': 1})) for i in range(n_classes)]
    if threshdot is not None:
        for i in range(n_classes):
            # Mark the point on each curve closest to the requested threshold.
            c_indx = (np.abs(thresh[i]-threshdot)).argmin()
            traces += [go.Scatter(x=[recall[i][c_indx]]*2, y=[precision[i][c_indx]]*2, mode='markers',
                                  name='Threshold', legendgroup=str(i), showlegend=False)]

    # make layout
    layout = go.Layout(title=title,
                       yaxis={'title': 'Precision = P(y=1 | yp=1)',
                              'range': [0, 1]},  # 'Precision = P(yp=y | yp=1)'
                       xaxis={'title': 'Recall = TPR = P(yp=1 | y=1)',
                              'range': [0, 1]},  # 'Recall = TPR = P(yp=y | y=1)'
                       legend=dict(x=1),
                       hovermode='closest',
                       )

    fig = go.Figure(data=traces, layout=layout)

    return plotOut(fig, plot)
def plotConfusionMatrix(y_true, # list of true labels
y_pred, # list of predicted labels
conf_matrix = None, # optional mode to directly provide confusion matrix
title = None,
labels = None, # list of labels for each class
binarized = None, # if int/str then makes 1vsAll confusion matrix of that class
add_totals = True, # whether to add an extra row for class totals
plot = True, # 1/0. If 0, returns plotly json object, but doesnt plot
fontsize=18, # axis font
norm='rows', # how to norm matrix colors. either 'all'/'rows'/'columns'
):
"""
Plots either a full or binarized confusion matrix
EX: plotConfusionMatrix(y_true, y_pred, labels)
"""
if conf_matrix is None:
n_classes = len(labels) if labels is not None else len(np.unique(y_true))
conf_matrix = sk.metrics.confusion_matrix(y_true, y_pred, labels=range(n_classes))
else:
n_classes = conf_matrix.shape[0]
if labels is None:
labels = ['C%d' % n for n in range(1, n_classes + 1)]
acc = np.diag(conf_matrix).sum() / np.sum(conf_matrix) * 100
if binarized is not None:
# identify index of 1vsAll category
if type(binarized) == str:
bin_indx = labels.index(binarized)
else:
bin_indx = binarized
tp = np.sum(np.delete(np.delete(conf_matrix, bin_indx, axis=0), bin_indx, axis=1))
fp = np.sum(np.delete(conf_matrix[bin_indx, :], bin_indx))
fn = np.sum(np.delete(conf_matrix, bin_indx, axis=0)[:, bin_indx])
tn = conf_matrix[bin_indx, bin_indx]
conf_matrix = np.array([[tp, fn], [fp, tn]])
labels = ['T','F']
n_classes = 2
labels = [str(x) for x in labels] # convert to str
labels = ['['+x+']' if len(x)==1 else x for x in labels] #needed for stupid plotly bug
# adds an extra row for matrix totals
conf_matrix_tots = copy.deepcopy(conf_matrix)
if add_totals:
pred_tots = np.sum(conf_matrix, 0)
conf_matrix_tots = np.vstack((conf_matrix, pred_tots))
true_tots = np.sum(conf_matrix_tots, 1, keepdims=True)
conf_matrix_tots = np.hstack((conf_matrix_tots, true_tots ))
labels = labels + ['TOTAL']
# shorten labels
labels_short = [x[:10] if type(x) == str else x for x in labels]
# numeric labels
num_labels = list(range(len(labels)))
def normMatByTotal(mat, axis=0):
''' This normalzies a matrix by its row (axis=1) or column (axis=0) totals'''
axis_sums = np.sum(mat, axis=axis, keepdims=True).astype('float32')
axis_sums[axis_sums == 0] = np.nan # this avoids divide by 0.
mat = np.nan_to_num(mat / axis_sums)
return mat
# percentage hover labels
row_percs = normMatByTotal(conf_matrix, axis=1)
col_percs = normMatByTotal(conf_matrix, axis=0)
# normalize matrix
color_mat = copy.deepcopy(conf_matrix_tots)
if norm != 'all':
norm_conf_matrix = row_percs if norm=='rows' else col_percs
else:
norm_conf_matrix = conf_matrix
color_mat = color_mat.astype(float)
color_mat[:norm_conf_matrix.shape[0],:norm_conf_matrix.shape[1]] = norm_conf_matrix
# hover text
txt_format = '%d<br><b>Pred:</b> %s <br><b>True:</b> %s <br><b>Row norm:</b> %.3f%% <br><b>Col norm:</b> %.3f%%'
htext = np.array([[txt_format % (conf_matrix[r,c], labels[c], labels[r], row_percs[r,c]*100, col_percs[r,c]*100)
for c in range(n_classes)] for r in range(n_classes)])
# Adjust Total rows
if add_totals:
totals_row_shading = .0 # range 0 to 1. 0=darkest, 1=lightest
tot_val = np.min(norm_conf_matrix) + (np.max(norm_conf_matrix) - np.min(norm_conf_matrix))*totals_row_shading
color_mat[-1, :] = tot_val
color_mat[:, -1] = tot_val
pred_tot_text = np.array(['<b>%% of Predictions:</b> %.2f%%' % x for x in pred_tots/sum(pred_tots)*100])
true_tot_text = np.array([['<b>%% of True Data:</b> %.2f%%' % x] for x in true_tots[:-1]/sum(true_tots[:-1])*100]+[['Total Samples']])
htext = np.hstack((np.vstack((htext, pred_tot_text)), true_tot_text))
fig = ff.create_annotated_heatmap(color_mat, x=num_labels, y=num_labels,
colorscale='Greys', annotation_text=conf_matrix_tots)
fig.layout.yaxis.title = 'True'
fig.layout.xaxis.title = 'Predicted (Total accuracy = %.3f%%)' % acc
fig.layout.xaxis.titlefont.size = fontsize
fig.layout.yaxis.titlefont.size = fontsize
fig.layout.xaxis.tickfont.size = fontsize - 2
fig.layout.yaxis.tickfont.size = fontsize - 2
fig.layout.showlegend = False
# Add label text to axis values
fig.layout.xaxis.tickmode = 'array'
fig.layout.xaxis.range = [-.5, n_classes+.5]
fig.layout.xaxis.tickvals = num_labels
fig.layout.xaxis.ticktext = labels_short
fig.data[0].hoverlabel.bgcolor = 'rgb(188,202,225)'
if title is not None:
fig.layout.title = title
# fig.layout.yaxis.autorange = 'reversed'
fig.layout.yaxis.tickmode = 'array'
fig.layout.yaxis.range = [n_classes+.5, -.5]
fig.layout.yaxis.tickvals = num_labels
fig.layout.yaxis.ticktext = labels_short
fig.layout.margin.l = 120 # adjust left margin to avoid ylbl overlaying tick str's
fig['data'][0]['xgap'] = 1
fig['data'][0]['ygap'] = 1
## Change annotation font (& text)
for i in range(len(fig.layout.annotations)):
fig.layout.annotations[i].font.size = fontsize-3
#fig.layout.annotations[i].text = str(conf_matrix_tots.flatten()[i])
# add hover text
fig.data[0].text = htext
fig.data[0].hoverinfo = 'text'
### Adjust totals fontstyle
if add_totals:
# get totals indxs
n = n_classes
last_column_indxs = [(n + 1) * x - 1 for x in range(1, n + 1)]
last_row_indxs = list(range((n + 1) * (n), (n + 1) ** 2))
totals_annot_indxs = last_row_indxs + | |
assign to each rank; this provides a way to
specify a partition manually.
If ``None``, the parameters are partitioned according to an
internal algorithm.
(default: ``None``)
Returns:
A :class:`list` where each element of the list contains the
``param_groups`` for a rank (which itself is a :class:`list` of
:class:`dict`); element 0 corresponds to rank 0, etc.; each rank
stores the ``param_groups`` for all ranks for the collective
communication in :meth:`step`.
Raises:
ValueError: see :meth:`_validate_params_per_rank`.
RuntimeError: if ``params_per_rank`` is not ``None`` and this
:class:`ZeroRedundancyOptimizer` instance is using more than
one parameter group.
"""
if params_per_rank is None:
# Partition the parameters optimizing for uniformity
if len(self._partition_parameters_cache) == 0:
self._partition_parameters_cache = [[] for _ in range(self.world_size)]
sizes = [0] * self.world_size
for param_group in self.param_groups:
param_group_params_per_rank: List[List] = [[] for _ in range(self.world_size)]
# Sort the parameters by size (largest first)
params_sorted = sorted(param_group["params"], key=lambda t: t.numel(), reverse=True)
for param in params_sorted:
# Greedily add the parameter to rank with smallest size so far
rank = self._get_min_index(sizes)
param_group_params_per_rank[rank].append(param)
sizes[rank] += param.numel()
# Apply the constructed partition of the parameter group
self._partition_param_group(param_group, param_group_params_per_rank)
return self._partition_parameters_cache
# Partition the parameters according to `params_per_rank`
assert len(self._partition_parameters_cache) == 0, \
"Specifying `params_per_rank` should only be done when the " \
"parameters have not been partitioned yet"
if len(self.param_groups) != 1:
raise RuntimeError(
"Specifying `params_per_rank` only supports a single "
"parameter group"
)
self._verify_params_per_rank(params_per_rank)
self._partition_parameters_cache = [[] for _ in range(self.world_size)]
# Apply the passed-in partition of the parameter group
param_group = self.param_groups[0]
self._partition_param_group(param_group, params_per_rank)
return self._partition_parameters_cache
@property
def _param_to_rank(self) -> Dict[torch.Tensor, int]:
    r"""
    :class:`dict` mapping each parameter to the data parallel rank that
    owns it in the partition.

    Built lazily from :meth:`_partition_parameters` on first access and
    cached for subsequent lookups.
    """
    if not self._param_to_rank_cache:
        for owner_rank, groups in enumerate(self._partition_parameters()):
            for group in groups:
                for parameter in group["params"]:
                    self._param_to_rank_cache[parameter] = owner_rank
    return self._param_to_rank_cache
@property
def _param_to_index(self) -> Dict[torch.Tensor, int]:
    r"""
    :class:`dict` mapping each parameter to its index in the global
    optimizer state.

    NOTE: This assumes that the global optimizer state's indexing (in
    ``state_dict``) follows a linear ordering over the parameter groups.
    """
    if not self._param_to_index_cache:
        mapping = {}
        position = 0
        for group in self.param_groups:
            for parameter in group["params"]:
                mapping[parameter] = position
                position += 1
        self._param_to_index_cache = mapping
    return self._param_to_index_cache
@property
def _index_to_param(self) -> List[torch.Tensor]:
    r"""
    List mapping parameter indices in the global optimizer scheme to the
    actual params, in parameter-group order. Built lazily and cached.
    """
    if not self._index_to_param_cache:
        ordered = []
        for group in self.param_groups:
            ordered.extend(group["params"])
        self._index_to_param_cache = ordered
    return self._index_to_param_cache
def _broadcast_params_from_rank(self, rank: int):
    r"""
    Broadcasts the shard of parameters from a given rank to all other
    ranks asynchronously.

    Arguments:
        rank (int): the source rank.

    Returns:
        A :class:`list` of async work handles for the ``broadcast()`` s
        performed to synchronize the parameters.
    """
    assert not self._overlap_with_ddp, \
        "`_broadcast_params_from_rank()` should not be used if " \
        "`overlap_with_ddp=True`; instead, the broadcasting should " \
        "happen in the DDP communication hook"
    handles = []
    # The source rank is fixed for this call, so translate it to a global
    # rank once instead of once per device bucket (hoisted loop invariant;
    # the original recomputed it inside the bucket loop).
    global_rank = _get_global_rank(self.process_group, rank)
    if self.parameters_as_bucket_view:
        # One broadcast per (device, rank) bucket.
        for dev_i_buckets in self._buckets:
            bucket = dev_i_buckets[rank]
            handles.append(
                dist.broadcast(tensor=bucket, src=global_rank,
                               group=self.process_group, async_op=True)
            )
    else:
        # One broadcast per parameter assigned to `rank`.
        param_groups = self._partition_parameters()[rank]
        for param_group in param_groups:
            for param in param_group["params"]:
                handles.append(
                    dist.broadcast(tensor=param.data, src=global_rank,
                                   group=self.process_group, async_op=True)
                )
    return handles
def _sync_params(self):
    r"""
    Syncs all parameter shards across the ranks.

    Every rank broadcasts its own shard and receives each other rank's
    shard via ``broadcast()`` — bucket-by-bucket when
    ``parameters_as_bucket_view=True`` and parameter-by-parameter
    otherwise — then blocks until all of the async broadcasts complete.
    """
    pending = []
    for src_rank in range(self.world_size):
        pending.extend(self._broadcast_params_from_rank(src_rank))
    for work in pending:
        work.wait()
@property
def _device_to_params_per_rank(
    self
) -> Dict[torch.device, List[List[torch.Tensor]]]:
    r"""
    :class:`dict` mapping each device to per-rank parameter lists.

    For a device ``dev``, entry ``[dev][r]`` is the list of parameters
    assigned to rank ``r`` that are stored on ``dev``. The mapping is
    built lazily from :meth:`_partition_parameters` and cached; it is used
    for constructing the parameter buckets when
    ``parameters_as_bucket_view=True``.
    """
    assert self.parameters_as_bucket_view, \
        "`_device_to_params_per_rank` should only be used if " \
        "`parameters_as_bucket_view=True`"
    if not self._device_to_params_per_rank_cache:
        cache = self._device_to_params_per_rank_cache
        for owner_rank, groups in enumerate(self._partition_parameters()):
            for group in groups:
                for parameter in group["params"]:
                    # Lazily create one empty per-rank list per new device.
                    per_rank = cache.setdefault(
                        parameter.device,
                        [[] for _ in range(self.world_size)])
                    per_rank[owner_rank].append(parameter)
    return self._device_to_params_per_rank_cache
def _get_min_index(
self,
values: List[int],
disallowed_indices: Optional[Set[int]] = None,
) -> int:
r"""
Returns ``values.index(min(values))``, except only uses one pass. It
also excludes any indices in ``disallowed_indices`` if provided.
Arguments:
values: (List[int]): :class:`list` of values.
disallowed_indices (Optional[Set[int]]): indices that are
disallowed from being the returned min index.
"""
min_index = -1
min_value = float("inf")
for i, value in enumerate(values):
if disallowed_indices and i in disallowed_indices:
continue
if value < min_value:
min_value = value
min_index = i
assert min_index >= 0, "All indices are disallowed"
return min_index
def _assign_bucket_subset_to_rank(
    self,
    bucket_index: int,
    bucket_params: List[torch.Tensor],
    bucket_offset: int,
    assigned_rank: int,
    assigned_ranks_per_bucket: List[Set[int]],
) -> None:
    r"""
    Assigns the model parameters given by ``bucket_params``, representing a
    (possibly non-strict) subset of the parameters corresponding to a
    :class:`DistributedDataParallel` bucket, to the rank with the least
    size assigned so far and collects relevant information.

    Arguments:
        bucket_index (int): index of the :class:`DistributedDataParallel`
            gradient bucket.
        bucket_params (List[torch.Tensor]): subset of the parameters
            corresponding to the bucket to assign.
        bucket_offset (int): offset giving the index of the first element
            in ``bucket_params`` in the bucket's full parameter list.
        assigned_rank (int): rank to assign to.
        assigned_ranks_per_bucket (List[Set[int]]): :class:`set` of ranks
            assigned to each bucket.

    Raises:
        ValueError: if ``bucket_params`` is empty.
    """
    overlap_info = self._overlap_info
    if len(bucket_params) == 0:
        raise ValueError(
            "Empty bucket assignment"
        )
    params_per_rank = overlap_info.params_per_rank
    offsets = overlap_info.offsets

    # Record the assignment so every rank knows which slice of this bucket
    # the assigned rank owns.
    self._bucket_assignments_per_rank_cache[assigned_rank][bucket_index] = \
        _DDPBucketAssignment(bucket_index, bucket_params, bucket_offset)
    if self.global_rank == assigned_rank:
        # Only the owning rank materializes its own offsets and parameter
        # list, which it needs for its local optimizer step.
        offsets[bucket_index] = len(params_per_rank[assigned_rank])
        params_per_rank[assigned_rank].extend(bucket_params)
    assigned_ranks_per_bucket[bucket_index].add(assigned_rank)
    self._overlap_info.num_bucket_assignments += 1
@property
def _bucket_assignments_per_rank(
    self
) -> List[Dict[int, _DDPBucketAssignment]]:
    r"""
    :class:`list` of length world size consisting of :class:`dict` s
    mapping bucket indices to :class:`_DDPBucketAssignment` s for each
    rank.

    Built lazily on first access and cached. Depending on
    ``overlap_info.shard_buckets``, each DDP bucket is either assigned
    wholly to one rank or sharded greedily across ranks to approximate a
    uniform per-rank size.
    """
    # NOTE(review): the assertion message below appears to be missing the
    # word "should" ("... `_bucket_assignments_per_rank` only be used ...").
    assert self._overlap_with_ddp, "`_bucket_assignments_per_rank` " \
        "only be used if `overlap_with_ddp=True`"
    if len(self._bucket_assignments_per_rank_cache) > 0:
        return self._bucket_assignments_per_rank_cache
    overlap_info = self._overlap_info
    assert overlap_info.status == _OverlapStatus.INITIALIZED

    self._bucket_assignments_per_rank_cache = [{} for _ in range(self.world_size)]
    params_per_bucket = overlap_info.params_per_bucket

    if overlap_info.shard_buckets:
        # Define the assignment threshold to approximate uniformity
        assert overlap_info.total_size is not None, \
            "`total_size` was not computed"
        threshold = overlap_info.total_size / self.world_size  # type: ignore[operator]
        size_per_rank = [0 for _ in range(self.world_size)]

    num_buckets = len(params_per_bucket)
    overlap_info.assigned_ranks_per_bucket = [set() for _ in range(num_buckets)]
    assigned_ranks_per_bucket = overlap_info.assigned_ranks_per_bucket
    if not overlap_info.shard_buckets:
        # Assign each DDP bucket entirely to a single rank
        for bucket_index, bucket_params in enumerate(params_per_bucket):
            assert len(bucket_params) > 0, "Empty bucket"
            assigned_rank = self._get_assigned_rank(bucket_index)
            self._assign_bucket_subset_to_rank(
                bucket_index,
                bucket_params,
                0,
                assigned_rank,
                assigned_ranks_per_bucket,
            )
    else:
        # Assign each DDP bucket to possibly multiple ranks
        # Specifically, sort the DDP buckets by increasing size, and for
        # each bucket, iteratively assign the maximal unassigned subset
        # with size less than `threshold` to the rank with the least total
        # size so far -- each such assignment is represented by a
        # `_DDPBucketAssignment` instance and only contains parameters from
        # a single DDP bucket
        params_per_bucket_enum = sorted(
            enumerate(params_per_bucket),
            key=lambda x: sum(p.numel() for p in x[1])
        )
        for bucket_index, bucket_params in params_per_bucket_enum:
            assert len(bucket_params) > 0, "Empty bucket"
            bucket_offset = 0
            assignment_size = 0
            for param_index, param in enumerate(bucket_params):
                param_numel = param.numel()
                if assignment_size + param_numel >= threshold and param_index > bucket_offset:
                    # Current subset would exceed the uniformity threshold:
                    # flush it to the least-loaded rank not yet assigned
                    # this bucket.
                    assigned_rank = self._get_min_index(size_per_rank, assigned_ranks_per_bucket[bucket_index])
                    # Include up to but not including the parameter that
                    # exceeded the threshold
                    self._assign_bucket_subset_to_rank(
                        bucket_index,
                        bucket_params[bucket_offset:param_index],
                        bucket_offset,
                        assigned_rank,
                        assigned_ranks_per_bucket,
                    )
                    size_per_rank[assigned_rank] += assignment_size
                    bucket_offset = param_index
                    assignment_size = 0
                assignment_size += param_numel
            # Assign the remainder of the bucket so that no assignment
            # spans across two buckets
            assigned_rank = self._get_min_index(size_per_rank, assigned_ranks_per_bucket[bucket_index])
            self._assign_bucket_subset_to_rank(
                bucket_index,
                bucket_params[bucket_offset:],
                bucket_offset,
                assigned_rank,
                assigned_ranks_per_bucket,
            )
            size_per_rank[assigned_rank] += assignment_size

    return self._bucket_assignments_per_rank_cache
def _local_step(
self,
gradients: Optional[List[Optional[torch.Tensor]]] = None,
closure: Optional[Callable[[], float]] = None,
**kwargs: Any,
| |
[c_void_p]
isl.isl_union_map_from_domain_and_range.restype = c_void_p
isl.isl_union_map_from_domain_and_range.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_from_range.restype = c_void_p
isl.isl_union_map_from_range.argtypes = [c_void_p]
isl.isl_union_map_gist.restype = c_void_p
isl.isl_union_map_gist.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_gist_domain.restype = c_void_p
isl.isl_union_map_gist_domain.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_gist_params.restype = c_void_p
isl.isl_union_map_gist_params.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_gist_range.restype = c_void_p
isl.isl_union_map_gist_range.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect.restype = c_void_p
isl.isl_union_map_intersect.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect_domain_space.restype = c_void_p
isl.isl_union_map_intersect_domain_space.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect_domain_union_set.restype = c_void_p
isl.isl_union_map_intersect_domain_union_set.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect_domain_factor_domain.restype = c_void_p
isl.isl_union_map_intersect_domain_factor_domain.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect_domain_factor_range.restype = c_void_p
isl.isl_union_map_intersect_domain_factor_range.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect_params.restype = c_void_p
isl.isl_union_map_intersect_params.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect_range_space.restype = c_void_p
isl.isl_union_map_intersect_range_space.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect_range_union_set.restype = c_void_p
isl.isl_union_map_intersect_range_union_set.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect_range_factor_domain.restype = c_void_p
isl.isl_union_map_intersect_range_factor_domain.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect_range_factor_range.restype = c_void_p
isl.isl_union_map_intersect_range_factor_range.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_is_bijective.argtypes = [c_void_p]
isl.isl_union_map_is_disjoint.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_is_empty.argtypes = [c_void_p]
isl.isl_union_map_is_equal.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_is_injective.argtypes = [c_void_p]
isl.isl_union_map_is_single_valued.argtypes = [c_void_p]
isl.isl_union_map_is_strict_subset.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_is_subset.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_isa_map.argtypes = [c_void_p]
isl.isl_union_map_lexmax.restype = c_void_p
isl.isl_union_map_lexmax.argtypes = [c_void_p]
isl.isl_union_map_lexmin.restype = c_void_p
isl.isl_union_map_lexmin.argtypes = [c_void_p]
isl.isl_union_map_get_map_list.restype = c_void_p
isl.isl_union_map_get_map_list.argtypes = [c_void_p]
isl.isl_union_map_polyhedral_hull.restype = c_void_p
isl.isl_union_map_polyhedral_hull.argtypes = [c_void_p]
isl.isl_union_map_preimage_domain_multi_aff.restype = c_void_p
isl.isl_union_map_preimage_domain_multi_aff.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_preimage_domain_multi_pw_aff.restype = c_void_p
isl.isl_union_map_preimage_domain_multi_pw_aff.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_preimage_domain_pw_multi_aff.restype = c_void_p
isl.isl_union_map_preimage_domain_pw_multi_aff.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_preimage_domain_union_pw_multi_aff.restype = c_void_p
isl.isl_union_map_preimage_domain_union_pw_multi_aff.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_preimage_range_multi_aff.restype = c_void_p
isl.isl_union_map_preimage_range_multi_aff.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_preimage_range_pw_multi_aff.restype = c_void_p
isl.isl_union_map_preimage_range_pw_multi_aff.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_preimage_range_union_pw_multi_aff.restype = c_void_p
isl.isl_union_map_preimage_range_union_pw_multi_aff.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_product.restype = c_void_p
isl.isl_union_map_product.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_project_out_all_params.restype = c_void_p
isl.isl_union_map_project_out_all_params.argtypes = [c_void_p]
isl.isl_union_map_range.restype = c_void_p
isl.isl_union_map_range.argtypes = [c_void_p]
isl.isl_union_map_range_factor_domain.restype = c_void_p
isl.isl_union_map_range_factor_domain.argtypes = [c_void_p]
isl.isl_union_map_range_factor_range.restype = c_void_p
isl.isl_union_map_range_factor_range.argtypes = [c_void_p]
isl.isl_union_map_range_map.restype = c_void_p
isl.isl_union_map_range_map.argtypes = [c_void_p]
isl.isl_union_map_range_product.restype = c_void_p
isl.isl_union_map_range_product.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_range_reverse.restype = c_void_p
isl.isl_union_map_range_reverse.argtypes = [c_void_p]
isl.isl_union_map_reverse.restype = c_void_p
isl.isl_union_map_reverse.argtypes = [c_void_p]
isl.isl_union_map_get_space.restype = c_void_p
isl.isl_union_map_get_space.argtypes = [c_void_p]
isl.isl_union_map_subtract.restype = c_void_p
isl.isl_union_map_subtract.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_subtract_domain.restype = c_void_p
isl.isl_union_map_subtract_domain.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_subtract_range.restype = c_void_p
isl.isl_union_map_subtract_range.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_uncurry.restype = c_void_p
isl.isl_union_map_uncurry.argtypes = [c_void_p]
isl.isl_union_map_union.restype = c_void_p
isl.isl_union_map_union.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_universe.restype = c_void_p
isl.isl_union_map_universe.argtypes = [c_void_p]
isl.isl_union_map_wrap.restype = c_void_p
isl.isl_union_map_wrap.argtypes = [c_void_p]
isl.isl_union_map_zip.restype = c_void_p
isl.isl_union_map_zip.argtypes = [c_void_p]
isl.isl_union_map_copy.restype = c_void_p
isl.isl_union_map_copy.argtypes = [c_void_p]
isl.isl_union_map_free.restype = c_void_p
isl.isl_union_map_free.argtypes = [c_void_p]
isl.isl_union_map_to_str.restype = POINTER(c_char)
isl.isl_union_map_to_str.argtypes = [c_void_p]
class map(union_map):
def __init__(self, *args, **keywords):
if "ptr" in keywords:
self.ctx = keywords["ctx"]
self.ptr = keywords["ptr"]
return
if len(args) == 1 and args[0].__class__ is basic_map:
self.ctx = Context.getDefaultInstance()
self.ptr = isl.isl_map_from_basic_map(isl.isl_basic_map_copy(args[0].ptr))
return
if len(args) == 1 and type(args[0]) == str:
self.ctx = Context.getDefaultInstance()
self.ptr = isl.isl_map_read_from_str(self.ctx, args[0].encode('ascii'))
return
raise Error
def __del__(self):
if hasattr(self, 'ptr'):
isl.isl_map_free(self.ptr)
def __str__(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ptr = isl.isl_map_to_str(arg0.ptr)
res = cast(ptr, c_char_p).value.decode('ascii')
libc.free(ptr)
return res
def __repr__(self):
s = str(self)
if '"' in s:
return 'isl.map("""%s""")' % s
else:
return 'isl.map("%s")' % s
def affine_hull(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_affine_hull(isl.isl_map_copy(arg0.ptr))
obj = basic_map(ctx=ctx, ptr=res)
return obj
def apply_domain(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).apply_domain(arg1)
ctx = arg0.ctx
res = isl.isl_map_apply_domain(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def apply_range(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).apply_range(arg1)
ctx = arg0.ctx
res = isl.isl_map_apply_range(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def as_pw_multi_aff(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_as_pw_multi_aff(isl.isl_map_copy(arg0.ptr))
obj = pw_multi_aff(ctx=ctx, ptr=res)
return obj
def bind_domain(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is multi_id:
arg1 = multi_id(arg1)
except:
return union_map(arg0).bind_domain(arg1)
ctx = arg0.ctx
res = isl.isl_map_bind_domain(isl.isl_map_copy(arg0.ptr), isl.isl_multi_id_copy(arg1.ptr))
obj = set(ctx=ctx, ptr=res)
return obj
def bind_range(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is multi_id:
arg1 = multi_id(arg1)
except:
return union_map(arg0).bind_range(arg1)
ctx = arg0.ctx
res = isl.isl_map_bind_range(isl.isl_map_copy(arg0.ptr), isl.isl_multi_id_copy(arg1.ptr))
obj = set(ctx=ctx, ptr=res)
return obj
def coalesce(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_coalesce(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def complement(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_complement(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def curry(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_curry(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def deltas(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_deltas(isl.isl_map_copy(arg0.ptr))
obj = set(ctx=ctx, ptr=res)
return obj
def detect_equalities(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_detect_equalities(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def domain(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_domain(isl.isl_map_copy(arg0.ptr))
obj = set(ctx=ctx, ptr=res)
return obj
def domain_factor_domain(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_domain_factor_domain(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def domain_factor_range(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_domain_factor_range(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def domain_product(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).domain_product(arg1)
ctx = arg0.ctx
res = isl.isl_map_domain_product(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def domain_tuple_dim(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_domain_tuple_dim(arg0.ptr)
if res < 0:
raise
return int(res)
def domain_tuple_id(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_get_domain_tuple_id(arg0.ptr)
obj = id(ctx=ctx, ptr=res)
return obj
def get_domain_tuple_id(arg0):
return arg0.domain_tuple_id()
@staticmethod
def empty(arg0):
try:
if not arg0.__class__ is space:
arg0 = space(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_empty(isl.isl_space_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def eq_at(*args):
if len(args) == 2 and args[1].__class__ is multi_pw_aff:
ctx = args[0].ctx
res = isl.isl_map_eq_at_multi_pw_aff(isl.isl_map_copy(args[0].ptr), isl.isl_multi_pw_aff_copy(args[1].ptr))
obj = map(ctx=ctx, ptr=res)
return obj
raise Error
def factor_domain(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_factor_domain(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def factor_range(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_factor_range(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def flatten(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_flatten(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def flatten_domain(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_flatten_domain(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def flatten_range(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_flatten_range(isl.isl_map_copy(arg0.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def foreach_basic_map(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
exc_info = [None]
fn = CFUNCTYPE(c_int, c_void_p, c_void_p)
def cb_func(cb_arg0, cb_arg1):
cb_arg0 = basic_map(ctx=arg0.ctx, ptr=(cb_arg0))
try:
arg1(cb_arg0)
except BaseException as e:
exc_info[0] = e
return -1
return 0
cb = fn(cb_func)
ctx = arg0.ctx
res = isl.isl_map_foreach_basic_map(arg0.ptr, cb, None)
if exc_info[0] is not None:
raise exc_info[0]
if res < 0:
raise
def gist(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).gist(arg1)
ctx = arg0.ctx
res = isl.isl_map_gist(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def gist_domain(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is set:
arg1 = set(arg1)
except:
return union_map(arg0).gist_domain(arg1)
ctx = arg0.ctx
res = isl.isl_map_gist_domain(isl.isl_map_copy(arg0.ptr), isl.isl_set_copy(arg1.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def has_domain_tuple_id(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_has_domain_tuple_id(arg0.ptr)
if res < 0:
raise
return bool(res)
def has_range_tuple_id(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_has_range_tuple_id(arg0.ptr)
if res < 0:
raise
return bool(res)
def intersect(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).intersect(arg1)
ctx = arg0.ctx
res = isl.isl_map_intersect(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
obj = map(ctx=ctx, ptr=res)
return obj
def intersect_domain(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is set:
arg1 = set(arg1)
except:
return union_map(arg0).intersect_domain(arg1)
ctx = arg0.ctx
res = | |
# -*- coding: utf-8 -*-
"""Create a tmux workspace from a configuration :py:obj:`dict`.
Ref: [tmuxp.workspacebuilder](https://github.com/tmux-python/tmuxp)
"""
import re
import json
import subprocess
from libtmux.exc import TmuxSessionExists
from libtmux.pane import Pane
from libtmux.server import Server
from libtmux.session import Session
from libtmux.window import Window
from vimgdb.base.common import Common
class Workspace(Common):
def __init__(self, common: Common, sconf, builtinPanes, start_directory='', server=None, win_width=800, win_height=600):
super().__init__(common)
if not sconf:
raise Exception('Layout configuration is empty.')
# config.validate_schema(sconf)
if isinstance(server, Server):
self.server = server
else:
self.server = None
self.sconf = sconf
self.win_width = int(win_width)
self.win_height = int(win_height)
self.layouts = {} # Every window is a layout of panes/workspace
# So window is also a layout
self.curr_layout = None
self.build_not_dry_run = True
self.builtinPanes = builtinPanes
self.start_directory = start_directory
def session_exists(self, session_name=None):
exists = self.server.has_session(session_name)
if not exists:
return exists
self.session = self.server.find_where({'session_name': session_name})
return True
def build_all_layout_codes(self, sessName: str):
# Kill old existed dummy session
session = self.server.find_where({ "session_name": f"{sessName}" })
if session:
session.kill_session()
session = None
self.build_not_dry_run = False
self.build()
# 2: layout2* (3 panes) [800x600] [layout 9102,800x600,0,0[800x300,0,0,217,800x149,0,301,218,800x149,0,451,219]] @113 (active)
winInfo = subprocess.check_output(
['tmux', 'lsw', '-t', Common.tmux_vimgdb_session_name])
winInfo = winInfo.decode()
for win_line in winInfo.split("\n"):
#self.logger.info(f"list-window: {win_line}")
m = re.search(r'\d+: ([\w@_]+).*panes.* \[(.*)x(.*)\] \[layout (.*)\]', win_line)
if m:
self.logger.info(f"list-window: {m.groups()}")
layoutName = m.group(1)
#self.win_width = m.group(2)
#self.win_height = m.group(3)
layoutCode = m.group(4)
if layoutName not in self.layouts:
self.layouts[layoutName] = {}
self.layouts[layoutName].update({'layout': layoutCode})
self.session.kill_session()
self.session = None
self.build_not_dry_run = True
return self.layouts
def build_one_layout(self, layout: str, session: Session, win: Window, pane: Pane, paneName):
assert isinstance(session, Session)
assert isinstance(win, Window)
assert isinstance(pane, Pane)
self.server._list_sessions()
assert self.server.has_session(session.name)
assert session.id
#assert session.find_where(window_name=win.name)
self.session = session
self.server = session.server
focus = None
if self.curr_layout:
self.logger.info(f"Rebuild '{layout}' layout on running {self.curr_layout}")
else:
self.logger.info(f"Build a new layout with Context: {self.layouts}")
tmux_info = subprocess.check_output(
['tmux', 'display-message', '-t', session.name, '-p', '#{window_width};#{window_height}'])
tmux_info = tmux_info.decode()
[win_width, win_height] = tmux_info.strip().split(';')
self.win_width = int(win_width)
self.win_height = int(win_height)
if 'options' in self.sconf:
for option, value in self.sconf['options'].items():
self.session.set_option(option, value)
pass
if 'global_options' in self.sconf:
for option, value in self.sconf['global_options'].items():
self.session.set_option(option, value, _global=True)
if self.build_not_dry_run and 'environment' in self.sconf:
for option, value in self.sconf['environment'].items():
self.session.set_environment(option, value)
for winName, win, wconf in self.iter_create_windows(session, win, layout):
self.create_panes(winName, win, wconf, pane, paneName)
if focus:
focus.select_window()
assert layout in self.layouts
self.curr_layout = self.layouts[layout]
def build(self, session=None):
#self.logger.info(f"connect layouts: {self.sconf}")
if not session:
if not self.server:
raise Exception(
'Layout.build requires server to be passed '
+ 'on initialization, or pass in session object to here.'
)
if self.server.has_session(self.sconf['session_name']):
self.session = self.server.find_where(
{'session_name': self.sconf['session_name']}
)
raise Exception(
'Session name %s is already running.' % self.sconf['session_name']
)
else:
session = self.server.new_session(
session_name=self.sconf['session_name']
)
assert self.sconf['session_name'] == session.name
assert len(self.sconf['session_name']) > 0
self.session = session
self.server = session.server
self.server._list_sessions()
assert self.server.has_session(session.name)
assert session.id
assert isinstance(session, Session)
focus = None
tmux_info = subprocess.check_output(
['tmux', 'display-message', '-t', session.name, '-p', '#{window_width};#{window_height}'])
tmux_info = tmux_info.decode()
[win_width, win_height] = tmux_info.strip().split(';')
self.win_width = int(win_width)
self.win_height = int(win_height)
if 'options' in self.sconf:
for option, value in self.sconf['options'].items():
self.session.set_option(option, value)
pass
if 'global_options' in self.sconf:
for option, value in self.sconf['global_options'].items():
self.session.set_option(option, value, _global=True)
if self.build_not_dry_run and 'environment' in self.sconf:
for option, value in self.sconf['environment'].items():
self.session.set_environment(option, value)
for winName, win, wconf in self.iter_create_windows(session):
self.create_panes(winName, win, wconf, None, None)
if focus:
focus.select_window()
def create_panes(self, winName, win: Window, wconf, pane: Pane, paneName):
assert isinstance(win, Window)
focus_pane = None
for p, pconf in self.iter_create_panes(winName, win, wconf, pane, paneName):
assert isinstance(p, Pane)
p = p
if 'layout' in wconf:
win.select_layout(wconf['layout'])
if 'width' in pconf:
p.resize_pane(width=pconf['width'])
self.logger.info(f"connect resize_pane: {pconf['width']}")
if 'height' in pconf:
p.resize_pane(height=pconf['height'])
if 'focus' in pconf and pconf['focus']:
focus_pane = p
if 'focus' in wconf and wconf['focus']:
focus = win
self.config_after_window(win, wconf)
if self.build_not_dry_run and focus_pane:
focus_pane.select_pane()
def iter_create_windows(self, ses: Session, runningWin: Window=None, winName: str=''):
if runningWin: # Just create this specific window
assert isinstance(runningWin, Window)
assert winName
self.logger.info(f"rebuild '{winName}' layout on current window")
else:
self.logger.info(f"try create all windows")
for i, wconf in enumerate(self.sconf['windows'], start=1):
if runningWin: # Just create this specific window
if 'window_name' in wconf and winName == wconf['window_name']:
window_name = winName
else:
continue
elif 'window_name' in wconf:
window_name = wconf['window_name']
else: # assign a dummy name
window_name = 'Null'
if window_name not in self.layouts:
self.layouts[window_name] = {"window_name": window_name}
if runningWin: # rebuild layout on current window/layout
win = runningWin
else:
w1 = None
if i == int(1): # if first window, use window 1
self.logger.info(f"window '{window_name}' select attached")
w1 = ses.attached_window
w1.move_window(99)
pass
self.logger.info(f"window '{window_name}' create new")
if 'start_directory' in wconf:
sd = wconf['start_directory']
else:
sd = self.start_directory
if 'window_shell' in wconf:
ws = wconf['window_shell']
else:
ws = None
win = ses.new_window(
window_name=window_name,
start_directory=sd,
attach=False, # do not move to the new window
window_index=wconf.get('window_index', ''),
window_shell=ws,
)
if i == int(1) and w1: # if first window, use window 1
self.logger.info(f"window '{window_name}' create as first window, kill the old 1st-window")
w1.kill_window()
assert isinstance(win, Window)
ses.server._update_windows()
if 'options' in wconf and isinstance(wconf['options'], dict):
for option, value in wconf['options'].items():
if option.startswith('#'):
continue
if isinstance(value, str):
# "70%"
if value.endswith('%'):
if option.endswith('-width'):
value = int(self.win_width * int(value.strip('%')) / 100)
self.logger.info(f"connect set-width {self.win_width}-{value}")
if option.endswith('-height'):
value = int(self.win_height * int(value.strip('%')) / 100)
self.logger.info(f"connect set-height {self.win_height}-{value}")
elif value.startswith('0.'):
if option.endswith('-width'):
value = int(self.win_width * float(value))
self.logger.info(f"connect set-width {self.win_width}-{value}")
if option.endswith('-height'):
value = int(self.win_height * float(value))
self.logger.info(f"connect set-height {self.win_height}-{value}")
if isinstance(value, float) and value < 1:
if option.endswith('-width'):
value = int(self.win_width * value)
self.logger.info(f"connect set-width {self.win_width}-{value}")
if option.endswith('-height'):
value = int(self.win_height * value)
self.logger.info(f"connect set-height {self.win_height}-{value}")
win.set_window_option(option, value)
if 'focus' in wconf and wconf['focus']:
win.select_window()
ses.server._update_windows()
yield window_name, win, wconf
def get_pane(self, winName, paneName):
assert winName in self.layouts
layout = self.layouts[winName]
assert 'panes' in layout
panes = layout['panes']
assert paneName in panes
assert 'this' in panes[paneName]
return panes[paneName]['this']
def iter_create_panes(self, winName, w: Window, wconf, vim_pane: Pane, paneName):
assert isinstance(w, Window)
if vim_pane:
assert isinstance(vim_pane, Pane)
assert winName in self.layouts
pane_base_index = int(w.show_window_option('pane-base-index', g=True))
p = None
if self.build_not_dry_run:
layout = self.layouts[winName]
if 'panes' not in layout:
layout['panes'] = {}
panes = layout['panes']
assert isinstance(panes, dict)
for pindex, pconf in enumerate(wconf['panes'], start=pane_base_index):
isNewPane = False
if 'pane_name' in pconf:
pane_name = pconf['pane_name']
else:
pane_name = 'Null'
if self.build_not_dry_run:
if pane_name not in panes:
panes[pane_name] = {}
pane = panes[pane_name]
else:
pane = {}
if vim_pane and pane_name == paneName:
p = vim_pane
# find old same pane
elif self.curr_layout \
and 'panes' in self.curr_layout \
and pane_name in self.curr_layout['panes'] \
and 'this' in self.curr_layout['panes'][pane_name]:
self.logger.info(f"Runing window '{self.curr_layout['window_name']}' have pane '{pane_name}', reuse it.")
p = self.curr_layout['panes'][pane_name]['this']
else:
if pindex == int(pane_base_index):
self.logger.info(f"pane '{pane_name}' select attached")
p = w.attached_pane
else:
self.logger.info(f"pane '{pane_name}' create new")
def get_pane_start_directory():
if 'start_directory' in pconf:
return pconf['start_directory']
elif 'start_directory' in wconf:
return wconf['start_directory']
else:
return self.start_directory
p = w.split_window(attach=True,
start_directory=get_pane_start_directory()
#target=p.id
)
isNewPane = True
if p:
assert isinstance(p, Pane)
p.update({'pane_name': pane_name})
pane.update({'this': p})
thePane = p
if 'layout' in wconf:
w.select_layout(wconf['layout'])
if self.build_not_dry_run:
if 'suppress_history' in pconf:
suppress = pconf['suppress_history']
elif 'suppress_history' in wconf:
suppress = wconf['suppress_history']
else:
suppress = True
# recursive diction/list in json
try:
#self.logger.info(f"connect Panes: {pconf}")
if isinstance(pconf, str):
#decoded_data=pconf.encode().decode('utf-8-sig')
pconf = json.loads(pconf, strict=False)
except Exception as e:
self.logger.info(f"connect Panes: {str(e)}")
if self.build_not_dry_run and isNewPane:
if pane_name in self.builtinPanes:
cmd = self.builtinPanes[pane_name]
pane['cmd'] = cmd
if cmd:
self.logger.info(f"pane '{pane_name}' start-builtin-cmd: {cmd}")
thePane.send_keys(cmd, suppress_history=suppress)
else:
isFirstcmd = True
for cmd in pconf['shell_command']:
if isFirstcmd:
isFirstcmd = False
pane['cmd'] = cmd
if cmd:
self.logger.info(f"pane '{pane_name}' start-conf-cmd: {cmd}")
thePane.send_keys(cmd, suppress_history=suppress)
if self.build_not_dry_run and 'focus' in pconf and pconf['focus']:
w.select_pane(thePane['pane_id'])
w.server._update_panes()
yield thePane, pconf
def config_after_window(self, w, wconf):
if 'options_after' in wconf and isinstance(wconf['options_after'], dict):
for key, val in wconf['options_after'].items():
w.set_window_option(key, val)
@staticmethod
def Save(session):
sconf = {'session_name': session['session_name'], 'windows': []}
for w in session.windows:
wconf = {
'options': w.show_window_options(),
'window_name': w.name,
'layout': w.layout,
'panes': [],
}
if w.get('window_active', '0') == '1':
wconf['focus'] = 'true'
# If all panes have same path, set 'start_directory' instead
# of using 'cd' shell commands.
def pane_has_same_path(p):
return w.panes[0].current_path == p.current_path
if all(pane_has_same_path(p) for p in w.panes):
wconf['start_directory'] = w.panes[0].current_path
for p | |
import numpy as np
import xarray as xr
import laspy
import os
from time import perf_counter
from datetime import datetime
from HSTB.kluster.pydro_helpers import is_pydro
from HSTB.kluster.pdal_entwine import build_entwine_points
class FqprExport:
"""
Visualizations in Matplotlib built on top of FQPR class. Includes animations of beam vectors and vessel
orientation.
Processed fqpr_generation.Fqpr instance is passed in as argument
"""
def __init__(self, fqpr):
"""
Parameters
----------
fqpr
Fqpr instance to export from
"""
self.fqpr = fqpr
def _generate_export_data(self, ping_dataset: xr.Dataset, filter_by_detection: bool = True, z_pos_down: bool = True):
"""
Take the georeferenced data in the multibeam.raw_ping datasets held by fqpr_generation.Fqpr (ping_dataset is one of those
raw_ping datasets) and build the necessary arrays for exporting.
Parameters
----------
ping_dataset
one of the multibeam.raw_ping xarray Datasets, must contain the x,y,z variables generated by georeferencing
filter_by_detection
if True, will filter the xyz data by the detection info flag (rejected by multibeam system)
z_pos_down
if True, will export soundings with z positive down (this is the native Kluster convention)
Returns
-------
xr.DataArray
x variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
xr.DataArray
y variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
xr.DataArray
z variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
xr.DataArray
uncertainty variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
np.array
indexes of the original z data before stacking, used to unstack x
np.array
if detectioninfo exists, this is the integer classification for each sounding
np.array
if detectioninfo exists, boolean mask for the valid detections
bool
if tvu exists, True
"""
uncertainty_included = False
nan_mask = ~np.isnan(ping_dataset['x'])
x_stck = ping_dataset['x'][nan_mask]
y_stck = ping_dataset['y'][nan_mask]
z_stck = ping_dataset['z'][nan_mask]
if 'tvu' in ping_dataset:
uncertainty_included = True
unc_stck = ping_dataset['tvu'][nan_mask]
# build mask with kongsberg detection info
classification = None
valid_detections = None
if 'detectioninfo' in ping_dataset:
dinfo = ping_dataset.detectioninfo
filter_stck = dinfo.values[nan_mask]
# filter_idx, filter_stck = stack_nan_array(dinfo, stack_dims=('time', 'beam'))
valid_detections = filter_stck != 2
tot = len(filter_stck)
tot_valid = np.count_nonzero(valid_detections)
tot_invalid = tot - tot_valid
self.fqpr.logger.info(
'{}: {} total soundings, {} retained, {} filtered'.format(ping_dataset.system_identifier, tot, tot_valid,
tot_invalid))
# filter points by mask
unc = None
if filter_by_detection and valid_detections is not None:
x = x_stck[valid_detections]
y = y_stck[valid_detections]
z = z_stck[valid_detections]
classification = filter_stck[valid_detections]
if uncertainty_included:
unc = unc_stck[valid_detections]
else:
x = x_stck
y = y_stck
z = z_stck
if 'detectioninfo' in ping_dataset:
classification = filter_stck
if uncertainty_included:
unc = unc_stck
# z positive down is the native convention in Kluster, if you want positive up, gotta flip
if not z_pos_down:
z = z * -1
return x, y, z, unc, nan_mask, classification, valid_detections, uncertainty_included
def export_pings_to_file(self, output_directory: str = None, file_format: str = 'csv', csv_delimiter=' ',
filter_by_detection: bool = True, z_pos_down: bool = True, export_by_identifiers: bool = True):
"""
Uses the output of georef_along_across_depth to build sounding exports. Currently you can export to csv, las or
entwine file formats, see file_format argument.
If you export to las and want to retain rejected soundings under the noise classification, set
filter_by_detection to False.
Filters using the detectioninfo variable if present in multibeam and filter_by_detection is set. Set z_pos_down
to False if you want positive up. Otherwise you get positive down.
Will generate an xyz file for each sector in multibeam. Results in one xyz file for each freq/sector id/serial
number combination.
entwine export will build las first, and then entwine from las
Parameters
----------
output_directory
optional, destination directory for the xyz exports, otherwise will auto export next to converted data
file_format
optional, destination file format, default is csv file, options include ['csv', 'las', 'entwine']
csv_delimiter
optional, if you choose file_format=csv, this will control the delimiter
filter_by_detection
optional, if True will only write soundings that are not rejected
z_pos_down
if True, will export soundings with z positive down (this is the native Kluster convention)
export_by_identifiers
if True, will generate separate files for each combination of serial number/sector/frequency
Returns
-------
list
list of written file paths
"""
if 'x' not in self.fqpr.multibeam.raw_ping[0]:
self.fqpr.logger.error('export_pings_to_file: No xyz data found, please run All Processing - Georeference Soundings first.')
return
if file_format not in ['csv', 'las', 'entwine']:
self.fqpr.logger.error('export_pings_to_file: Only csv, las and entwine format options supported at this time')
return
if file_format == 'entwine' and not is_pydro():
self.fqpr.logger.error(
'export_pings_to_file: Only pydro environments support entwine tile building. Please see https://entwine.io/configuration.html for instructions on installing entwine if you wish to use entwine outside of Kluster. Kluster exported las files will work with the entwine build command')
if output_directory is None:
output_directory = self.fqpr.multibeam.converted_pth
self.fqpr.logger.info('****Exporting xyz data to {}****'.format(file_format))
if file_format == 'csv':
fldr_path = _create_folder(output_directory, 'csv_export')
written_files = self._export_pings_to_csv(output_directory=fldr_path, csv_delimiter=csv_delimiter,
filter_by_detection=filter_by_detection, z_pos_down=z_pos_down,
export_by_identifiers=export_by_identifiers)
elif file_format == 'las':
fldr_path = _create_folder(output_directory, 'las_export')
written_files = self._export_pings_to_las(output_directory=fldr_path, filter_by_detection=filter_by_detection,
z_pos_down=z_pos_down, export_by_identifiers=export_by_identifiers)
elif file_format == 'entwine':
fldr_path = _create_folder(output_directory, 'las_export')
entwine_fldr_path = _create_folder(output_directory, 'entwine_export')
written_files = self.export_pings_to_entwine(output_directory=entwine_fldr_path, las_export_folder=fldr_path,
filter_by_detection=filter_by_detection, z_pos_down=z_pos_down,
export_by_identifiers=export_by_identifiers)
else:
raise NotImplementedError('export_pings_to_file: {} is not a supported file format'.format(file_format))
return written_files
def _export_pings_to_csv(self, output_directory: str = None, csv_delimiter=' ', filter_by_detection: bool = True,
z_pos_down: bool = True, export_by_identifiers: bool = True):
"""
Method for exporting pings to csv files. See export_pings_to_file to use.
Parameters
----------
output_directory
destination directory for the xyz exports, otherwise will auto export next to converted data
csv_delimiter
optional, if you choose file_format=csv, this will control the delimiter
filter_by_detection
optional, if True will only write soundings that are not rejected
z_pos_down
if True, will export soundings with z positive down (this is the native Kluster convention)
export_by_identifiers
if True, will generate separate files for each combination of serial number/sector/frequency
Returns
-------
list
list of written file paths
"""
starttime = perf_counter()
written_files = []
for rp in self.fqpr.multibeam.raw_ping:
self.fqpr.logger.info('Operating on system {}'.format(rp.system_identifier))
if filter_by_detection and 'detectioninfo' not in rp:
self.fqpr.logger.error('_export_pings_to_csv: Unable to filter by detection type, detectioninfo not found')
return
rp = rp.stack({'sounding': ('time', 'beam')})
if export_by_identifiers:
for freq in np.unique(rp.frequency):
subset_rp = rp.where(rp.frequency == freq, drop=True)
for secid in np.unique(subset_rp.txsector_beam).astype(np.int):
sec_subset_rp = subset_rp.where(subset_rp.txsector_beam == secid, drop=True)
dest_path = os.path.join(output_directory, '{}_{}_{}.csv'.format(rp.system_identifier, secid, freq))
self.fqpr.logger.info('writing to {}'.format(dest_path))
export_data = self._generate_export_data(sec_subset_rp, filter_by_detection=filter_by_detection, z_pos_down=z_pos_down)
self._csv_write(export_data[0], export_data[1], export_data[2], export_data[3], export_data[7],
dest_path, csv_delimiter)
written_files.append(dest_path)
else:
dest_path = os.path.join(output_directory, rp.system_identifier + '.csv')
self.fqpr.logger.info('writing to {}'.format(dest_path))
export_data = self._generate_export_data(rp, filter_by_detection=filter_by_detection, z_pos_down=z_pos_down)
self._csv_write(export_data[0], export_data[1], export_data[2], export_data[3], export_data[7],
dest_path, csv_delimiter)
written_files.append(dest_path)
endtime = perf_counter()
self.fqpr.logger.info('****Exporting xyz data to csv complete: {}s****\n'.format(round(endtime - starttime, 1)))
return written_files
def _csv_write(self, x: xr.DataArray, y: xr.DataArray, z: xr.DataArray, uncertainty: xr.DataArray,
uncertainty_included: bool, dest_path: str, delimiter: str):
"""
Write the data to csv
Parameters
----------
x
x variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
y
y variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
z
z variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
uncertainty
uncertainty variable stacked in the time/beam dimension to create 1 dim representation. rejected soundings removed
if filter_by_detection
uncertainty_included
if tvu exists, True
dest_path
output path to write to
delimiter
csv delimiter to use
"""
if uncertainty_included:
np.savetxt(dest_path, np.c_[x, y, z, uncertainty],
fmt=['%3.3f', '%2.3f', '%4.3f', '%4.3f'],
delimiter=delimiter,
header='easting{}northing{}depth{}uncertainty'.format(delimiter, delimiter, delimiter),
comments='')
else:
np.savetxt(dest_path, np.c_[x, y, z],
fmt=['%3.3f', '%2.3f', '%4.3f'],
delimiter=delimiter,
header='easting{}northing{}depth'.format(delimiter, delimiter),
comments='')
def _export_pings_to_las(self, output_directory: str = None, filter_by_detection: bool = True, z_pos_down: bool = True,
export_by_identifiers: bool = True):
"""
Uses the output of georef_along_across_depth to build sounding exports. Currently you can export to csv or las
file formats, see file_format argument.
If you export to las and want to retain rejected soundings under the noise classification, set
filter_by_detection to False.
Filters using the detectioninfo variable if present in multibeam and filter_by_detection is set.
Will generate an xyz file for each sector in multibeam. Results in one xyz file for each freq/sector id/serial
number combination.
entwine export will | |
{ipv4}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def ip_address_secondary(
self, ipv4):
"""
Set secondary IP address
This function runs the following vtysh command:
::
# ip address {ipv4} secondary
:param ipv4: A.B.C.D/M Interface IP address.
"""
cmd = [
'ip address {ipv4} secondary'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_ip_address_secondary(
self, ipv4):
"""
Unset secondary IP address
This function runs the following vtysh command:
::
# no ip address {ipv4} secondary
:param ipv4: A.B.C.D/M Interface IP address.
"""
cmd = [
'no ip address {ipv4} secondary'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def ipv6_address(
self, ipv6):
"""
Set IPv6 address
This function runs the following vtysh command:
::
# ipv6 address {ipv6}
:param ipv6: X:X::X:X/M Interface IPv6 address
"""
cmd = [
'ipv6 address {ipv6}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_ipv6_address(
self, ipv6):
"""
Unset IPv6 address
This function runs the following vtysh command:
::
# no ipv6 address {ipv6}
:param ipv6: X:X::X:X/M Interface IPv6 address
"""
cmd = [
'no ipv6 address {ipv6}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def ipv6_address_secondary(
self, ipv6):
"""
Set secondary IPv6 address
This function runs the following vtysh command:
::
# ipv6 address {ipv6} secondary
:param ipv6: X:X::X:X/M Interface IPv6 address
"""
cmd = [
'ipv6 address {ipv6} secondary'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_ipv6_address_secondary(
self, ipv6):
"""
Unset IPv6 address
This function runs the following vtysh command:
::
# no ipv6 address {ipv6} secondary
:param ipv6: X:X::X:X/M Interface IPv6 address
"""
cmd = [
'no ipv6 address {ipv6} secondary'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def shutdown(
self):
"""
Enable an interface.
This function runs the following vtysh command:
::
# shutdown
"""
cmd = [
'shutdown'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_shutdown(
self):
"""
Disable an interface.
This function runs the following vtysh command:
::
# no shutdown
"""
cmd = [
'no shutdown'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
class ConfigInterfaceLoopback(ContextManager):
    """
    Loopback interface configuration.

    Context manager that enters 'interface loopback {loopback_id}'
    configuration mode on __enter__ and leaves it on __exit__.

    pre_commands:

    ::

        ['config terminal', 'interface loopback {loopback_id}']

    post_commands:

    ::

        ['end']
    """
    def __init__(self, enode, loopback_id):
        # engine node that runs the vtysh commands + the loopback identifier
        self.enode = enode
        self.loopback_id = loopback_id

    def __enter__(self):
        commands = """\
            config terminal
            interface loopback {loopback_id}
        """

        self.enode.libs.common.assert_batch(
            commands,
            replace=self.__dict__,
            shell='vtysh'
        )

        return self

    def __exit__(self, type, value, traceback):
        commands = """\
            end
        """

        self.enode.libs.common.assert_batch(
            commands,
            replace=self.__dict__,
            shell='vtysh'
        )

    def ip_address(self, ipv4):
        """
        Set IPv4 address for loopback

        Runs the following vtysh command:

        ::

            # ip address {ipv4}

        :param ipv4: A.B.C.D/M Loopback IP address.
        """
        command = 'ip address {ipv4}'.format(ipv4=ipv4)
        result = self.enode(command, shell='vtysh')
        if result:
            # vtysh prints nothing on success; any output signals an error
            raise determine_exception(result)(result)

    def no_ip_address(self, ipv4):
        """
        Unset IPv4 address for loopback

        Runs the following vtysh command:

        ::

            # no ip address {ipv4}

        :param ipv4: A.B.C.D/M Loopback IP address.
        """
        command = 'no ip address {ipv4}'.format(ipv4=ipv4)
        result = self.enode(command, shell='vtysh')
        if result:
            raise determine_exception(result)(result)

    def ipv6_address(self, ipv6):
        """
        Set IPv6 address on Loopback

        Runs the following vtysh command:

        ::

            # ipv6 address {ipv6}

        :param ipv6: X:X::X:X/M Loopback IPv6 address
        """
        command = 'ipv6 address {ipv6}'.format(ipv6=ipv6)
        result = self.enode(command, shell='vtysh')
        if result:
            raise determine_exception(result)(result)

    def no_ipv6_address(self, ipv6):
        """
        Unset IPv6 address on loopback interface

        Runs the following vtysh command:

        ::

            # no ipv6 address {ipv6}

        :param ipv6: X:X::X:X/M Loopback IPv6 address
        """
        command = 'no ipv6 address {ipv6}'.format(ipv6=ipv6)
        result = self.enode(command, shell='vtysh')
        if result:
            raise determine_exception(result)(result)
class ConfigInterfaceLag(ContextManager):
    """
    Configure link-aggregation parameters.

    Context manager that enters 'interface lag {lag}' configuration mode on
    __enter__ and leaves it on __exit__; all methods run inside that context.

    pre_commands:

    ::

        ['config terminal', 'interface lag {lag}']

    post_commands:

    ::

        ['end']
    """
    def __init__(self, enode, lag):
        # Store the engine node used to run vtysh commands and the LAG
        # identifier substituted into 'interface lag {lag}' on __enter__.
        self.enode = enode
        self.lag = lag
    def __enter__(self):
        # Enter the lag configuration context; {lag} is filled from
        # self.__dict__ by assert_batch.
        commands = """\
            config terminal
            interface lag {lag}
        """

        self.enode.libs.common.assert_batch(
            commands,
            replace=self.__dict__,
            shell='vtysh'
        )

        return self
    def __exit__(self, type, value, traceback):
        # Leave configuration mode regardless of whether the body raised.
        commands = """\
            end
        """

        self.enode.libs.common.assert_batch(
            commands,
            replace=self.__dict__,
            shell='vtysh'
        )
def ip_address(
self, ipv4):
"""
Set IP address
This function runs the following vtysh command:
::
# ip address {ipv4}
:param ipv4: A.B.C.D/M Interface IP address.
"""
cmd = [
'ip address {ipv4}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_ip_address(
self, ipv4):
"""
Unset IP address
This function runs the following vtysh command:
::
# no ip address {ipv4}
:param ipv4: A.B.C.D/M Interface IP address.
"""
cmd = [
'no ip address {ipv4}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def ip_address_secondary(
self, ipv4):
"""
Set secondary IP address
This function runs the following vtysh command:
::
# ip address {ipv4} secondary
:param ipv4: A.B.C.D/M Interface IP address.
"""
cmd = [
'ip address {ipv4} secondary'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_ip_address_secondary(
self, ipv4):
"""
Unset secondary IP address
This function runs the following vtysh command:
::
# no ip address {ipv4} secondary
:param ipv4: A.B.C.D/M Interface IP address.
"""
cmd = [
'no ip address {ipv4} secondary'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def ipv6_address(
self, ipv6):
"""
Set IPv6 address
This function runs the following vtysh command:
::
# ipv6 address {ipv6}
:param ipv6: X:X::X:X/M Interface IPv6 address
"""
cmd = [
'ipv6 address {ipv6}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_ipv6_address(
self, ipv6):
"""
Unset IPv6 address
This function runs the following vtysh command:
::
# no ipv6 address {ipv6}
:param ipv6: X:X::X:X/M Interface IPv6 address
"""
cmd = [
'no ipv6 address {ipv6}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def ipv6_address_secondary(
self, ipv6):
"""
Set secondary IPv6 address
This function runs the following vtysh command:
::
# ipv6 address {ipv6} secondary
:param ipv6: X:X::X:X/M Interface IPv6 address
"""
cmd = [
'ipv6 address {ipv6} secondary'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_ipv6_address_secondary(
self, ipv6):
"""
Unset IPv6 address
This function runs the following vtysh command:
::
# no ipv6 address {ipv6} secondary
:param ipv6: X:X::X:X/M Interface IPv6 address
"""
cmd = [
'no ipv6 address {ipv6} secondary'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def shutdown(
self):
"""
Enable an interface.
This function runs the following vtysh command:
::
# shutdown
"""
cmd = [
'shutdown'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_shutdown(
self):
"""
Disable an interface.
This function runs the following vtysh command:
::
# no shutdown
"""
cmd = [
'no shutdown'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def routing(
self):
"""
Configure interface as L3.
This function runs the following vtysh command:
::
# routing
"""
cmd = [
'routing'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_routing(
self):
"""
Unconfigure interface as L3.
This function runs the following vtysh command:
::
# no routing
"""
cmd = [
'no routing'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def vlan_access(
self, vlan_id):
"""
Access configuration
This function runs the following vtysh command:
::
# vlan access {vlan_id}
:param vlan_id: <1-4094> VLAN identifier
"""
cmd = [
'vlan access {vlan_id}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_vlan_access(
self, vlan_id):
"""
Remove vlan access
This function runs the following vtysh command:
::
# no vlan access {vlan_id}
:param vlan_id: <1-4094> VLAN identifier
"""
cmd = [
'no vlan access {vlan_id}'
]
| |
<filename>mvpa2/clfs/gpr.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# Copyright (c) 2008 <NAME> <<EMAIL>>
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Gaussian Process Regression (GPR)."""
__docformat__ = "restructuredtext"
import numpy as np
from mvpa2.base import externals, warning
from mvpa2.base.constraints import EnsureFloat, EnsureNone, EnsureRange
from mvpa2.base.param import Parameter
from mvpa2.base.state import ConditionalAttribute
from mvpa2.clfs.base import Classifier, accepts_dataset_as_samples
from mvpa2.datasets import Dataset, dataset_wizard
from mvpa2.kernels.np import (
SquaredExponentialKernel,
GeneralizedLinearKernel,
LinearKernel,
)
from mvpa2.measures.base import Sensitivity
from mvpa2.misc.exceptions import InvalidHyperparameterError
if externals.exists("scipy", raise_=True):
    from scipy.linalg import cho_solve as SLcho_solve
    from scipy.linalg import cholesky as SLcholesky
    import scipy.linalg as SL

    # Some local binding for bits of speed up
    # NOTE(review): SL.basic is a private scipy namespace that modern scipy
    # (>=1.8) deprecated/removed; scipy.linalg.LinAlgError would be the
    # portable spelling -- confirm against the scipy versions supported.
    SLAError = SL.basic.LinAlgError

if __debug__:
    from mvpa2.base import debug

# Some local bindings for bits of speed up
# (module-level aliases avoid repeated attribute lookups in hot loops)
from numpy import array, asarray

Nlog = np.log
Ndot = np.dot
Ndiag = np.diag
NLAcholesky = np.linalg.cholesky
NLAsolve = np.linalg.solve
# NOTE(review): np.linalg.linalg is a private module removed in NumPy 2.0;
# np.linalg.LinAlgError is the public name -- confirm supported numpy range.
NLAError = np.linalg.linalg.LinAlgError
eps64 = np.finfo(np.float64).eps

# Some precomputed items. log is relatively expensive
_halflog2pi = 0.5 * Nlog(2 * np.pi)
def _SLcholesky_autoreg(C, nsteps=None, **kwargs):
"""Simple wrapper around cholesky to incrementally regularize the
matrix until successful computation.
For `nsteps` we boost diagonal 10-fold each time from the
'epsilon' of the respective dtype. If None -- would proceed until
reaching 1.
"""
if nsteps is None:
nsteps = -int(np.floor(np.log10(np.finfo(float).eps)))
result = None
for step in range(nsteps):
epsilon_value = (10 ** step) * np.finfo(C.dtype).eps
epsilon = epsilon_value * np.eye(C.shape[0])
try:
result = SLcholesky(C + epsilon, lower=True)
except SLAError as e:
warning(
"Cholesky decomposition lead to failure: %s. "
"As requested, performing auto-regularization but "
"for better control you might prefer to regularize "
"yourself by providing lm parameter to GPR" % e
)
if step < nsteps - 1:
if __debug__:
debug(
"GPR",
"Failed to obtain cholesky on "
"auto-regularization step %d value %g. Got %s."
" Boosting lambda more to reg. C." % (step, epsilon_value, e),
)
continue
else:
raise
if result is None:
# no loop was done for some reason
result = SLcholesky(C, lower=True)
return result
class GPR(Classifier):
    """Gaussian Process Regression (GPR).

    Non-parametric Bayesian regression; per-prediction variances are
    available through the `predicted_variances` conditional attribute.
    """

    # Conditional attributes: optional per-call results that users can
    # enable/disable via .ca to trade memory/compute for information.
    predicted_variances = ConditionalAttribute(
        enabled=False, doc="Variance per each predicted value"
    )

    log_marginal_likelihood = ConditionalAttribute(
        enabled=False, doc="Log Marginal Likelihood"
    )

    log_marginal_likelihood_gradient = ConditionalAttribute(
        enabled=False, doc="Log Marginal Likelihood Gradient"
    )

    __tags__ = ["gpr", "regression", "retrainable"]

    # NOTE XXX Parameters of the classifier. Values available as
    # clf.parameter or clf.params.parameter, or as
    # clf.params['parameter'] (as the full Parameter object)
    #
    # __doc__ and __repr__ for class is conviniently adjusted to
    # reflect values of those params

    # Kernel machines/classifiers should be refactored also to behave
    # the same and define kernel parameter appropriately... TODO, but SVMs
    # already kinda do it nicely ;-)

    sigma_noise = Parameter(
        0.001,
        constraints=EnsureFloat() & EnsureRange(min=1e-10),
        doc="the standard deviation of the gaussian noise.",
    )

    # XXX For now I don't introduce kernel parameter since yet to unify
    # kernel machines
    # kernel = Parameter(None, allowedtype='Kernel',
    #     doc="Kernel object defining the covariance between instances. "
    #         "(Defaults to KernelSquaredExponential if None in arguments)")

    lm = Parameter(
        None,
        constraints=((EnsureFloat() & EnsureRange(min=0.0)) | EnsureNone()),
        doc="""The regularization term lambda.
        Increase this when the kernel matrix is not positive definite. If None,
        some regularization will be provided upon necessity""",
    )
def __init__(self, kernel=None, **kwargs):
"""Initialize a GPR regression analysis.
Parameters
----------
kernel : Kernel
a kernel object defining the covariance between instances.
(Defaults to SquaredExponentialKernel if None in arguments)
"""
# init base class first
Classifier.__init__(self, **kwargs)
# It does not make sense to calculate a confusion matrix for a GPR
# XXX it does ;) it will be a RegressionStatistics actually ;-)
# So if someone desires -- let him have it
# self.ca.enable('training_stats', False)
# set kernel:
if kernel is None:
kernel = SquaredExponentialKernel()
debug("GPR", "No kernel was provided, falling back to default: %s" % kernel)
self.__kernel = kernel
# append proper clf_internal depending on the kernel
# TODO: add "__tags__" to kernels since the check
# below does not scale
if isinstance(kernel, (GeneralizedLinearKernel, LinearKernel)):
self.__tags__ += ["linear"]
else:
self.__tags__ += ["non-linear"]
if externals.exists("openopt") and not "has_sensitivity" in self.__tags__:
self.__tags__ += ["has_sensitivity"]
# No need to initialize conditional attributes. Unless they got set
# they would raise an exception self.predicted_variances =
# None self.log_marginal_likelihood = None
self._init_internals()
pass
def _init_internals(self):
"""Reset some internal variables to None.
To be used in constructor and untrain()
"""
self._train_fv = None
self._labels = None
self._km_train_train = None
self._train_labels = None
self._alpha = None
self._L = None
self._LL = None
# XXX EO: useful for model selection but not working in general
# self.__kernel.reset()
pass
def __repr__(self):
"""String summary of the object"""
return super(GPR, self).__repr__(prefixes=["kernel=%s" % self.__kernel])
def compute_log_marginal_likelihood(self):
"""
Compute log marginal likelihood using self.train_fv and self.targets.
"""
if __debug__:
debug("GPR", "Computing log_marginal_likelihood")
self.ca.log_marginal_likelihood = (
-0.5 * Ndot(self._train_labels, self._alpha)
- Nlog(self._L.diagonal()).sum()
- self._km_train_train.shape[0] * _halflog2pi
)
return self.ca.log_marginal_likelihood
    def compute_gradient_log_marginal_likelihood(self):
        """Compute gradient of the log marginal likelihood. This
        version use a more compact formula provided by Williams and
        Rasmussen book.

        Returns
        -------
        np.ndarray
            Gradient stacked as [d/d sigma_noise, d/d kernel hyperparameters].
        """
        # XXX EO: check whether the precomputed self.alpha self.Kinv
        # are actually the ones corresponding to the hyperparameters
        # used to compute this gradient!
        # YYY EO: currently this is verified outside gpr.py but it is
        # not an efficient solution.
        # XXX EO: Do some memoizing since it could happen that some
        # hyperparameters are kept constant by user request, so we
        # don't need (somtimes) to recompute the corresponding
        # gradient again. COULD THIS BE TAKEN INTO ACCOUNT BY THE
        # NEW CACHED KERNEL INFRASTRUCTURE?

        # self.Kinv = np.linalg.inv(self._C)
        # Faster:
        # K^-1 recovered from the cached Cholesky factorization (self._LL)
        Kinv = SLcho_solve(self._LL, np.eye(self._L.shape[0]))

        # (alpha alpha^T - K^-1) is the common factor of every
        # hyperparameter derivative (Rasmussen & Williams eq. 5.9)
        alphalphaT = np.dot(self._alpha[:, None], self._alpha[None, :])
        tmp = alphalphaT - Kinv
        # Pass tmp to __kernel and let it compute its gradient terms.
        # This scales up to huge number of hyperparameters:
        grad_LML_hypers = self.__kernel.compute_lml_gradient(tmp, self._train_fv)
        # dK/d(sigma_noise) = 2*sigma_noise*I for the additive noise term
        grad_K_sigma_n = 2.0 * self.params.sigma_noise * np.eye(tmp.shape[0])
        # Add the term related to sigma_noise:
        # grad_LML_sigma_n = 0.5 * np.trace(np.dot(tmp,grad_K_sigma_n))
        # Faster formula: tr(AB) = (A*B.T).sum()
        grad_LML_sigma_n = 0.5 * (tmp * grad_K_sigma_n.T).sum()
        lml_gradient = np.hstack([grad_LML_sigma_n, grad_LML_hypers])
        # NOTE(review): assigns the plain attribute while
        # compute_log_marginal_likelihood goes through self.ca -- confirm
        # whether attribute access proxies to the conditional attribute here.
        self.log_marginal_likelihood_gradient = lml_gradient
        return lml_gradient
    def compute_gradient_log_marginal_likelihood_logscale(self):
        """Compute gradient of the log marginal likelihood when
        hyperparameters are in logscale. This version use a more
        compact formula provided by Williams and Rasmussen book.

        Returns
        -------
        np.ndarray
            Gradient stacked as [d/d log(sigma_noise), d/d log(hyperparameters)].
        """
        # Kinv = np.linalg.inv(self._C)
        # Faster:
        # K^-1 recovered from the cached Cholesky factorization (self._LL)
        Kinv = SLcho_solve(self._LL, np.eye(self._L.shape[0]))
        # (alpha alpha^T - K^-1), common factor of all derivatives (R&W eq. 5.9)
        alphalphaT = np.dot(self._alpha[:, None], self._alpha[None, :])
        tmp = alphalphaT - Kinv
        grad_LML_log_hypers = self.__kernel.compute_lml_gradient_logscale(
            tmp, self._train_fv
        )
        # chain rule: d/d log(sigma_n) = sigma_n * d/d sigma_n -> 2*sigma_n^2*I
        grad_K_log_sigma_n = 2.0 * self.params.sigma_noise ** 2 * np.eye(Kinv.shape[0])
        # Add the term related to sigma_noise:
        # grad_LML_log_sigma_n = 0.5 * np.trace(np.dot(tmp, grad_K_log_sigma_n))
        # Faster formula: tr(AB) = (A * B.T).sum()
        grad_LML_log_sigma_n = 0.5 * (tmp * grad_K_log_sigma_n.T).sum()
        lml_gradient = np.hstack([grad_LML_log_sigma_n, grad_LML_log_hypers])
        # NOTE(review): plain-attribute assignment vs self.ca -- see
        # compute_gradient_log_marginal_likelihood; confirm intended target.
        self.log_marginal_likelihood_gradient = lml_gradient
        return lml_gradient
##REF: Name was automagically refactored
def get_sensitivity_analyzer(self, flavor="auto", **kwargs):
"""Returns a sensitivity analyzer for GPR.
Parameters
----------
flavor : str
What sensitivity to provide. Valid values are
'linear', 'model_select', 'auto'.
In case of 'auto' selects 'linear' for linear kernel
and 'model_select' for the rest. 'linear' corresponds to
GPRLinearWeights and 'model_select' to GRPWeights
"""
# XXX The following two lines does not work since
# self.__kernel is instance of LinearKernel and not
# just LinearKernel. How to fix?
# YYY yoh is not sure what is the problem... LinearKernel is actually
# kernel.LinearKernel so everything shoudl be ok
if flavor == "auto":
flavor = ("model_select", "linear")[
int(
isinstance(self.__kernel, GeneralizedLinearKernel)
or isinstance(self.__kernel, LinearKernel)
)
]
if __debug__:
debug("GPR", "Returning '%s' sensitivity analyzer" % flavor)
# Return proper sensitivity
if flavor == "linear":
return GPRLinearWeights(self, **kwargs)
elif flavor == "model_select":
# sanity check
if not ("has_sensitivity" in self.__tags__):
raise ValueError(
"model_select flavor is not available probably "
"due to not available 'openopt' module"
)
return GPRWeights(self, **kwargs)
else:
raise | |
<filename>acc2omp_converter.py
#!/usr/bin/python
# Author: <NAME>
# e-mail: <EMAIL>
# Argonne National Laboratory
# Python imports
import fileinput
import re
from shutil import copyfile
# Most common user configurable parameters

# Set to True for debugging and development purposes
debug = True
# Set to True to retain OpenACC directives in output
keepOpenACC = True

# Lists, dicts, and strings to aid in translation of OpenACC to OpenMP
# Note that the other way would be more difficult since OpenMP tends to
# be more verbose than OpenACC.
# In the variable names in the program, Dir stands for directive
# not directory.
ompDir = '!$omp'
accDir = '!$acc'
# Fortran free-form continuation sentinels for multi-line directives
ompDirContinue = '!$omp&'
accDirContinue = '!$acc&'
nextLineContinue = '&'
emptyString = ''
singleSpaceString = ' '
# used when printing 'old -> new' translations in debug output
transitionArrow = ' -> '
backupExtString = '.bak'

# directives without arguements
# Maps one OpenACC keyword to its OpenMP replacement; keywords with no
# OpenMP counterpart map to the empty string (i.e. they are dropped).
# NOTE(review): 'enter'/'exit' map to partial phrases ('target enter'),
# presumably completed by a following 'data' keyword -- confirm in the
# translation loop.
singleDirDict = {
    'loop': 'parallel do',
    'gang': emptyString,
    'independent': emptyString,
    'parallel': 'target teams distribute',
    'vector': 'simd',
    'routine': 'declare target',
    'seq': emptyString,
    'data': 'data',
    'end': 'end',
    'enter': 'target enter',
    'exit': 'target exit',
    'atomic': 'atomic',
    'serial': 'target',
    'declare': 'declare target',
}

# reserved for two-word directives without arguments (currently empty)
dualDirDict = {}

# directives with arguements
# The replacement supplies the opening of the OpenMP clause; the argument
# list and closing parenthesis are carried over from the OpenACC source.
singleDirwargsDict = {
    'attach': 'map(to:',
    'detach': 'map(from:',
    'copy': 'map(tofrom:',
    'copyin': 'map(to:',
    'copyout': 'map(from:',
    'create': 'map(alloc:',
    'delete': 'map(release:',
    'async': 'depend(out:',
    'wait': 'task depend(in:',
    'collapse': 'collapse(',
    'private': 'private(',
    'vector_length': 'simd simdlen(',
    'num_gangs': 'num_teams(',
    'present': emptyString,
}

# two-word OpenACC directives that take arguments
dualDirwargsDict = {
    'update host': 'target update from(',
    'update device': 'target update to(',
}
def remove_extra_spaces(origString):
    """
    Normalize spacing around commas and parentheses.

    The converter requires directives in a canonical form: no spaces
    around commas or just inside/outside parentheses.  A space is then
    restored after a closing parenthesis where removing it would fuse
    tokens together (before a continuation '&' or an identifier).
    """
    # BUGFIX(idiom): regex patterns are now raw strings; '\(' and '\)' in
    # plain strings are invalid escape sequences (SyntaxWarning on modern
    # Python and flagged by linters as W605).
    # Space before and after a comma
    newString = re.sub(r' *, *', ',', origString)
    # Space before and after left parenthesis
    newString = re.sub(r' *\( *', '(', newString)
    # Space before and after right parenthesis
    newString = re.sub(r' *\) *', ')', newString)
    # Add space back in for continuation symbol
    newString = re.sub(r'\)&', ') &', newString)
    # Add space back when newString is adjacent to another variable or
    # directive
    # \w means any single letter, digit or underscore
    newString = re.sub(r'(\))(\w)', r'\1 \2', newString)
    return newString
def add_space_after_commas(origString):
    """
    Return *origString* with a single space inserted after every comma.

    Directives with arguments need a space after each comma so the
    emitted OpenMP line is readable.
    """
    # A literal comma needs no regex machinery: plain string replacement
    # behaves identically to re.sub(',', ', ', origString).
    return origString.replace(',', ', ')
if __name__ == "__main__":
# This list will contain the output buffer in a line-by-line breakup
entries = []
# Translate source file one line at a time
lines = fileinput.input()
for line in lines:
# Remove extraneous spaces, but we only use
# parsedLine for lines that actually contain directives
origLine = line
parsedLine = remove_extra_spaces(line)
line = parsedLine
if debug:
print "extra spaces extracted below:"
print line
# Four cases to consider when parsing a line:
# 1. Carriage return only
# 2. White space only
# 3. No OpenACC directive
# 4. Line containing an OpenACC directive
# First case is a line with only a CR
if len(line) == 0:
if debug:
print 'Carriage return only'
entries.append(origLine)
continue
# As long as the line is not empty (case #1), it can be
# parsed. We need an iterable object and enumerate object
# to aid in search for directives. We keep track of the
# length of the line as well as its left justification,
# but only use this when we are actually translating
# directives (case #4)
lenLine = len(line)
numLeftSpaces = lenLine - len(line.lstrip(singleSpaceString))
dirs = line.split()
lenDirs = len(dirs)
enumDirs = enumerate(dirs)
# Second case is a line with only a whitespace
if lenDirs == 0:
if debug:
print 'Blank line'
entries.append(origLine)
continue
# Third case is a line that contains no directive
# Use Booleans to keep track of when an OpenACC directive
# has been found, by default we assume there is no
# ACC directive present. Also allow for the possibility
# that uppercase is used for the OpenACC directive, though
# most people will use lowercase.
accDirFound = False
accDirContinueFound = False
if ((dirs[0].lower() != accDir) and
(dirs[0].lower() != accDirContinue)):
if debug:
print 'No OpenACC directive on this line'
entries.append(origLine)
continue
# Fourth case contains some OpenACC directive
# From this point forward, we assume that a directive has
# been found and we try to do a translation.
# We will either find an OpenACC directive or a continuation
# of an OpenACC directive. Check for both, but only one
# must be found.
if dirs[0].lower() == accDir: accDirFound = True
if dirs[0].lower() == accDirContinue: accDirContinueFound = True
# Detect whether they are using upper or lower case for the OpenACC
# directive. Depending on the capitalization of the first instance
# of an OpenACC pragma on that line will determine the
# capitalization of the rest of the line. Mixed capitalization will
# throw off this detection.
accDirUpperCase = dirs[0].isupper()
accDirLowerCase = dirs[0].islower()
if debug:
print "accDirUpperCase = ", accDirUpperCase
print "accDirLowerCase = ", accDirLowerCase
# Booleans cannot be both True or both False
assert (accDirFound != accDirContinueFound)
assert (accDirUpperCase != accDirLowerCase)
if debug:
print 'OpenACC directive present. Translating.'
print dirs
# These are the cases we consider
# 1. Directive pairs. These are pairs of directives that only have
# meaning in combinations. Thus, they must be translated in pairs.
# 2. Directive pairs with arguements.
# 3. Directive single with no arguements.
# 4. Directive single with scalar arguments.
# 5. Directive single with multi-dimensional array arguements.
# First find directive pairs, this is kludgy way to search through
# directives but we do pairs first because there is overlap between
# keywords among the different directive categories.
# Counters which are only reset at each iteration of outer loop
# NOTE: If present, totalDirsFound will count nextLineContinue symbol
dualDir = None
totalDirsFound = 0
# Booleans to keep track of what directive type has been found
# Need to be reset at each iteration of inner loop
singleDirFound = False
singleDirwargsFound = False
dualDirFound = False
dualDirwargsFound = False
dirwargsFound = False
for i, dir in enumDirs:
# first iteration just put the OMP directive or continuation
# version of it into a string and go to the next iteration
if i == 0:
newLine = singleSpaceString * numLeftSpaces
if accDirUpperCase:
ompDir = ompDir.upper()
ompDirContinue = ompDirContinue.upper()
else: # accDirLowerCase is True
ompDir = ompDir.lower()
ompDirContinue = ompDirContinue.lower()
if accDirFound:
newLine = newLine + ompDir
else: # accDirContinueFound is True
newLine = newLine + ompDirContinue
continue
# second iteration store the first pragma in the pair
if i == 1:
prevdir = dir
# Special detection needed for line continuation
if dir == nextLineContinue:
totalDirsFound = totalDirsFound + 1
newLine = newLine + singleSpaceString + nextLineContinue
# Additional logic would be necessary if examining
# triplets of directives
# take adjacent directives and create new key
# store previous two directives for next iteration
# if i > 1:
# dualDir = prevdir + singleSpaceString + currentDir
# prevdrevdir = prevdir
# prevdir = dir
# Some directives will have arguements, so we need to identify
# those. The maxsplit arguement to the split method in dirwards
# is needed to identify arrays properly. We split *only* on the
# first parenthesis from the left hand side.
#
# Note that currentDir and dualDir must be in lowercase for pattern
# matching purposes.
dirwargs = dir.split('(', 1)
lenDirwargs = len(dirwargs)
currentDir = dirwargs[0].lower()
dualDir = prevdir.lower() + singleSpaceString + currentDir
if lenDirwargs > 1: dirwargsFound = True # Boolean unused for now
if debug:
print 'dirwargs = ', dirwargs
print 'dirwargs[0] = currentDir = ', currentDir
print 'lenDirswargs = ', lenDirwargs
print 'dualDir =', dualDir
# identify which case we are in, only one can be true at any time
# Need the check on dualDir equal None, because it will not exist
# on iteration = 1.
if dualDir is not None:
if dualDir in dualDirDict:
print 'OpenACC Directive Dual with no argument found'
dualDirFound = True
if dualDir in dualDirwargsDict:
print 'OpenACC Directive Dual with argument found'
dualDirwargsFound = | |
"ref/net462/System.Runtime.dll",
core_deps = {
"netcoreapp2.0": [
"@microsoft.netcore.platforms//:netcoreapp2.0_core",
"@microsoft.netcore.targets//:netcoreapp2.0_core",
],
"netcoreapp2.1": [
"@microsoft.netcore.platforms//:netcoreapp2.1_core",
"@microsoft.netcore.targets//:netcoreapp2.1_core",
],
"netcoreapp3.0": [
"@microsoft.netcore.platforms//:netcoreapp3.0_core",
"@microsoft.netcore.targets//:netcoreapp3.0_core",
],
"netcoreapp3.1": [
"@microsoft.netcore.platforms//:netcoreapp3.1_core",
"@microsoft.netcore.targets//:netcoreapp3.1_core",
],
},
net_deps = {
"netstandard1.0": [
"@microsoft.netcore.platforms//:netstandard1.0_net",
"@microsoft.netcore.targets//:netstandard1.0_net",
],
"netstandard1.1": [
"@microsoft.netcore.platforms//:netstandard1.1_net",
"@microsoft.netcore.targets//:netstandard1.1_net",
],
"netstandard1.2": [
"@microsoft.netcore.platforms//:netstandard1.2_net",
"@microsoft.netcore.targets//:netstandard1.2_net",
],
"netstandard1.3": [
"@microsoft.netcore.platforms//:netstandard1.3_net",
"@microsoft.netcore.targets//:netstandard1.3_net",
],
"netstandard1.4": [
"@microsoft.netcore.platforms//:netstandard1.4_net",
"@microsoft.netcore.targets//:netstandard1.4_net",
],
"netstandard1.5": [
"@microsoft.netcore.platforms//:netstandard1.5_net",
"@microsoft.netcore.targets//:netstandard1.5_net",
],
"netstandard1.6": [
"@microsoft.netcore.platforms//:netstandard1.6_net",
"@microsoft.netcore.targets//:netstandard1.6_net",
],
"netstandard2.0": [
"@microsoft.netcore.platforms//:netstandard2.0_net",
"@microsoft.netcore.targets//:netstandard2.0_net",
],
"netstandard2.1": [
"@microsoft.netcore.platforms//:netstandard2.1_net",
"@microsoft.netcore.targets//:netstandard2.1_net",
],
},
net_files = {
"net462": [
"lib/net462/System.Runtime.dll",
],
"net47": [
"lib/net462/System.Runtime.dll",
],
"net471": [
"lib/net462/System.Runtime.dll",
],
"net472": [
"lib/net462/System.Runtime.dll",
],
"net48": [
"lib/net462/System.Runtime.dll",
],
},
mono_files = [
"lib/net462/System.Runtime.dll",
],
)
# NuGet package System.Runtime.Extensions 4.3.0, pinned by sha256.
# Paths under "ref/" are compile-time reference assemblies; paths under
# "lib/" are runtime assemblies. Each dict maps a target framework to the
# assembly path or dependency labels used when building for it.
nuget_package(
    name = "system.runtime.extensions",
    package = "system.runtime.extensions",
    version = "4.3.0",
    sha256 = "c0b0c79a8cebf38bf55b6cd8096631c63d0547424360749545a46e0e6d1b77fa",
    # Reference assembly per .NET Core target framework.
    core_lib = {
        "netcoreapp2.0": "ref/netstandard1.5/System.Runtime.Extensions.dll",
        "netcoreapp2.1": "ref/netstandard1.5/System.Runtime.Extensions.dll",
        "netcoreapp3.0": "ref/netstandard1.5/System.Runtime.Extensions.dll",
        "netcoreapp3.1": "ref/netstandard1.5/System.Runtime.Extensions.dll",
    },
    # Reference assembly per .NET Framework / netstandard target.
    net_lib = {
        "net462": "ref/net462/System.Runtime.Extensions.dll",
        "net47": "ref/net462/System.Runtime.Extensions.dll",
        "net471": "ref/net462/System.Runtime.Extensions.dll",
        "net472": "ref/net462/System.Runtime.Extensions.dll",
        "net48": "ref/net462/System.Runtime.Extensions.dll",
        "netstandard1.0": "ref/netstandard1.0/System.Runtime.Extensions.dll",
        "netstandard1.1": "ref/netstandard1.0/System.Runtime.Extensions.dll",
        "netstandard1.2": "ref/netstandard1.0/System.Runtime.Extensions.dll",
        "netstandard1.3": "ref/netstandard1.3/System.Runtime.Extensions.dll",
        "netstandard1.4": "ref/netstandard1.3/System.Runtime.Extensions.dll",
        "netstandard1.5": "ref/netstandard1.5/System.Runtime.Extensions.dll",
        "netstandard1.6": "ref/netstandard1.5/System.Runtime.Extensions.dll",
        "netstandard2.0": "ref/netstandard1.5/System.Runtime.Extensions.dll",
        "netstandard2.1": "ref/netstandard1.5/System.Runtime.Extensions.dll",
    },
    mono_lib = "ref/net462/System.Runtime.Extensions.dll",
    # Per-framework dependency labels for .NET Core builds.
    core_deps = {
        "netcoreapp2.0": [
            "@microsoft.netcore.platforms//:netcoreapp2.0_core",
            "@microsoft.netcore.targets//:netcoreapp2.0_core",
        ],
        "netcoreapp2.1": [
            "@microsoft.netcore.platforms//:netcoreapp2.1_core",
            "@microsoft.netcore.targets//:netcoreapp2.1_core",
        ],
        "netcoreapp3.0": [
            "@microsoft.netcore.platforms//:netcoreapp3.0_core",
            "@microsoft.netcore.targets//:netcoreapp3.0_core",
        ],
        "netcoreapp3.1": [
            "@microsoft.netcore.platforms//:netcoreapp3.1_core",
            "@microsoft.netcore.targets//:netcoreapp3.1_core",
        ],
    },
    # Per-framework dependency labels for netstandard builds.
    net_deps = {
        "netstandard1.0": [
            "@microsoft.netcore.platforms//:netstandard1.0_net",
            "@microsoft.netcore.targets//:netstandard1.0_net",
        ],
        "netstandard1.1": [
            "@microsoft.netcore.platforms//:netstandard1.1_net",
            "@microsoft.netcore.targets//:netstandard1.1_net",
        ],
        "netstandard1.2": [
            "@microsoft.netcore.platforms//:netstandard1.2_net",
            "@microsoft.netcore.targets//:netstandard1.2_net",
        ],
        "netstandard1.3": [
            "@microsoft.netcore.platforms//:netstandard1.3_net",
            "@microsoft.netcore.targets//:netstandard1.3_net",
        ],
        "netstandard1.4": [
            "@microsoft.netcore.platforms//:netstandard1.4_net",
            "@microsoft.netcore.targets//:netstandard1.4_net",
        ],
        "netstandard1.5": [
            "@microsoft.netcore.platforms//:netstandard1.5_net",
            "@microsoft.netcore.targets//:netstandard1.5_net",
        ],
        "netstandard1.6": [
            "@microsoft.netcore.platforms//:netstandard1.6_net",
            "@microsoft.netcore.targets//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@microsoft.netcore.platforms//:netstandard2.0_net",
            "@microsoft.netcore.targets//:netstandard2.0_net",
        ],
        "netstandard2.1": [
            "@microsoft.netcore.platforms//:netstandard2.1_net",
            "@microsoft.netcore.targets//:netstandard2.1_net",
        ],
    },
    # Runtime assemblies per .NET Framework target.
    net_files = {
        "net462": [
            "lib/net462/System.Runtime.Extensions.dll",
        ],
        "net47": [
            "lib/net462/System.Runtime.Extensions.dll",
        ],
        "net471": [
            "lib/net462/System.Runtime.Extensions.dll",
        ],
        "net472": [
            "lib/net462/System.Runtime.Extensions.dll",
        ],
        "net48": [
            "lib/net462/System.Runtime.Extensions.dll",
        ],
    },
    mono_files = [
        "lib/net462/System.Runtime.Extensions.dll",
    ],
)
# NuGet package System.Runtime.Handles 4.3.0, pinned by sha256.
# Ships only netstandard1.3 reference assemblies ("ref/" paths); no
# separate runtime files are declared for this package.
nuget_package(
    name = "system.runtime.handles",
    package = "system.runtime.handles",
    version = "4.3.0",
    sha256 = "289e5a5e81a9079e98ebe89ea4191da71fc07da243022b71e2fae42ea47b826b",
    # Reference assembly per .NET Core target framework.
    core_lib = {
        "netcoreapp2.0": "ref/netstandard1.3/System.Runtime.Handles.dll",
        "netcoreapp2.1": "ref/netstandard1.3/System.Runtime.Handles.dll",
        "netcoreapp3.0": "ref/netstandard1.3/System.Runtime.Handles.dll",
        "netcoreapp3.1": "ref/netstandard1.3/System.Runtime.Handles.dll",
    },
    # Reference assembly per netstandard target.
    net_lib = {
        "netstandard1.3": "ref/netstandard1.3/System.Runtime.Handles.dll",
        "netstandard1.4": "ref/netstandard1.3/System.Runtime.Handles.dll",
        "netstandard1.5": "ref/netstandard1.3/System.Runtime.Handles.dll",
        "netstandard1.6": "ref/netstandard1.3/System.Runtime.Handles.dll",
        "netstandard2.0": "ref/netstandard1.3/System.Runtime.Handles.dll",
        "netstandard2.1": "ref/netstandard1.3/System.Runtime.Handles.dll",
    },
    # Per-framework dependency labels for .NET Core builds.
    core_deps = {
        "netcoreapp2.0": [
            "@microsoft.netcore.platforms//:netcoreapp2.0_core",
            "@microsoft.netcore.targets//:netcoreapp2.0_core",
        ],
        "netcoreapp2.1": [
            "@microsoft.netcore.platforms//:netcoreapp2.1_core",
            "@microsoft.netcore.targets//:netcoreapp2.1_core",
        ],
        "netcoreapp3.0": [
            "@microsoft.netcore.platforms//:netcoreapp3.0_core",
            "@microsoft.netcore.targets//:netcoreapp3.0_core",
        ],
        "netcoreapp3.1": [
            "@microsoft.netcore.platforms//:netcoreapp3.1_core",
            "@microsoft.netcore.targets//:netcoreapp3.1_core",
        ],
    },
    # Per-framework dependency labels for netstandard builds.
    net_deps = {
        "netstandard1.3": [
            "@microsoft.netcore.platforms//:netstandard1.3_net",
            "@microsoft.netcore.targets//:netstandard1.3_net",
        ],
        "netstandard1.4": [
            "@microsoft.netcore.platforms//:netstandard1.4_net",
            "@microsoft.netcore.targets//:netstandard1.4_net",
        ],
        "netstandard1.5": [
            "@microsoft.netcore.platforms//:netstandard1.5_net",
            "@microsoft.netcore.targets//:netstandard1.5_net",
        ],
        "netstandard1.6": [
            "@microsoft.netcore.platforms//:netstandard1.6_net",
            "@microsoft.netcore.targets//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@microsoft.netcore.platforms//:netstandard2.0_net",
            "@microsoft.netcore.targets//:netstandard2.0_net",
        ],
        "netstandard2.1": [
            "@microsoft.netcore.platforms//:netstandard2.1_net",
            "@microsoft.netcore.targets//:netstandard2.1_net",
        ],
    },
)
# NuGet package System.Runtime.InteropServices 4.3.0, pinned by sha256.
# NOTE(review): several paths use a "net463" folder; that appears to be the
# actual folder name inside this package (used consistently for net_lib,
# net_files, and mono) -- confirm against the .nupkg contents.
nuget_package(
    name = "system.runtime.interopservices",
    package = "system.runtime.interopservices",
    version = "4.3.0",
    sha256 = "f2c0c7f965097c247eedee277e97ed8fffa5b2d122662c56501b9e476ce61e02",
    # Reference assembly per .NET Core target framework.
    core_lib = {
        "netcoreapp2.0": "ref/netcoreapp1.1/System.Runtime.InteropServices.dll",
        "netcoreapp2.1": "ref/netcoreapp1.1/System.Runtime.InteropServices.dll",
        "netcoreapp3.0": "ref/netcoreapp1.1/System.Runtime.InteropServices.dll",
        "netcoreapp3.1": "ref/netcoreapp1.1/System.Runtime.InteropServices.dll",
    },
    # Reference assembly per .NET Framework / netstandard target.
    net_lib = {
        "net462": "ref/net462/System.Runtime.InteropServices.dll",
        "net47": "ref/net463/System.Runtime.InteropServices.dll",
        "net471": "ref/net463/System.Runtime.InteropServices.dll",
        "net472": "ref/net463/System.Runtime.InteropServices.dll",
        "net48": "ref/net463/System.Runtime.InteropServices.dll",
        "netstandard1.1": "ref/netstandard1.1/System.Runtime.InteropServices.dll",
        "netstandard1.2": "ref/netstandard1.2/System.Runtime.InteropServices.dll",
        "netstandard1.3": "ref/netstandard1.3/System.Runtime.InteropServices.dll",
        "netstandard1.4": "ref/netstandard1.3/System.Runtime.InteropServices.dll",
        "netstandard1.5": "ref/netstandard1.5/System.Runtime.InteropServices.dll",
        "netstandard1.6": "ref/netstandard1.5/System.Runtime.InteropServices.dll",
        "netstandard2.0": "ref/netstandard1.5/System.Runtime.InteropServices.dll",
        "netstandard2.1": "ref/netstandard1.5/System.Runtime.InteropServices.dll",
    },
    mono_lib = "ref/net463/System.Runtime.InteropServices.dll",
    # Per-framework dependency labels for .NET Core builds.
    core_deps = {
        "netcoreapp2.0": [
            "@microsoft.netcore.platforms//:netcoreapp2.0_core",
            "@microsoft.netcore.targets//:netcoreapp2.0_core",
        ],
        "netcoreapp2.1": [
            "@microsoft.netcore.platforms//:netcoreapp2.1_core",
            "@microsoft.netcore.targets//:netcoreapp2.1_core",
        ],
        "netcoreapp3.0": [
            "@microsoft.netcore.platforms//:netcoreapp3.0_core",
            "@microsoft.netcore.targets//:netcoreapp3.0_core",
        ],
        "netcoreapp3.1": [
            "@microsoft.netcore.platforms//:netcoreapp3.1_core",
            "@microsoft.netcore.targets//:netcoreapp3.1_core",
        ],
    },
    # For full-framework targets this depends on the framework's own
    # System.Runtime from the rules_dotnet stdlib; netstandard targets use
    # the platform/targets packages instead.
    net_deps = {
        "net462": [
            "@io_bazel_rules_dotnet//dotnet/stdlib.net:net462_system.runtime.dll",
        ],
        "net47": [
            "@io_bazel_rules_dotnet//dotnet/stdlib.net:net47_system.runtime.dll",
        ],
        "net471": [
            "@io_bazel_rules_dotnet//dotnet/stdlib.net:net471_system.runtime.dll",
        ],
        "net472": [
            "@io_bazel_rules_dotnet//dotnet/stdlib.net:net472_system.runtime.dll",
        ],
        "net48": [
            "@io_bazel_rules_dotnet//dotnet/stdlib.net:net48_system.runtime.dll",
        ],
        "netstandard1.1": [
            "@microsoft.netcore.platforms//:netstandard1.1_net",
            "@microsoft.netcore.targets//:netstandard1.1_net",
        ],
        "netstandard1.2": [
            "@microsoft.netcore.platforms//:netstandard1.2_net",
            "@microsoft.netcore.targets//:netstandard1.2_net",
        ],
        "netstandard1.3": [
            "@microsoft.netcore.platforms//:netstandard1.3_net",
            "@microsoft.netcore.targets//:netstandard1.3_net",
        ],
        "netstandard1.4": [
            "@microsoft.netcore.platforms//:netstandard1.4_net",
            "@microsoft.netcore.targets//:netstandard1.4_net",
        ],
        "netstandard1.5": [
            "@microsoft.netcore.platforms//:netstandard1.5_net",
            "@microsoft.netcore.targets//:netstandard1.5_net",
        ],
        "netstandard1.6": [
            "@microsoft.netcore.platforms//:netstandard1.6_net",
            "@microsoft.netcore.targets//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@microsoft.netcore.platforms//:netstandard2.0_net",
            "@microsoft.netcore.targets//:netstandard2.0_net",
        ],
        "netstandard2.1": [
            "@microsoft.netcore.platforms//:netstandard2.1_net",
            "@microsoft.netcore.targets//:netstandard2.1_net",
        ],
    },
    mono_deps = [
        "@io_bazel_rules_dotnet//dotnet/stdlib:system.runtime.dll",
    ],
    # Runtime assemblies per .NET Framework target.
    net_files = {
        "net462": [
            "lib/net462/System.Runtime.InteropServices.dll",
        ],
        "net47": [
            "lib/net463/System.Runtime.InteropServices.dll",
        ],
        "net471": [
            "lib/net463/System.Runtime.InteropServices.dll",
        ],
        "net472": [
            "lib/net463/System.Runtime.InteropServices.dll",
        ],
        "net48": [
            "lib/net463/System.Runtime.InteropServices.dll",
        ],
    },
    mono_files = [
        "lib/net463/System.Runtime.InteropServices.dll",
    ],
)
# NuGet package System.Runtime.InteropServices.RuntimeInformation 4.3.0,
# pinned by sha256. All reference assemblies come from netstandard1.1;
# runtime assemblies come from lib/net45 (desktop/mono) or
# lib/netstandard1.1 (everything else).
nuget_package(
    name = "system.runtime.interopservices.runtimeinformation",
    package = "system.runtime.interopservices.runtimeinformation",
    version = "4.3.0",
    sha256 = "318a65ebf6720ba8639b359121efa20e895d38c5b599f6f05ec76e0275c82860",
    # Reference assembly per .NET Core target framework.
    core_lib = {
        "netcoreapp2.0": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netcoreapp2.1": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netcoreapp3.0": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netcoreapp3.1": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
    },
    # Reference assembly per .NET Framework / netstandard target.
    net_lib = {
        "net45": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "net451": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "net452": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "net46": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "net461": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "net462": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "net47": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "net471": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "net472": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "net48": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netstandard1.1": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netstandard1.2": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netstandard1.3": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netstandard1.4": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netstandard1.5": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netstandard1.6": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netstandard2.0": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        "netstandard2.1": "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
    },
    mono_lib = "ref/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
    # This package depends only on runtime.native.system (no
    # platforms/targets packages, unlike its siblings).
    core_deps = {
        "netcoreapp2.0": [
            "@runtime.native.system//:netcoreapp2.0_core",
        ],
        "netcoreapp2.1": [
            "@runtime.native.system//:netcoreapp2.1_core",
        ],
        "netcoreapp3.0": [
            "@runtime.native.system//:netcoreapp3.0_core",
        ],
        "netcoreapp3.1": [
            "@runtime.native.system//:netcoreapp3.1_core",
        ],
    },
    net_deps = {
        "netstandard1.1": [
            "@runtime.native.system//:netstandard1.1_net",
        ],
        "netstandard1.2": [
            "@runtime.native.system//:netstandard1.2_net",
        ],
        "netstandard1.3": [
            "@runtime.native.system//:netstandard1.3_net",
        ],
        "netstandard1.4": [
            "@runtime.native.system//:netstandard1.4_net",
        ],
        "netstandard1.5": [
            "@runtime.native.system//:netstandard1.5_net",
        ],
        "netstandard1.6": [
            "@runtime.native.system//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@runtime.native.system//:netstandard2.0_net",
        ],
        "netstandard2.1": [
            "@runtime.native.system//:netstandard2.1_net",
        ],
    },
    # Runtime assemblies per target framework.
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netcoreapp3.0": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netcoreapp3.1": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
    },
    net_files = {
        "net45": [
            "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "net451": [
            "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "net452": [
            "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "net46": [
            "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "net461": [
            "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "net462": [
            "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "net47": [
            "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "net471": [
            "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "net472": [
            "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "net48": [
            "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netstandard1.1": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netstandard1.2": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netstandard1.3": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netstandard1.4": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netstandard1.5": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netstandard1.6": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netstandard2.0": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
        "netstandard2.1": [
            "lib/netstandard1.1/System.Runtime.InteropServices.RuntimeInformation.dll",
        ],
    },
    mono_files = [
        "lib/net45/System.Runtime.InteropServices.RuntimeInformation.dll",
    ],
)
# NuGet package System.Runtime.Numerics 4.3.0, pinned by sha256.
# References come from ref/netstandard1.1; runtime assemblies from
# lib/netstandard1.3. No extra dependency labels are declared.
nuget_package(
    name = "system.runtime.numerics",
    package = "system.runtime.numerics",
    version = "4.3.0",
    sha256 = "3f98c70a031b80531888e36fce668a15e3aa7002033eefd4f1b395acd3d82aa7",
    # Reference assembly per .NET Core target framework.
    core_lib = {
        "netcoreapp2.0": "ref/netstandard1.1/System.Runtime.Numerics.dll",
        "netcoreapp2.1": "ref/netstandard1.1/System.Runtime.Numerics.dll",
        "netcoreapp3.0": "ref/netstandard1.1/System.Runtime.Numerics.dll",
        "netcoreapp3.1": "ref/netstandard1.1/System.Runtime.Numerics.dll",
    },
    # Reference assembly per netstandard target.
    net_lib = {
        "netstandard1.1": "ref/netstandard1.1/System.Runtime.Numerics.dll",
        "netstandard1.2": "ref/netstandard1.1/System.Runtime.Numerics.dll",
        "netstandard1.3": "ref/netstandard1.1/System.Runtime.Numerics.dll",
        "netstandard1.4": "ref/netstandard1.1/System.Runtime.Numerics.dll",
        "netstandard1.5": "ref/netstandard1.1/System.Runtime.Numerics.dll",
        "netstandard1.6": "ref/netstandard1.1/System.Runtime.Numerics.dll",
        "netstandard2.0": "ref/netstandard1.1/System.Runtime.Numerics.dll",
        "netstandard2.1": "ref/netstandard1.1/System.Runtime.Numerics.dll",
    },
    # Runtime assemblies per target framework.
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.3/System.Runtime.Numerics.dll",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.3/System.Runtime.Numerics.dll",
        ],
        "netcoreapp3.0": [
            "lib/netstandard1.3/System.Runtime.Numerics.dll",
        ],
        "netcoreapp3.1": [
            "lib/netstandard1.3/System.Runtime.Numerics.dll",
        ],
    },
    net_files = {
        "netstandard1.3": [
            "lib/netstandard1.3/System.Runtime.Numerics.dll",
        ],
        "netstandard1.4": [
            "lib/netstandard1.3/System.Runtime.Numerics.dll",
        ],
        "netstandard1.5": [
            "lib/netstandard1.3/System.Runtime.Numerics.dll",
        ],
        "netstandard1.6": [
            "lib/netstandard1.3/System.Runtime.Numerics.dll",
        ],
        "netstandard2.0": [
            "lib/netstandard1.3/System.Runtime.Numerics.dll",
        ],
        "netstandard2.1": [
            "lib/netstandard1.3/System.Runtime.Numerics.dll",
        ],
    },
)
# NuGet package System.Security.Cryptography.Encoding 4.3.0, pinned by
# sha256. Dependencies pull in the native OpenSSL crypto package for each
# framework in addition to the platforms package.
nuget_package(
    name = "system.security.cryptography.encoding",
    package = "system.security.cryptography.encoding",
    version = "4.3.0",
    sha256 = "62e81ef3d37a33e35c6e572f5cc7b21d9ea46437f006fdcb3cc0e217c1e126cb",
    # Reference assembly per .NET Core target framework.
    core_lib = {
        "netcoreapp2.0": "ref/netstandard1.3/System.Security.Cryptography.Encoding.dll",
        "netcoreapp2.1": "ref/netstandard1.3/System.Security.Cryptography.Encoding.dll",
        "netcoreapp3.0": "ref/netstandard1.3/System.Security.Cryptography.Encoding.dll",
        "netcoreapp3.1": "ref/netstandard1.3/System.Security.Cryptography.Encoding.dll",
    },
    # Reference assembly per .NET Framework / netstandard target.
    net_lib = {
        "net46": "ref/net46/System.Security.Cryptography.Encoding.dll",
        "net461": "ref/net46/System.Security.Cryptography.Encoding.dll",
        "net462": "ref/net46/System.Security.Cryptography.Encoding.dll",
        "net47": "ref/net46/System.Security.Cryptography.Encoding.dll",
        "net471": "ref/net46/System.Security.Cryptography.Encoding.dll",
        "net472": "ref/net46/System.Security.Cryptography.Encoding.dll",
        "net48": "ref/net46/System.Security.Cryptography.Encoding.dll",
        "netstandard1.3": "ref/netstandard1.3/System.Security.Cryptography.Encoding.dll",
        "netstandard1.4": "ref/netstandard1.3/System.Security.Cryptography.Encoding.dll",
        "netstandard1.5": "ref/netstandard1.3/System.Security.Cryptography.Encoding.dll",
        "netstandard1.6": "ref/netstandard1.3/System.Security.Cryptography.Encoding.dll",
        "netstandard2.0": "ref/netstandard1.3/System.Security.Cryptography.Encoding.dll",
        "netstandard2.1": "ref/netstandard1.3/System.Security.Cryptography.Encoding.dll",
    },
    mono_lib = "ref/net46/System.Security.Cryptography.Encoding.dll",
    # Per-framework dependency labels for .NET Core builds.
    core_deps = {
        "netcoreapp2.0": [
            "@microsoft.netcore.platforms//:netcoreapp2.0_core",
            "@runtime.native.system.security.cryptography.openssl//:netcoreapp2.0_core",
        ],
        "netcoreapp2.1": [
            "@microsoft.netcore.platforms//:netcoreapp2.1_core",
            "@runtime.native.system.security.cryptography.openssl//:netcoreapp2.1_core",
        ],
        "netcoreapp3.0": [
            "@microsoft.netcore.platforms//:netcoreapp3.0_core",
            "@runtime.native.system.security.cryptography.openssl//:netcoreapp3.0_core",
        ],
        "netcoreapp3.1": [
            "@microsoft.netcore.platforms//:netcoreapp3.1_core",
            "@runtime.native.system.security.cryptography.openssl//:netcoreapp3.1_core",
        ],
    },
    # Per-framework dependency labels for netstandard builds.
    net_deps = {
        "netstandard1.3": [
            "@microsoft.netcore.platforms//:netstandard1.3_net",
            "@runtime.native.system.security.cryptography.openssl//:netstandard1.3_net",
        ],
        "netstandard1.4": [
            "@microsoft.netcore.platforms//:netstandard1.4_net",
            "@runtime.native.system.security.cryptography.openssl//:netstandard1.4_net",
        ],
        "netstandard1.5": [
            "@microsoft.netcore.platforms//:netstandard1.5_net",
            "@runtime.native.system.security.cryptography.openssl//:netstandard1.5_net",
        ],
        "netstandard1.6": [
            "@microsoft.netcore.platforms//:netstandard1.6_net",
            "@runtime.native.system.security.cryptography.openssl//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@microsoft.netcore.platforms//:netstandard2.0_net",
            "@runtime.native.system.security.cryptography.openssl//:netstandard2.0_net",
        ],
        "netstandard2.1": [
            "@microsoft.netcore.platforms//:netstandard2.1_net",
            "@runtime.native.system.security.cryptography.openssl//:netstandard2.1_net",
        ],
    },
    # Runtime assemblies per .NET Framework target.
    net_files = {
        "net46": [
            "lib/net46/System.Security.Cryptography.Encoding.dll",
        ],
        "net461": [
            "lib/net46/System.Security.Cryptography.Encoding.dll",
        ],
        "net462": [
            "lib/net46/System.Security.Cryptography.Encoding.dll",
        ],
        "net47": [
            "lib/net46/System.Security.Cryptography.Encoding.dll",
        ],
        "net471": [
            "lib/net46/System.Security.Cryptography.Encoding.dll",
        ],
        "net472": [
            "lib/net46/System.Security.Cryptography.Encoding.dll",
        ],
        "net48": [
            "lib/net46/System.Security.Cryptography.Encoding.dll",
        ],
    },
    mono_files = [
        "lib/net46/System.Security.Cryptography.Encoding.dll",
    ],
)
# NuGet package System.Security.Cryptography.Primitives 4.3.0, pinned by
# sha256. Unlike the Encoding package above this one declares no extra
# dependency labels, only reference and runtime assemblies.
nuget_package(
    name = "system.security.cryptography.primitives",
    package = "system.security.cryptography.primitives",
    version = "4.3.0",
    sha256 = "7e7162ec1dd29d58f96be05b8179db8e718dbd6ac2114e87a7fc23b235b3df5f",
    # Reference assembly per .NET Core target framework.
    core_lib = {
        "netcoreapp2.0": "ref/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        "netcoreapp2.1": "ref/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        "netcoreapp3.0": "ref/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        "netcoreapp3.1": "ref/netstandard1.3/System.Security.Cryptography.Primitives.dll",
    },
    # Reference assembly per .NET Framework / netstandard target.
    net_lib = {
        "net46": "ref/net46/System.Security.Cryptography.Primitives.dll",
        "net461": "ref/net46/System.Security.Cryptography.Primitives.dll",
        "net462": "ref/net46/System.Security.Cryptography.Primitives.dll",
        "net47": "ref/net46/System.Security.Cryptography.Primitives.dll",
        "net471": "ref/net46/System.Security.Cryptography.Primitives.dll",
        "net472": "ref/net46/System.Security.Cryptography.Primitives.dll",
        "net48": "ref/net46/System.Security.Cryptography.Primitives.dll",
        "netstandard1.3": "ref/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        "netstandard1.4": "ref/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        "netstandard1.5": "ref/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        "netstandard1.6": "ref/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        "netstandard2.0": "ref/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        "netstandard2.1": "ref/netstandard1.3/System.Security.Cryptography.Primitives.dll",
    },
    mono_lib = "ref/net46/System.Security.Cryptography.Primitives.dll",
    # Runtime assemblies per target framework.
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        ],
        "netcoreapp3.0": [
            "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        ],
        "netcoreapp3.1": [
            "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/System.Security.Cryptography.Primitives.dll",
        ],
        "net461": [
            "lib/net46/System.Security.Cryptography.Primitives.dll",
        ],
        "net462": [
            "lib/net46/System.Security.Cryptography.Primitives.dll",
        ],
        "net47": [
            "lib/net46/System.Security.Cryptography.Primitives.dll",
        ],
        "net471": [
            "lib/net46/System.Security.Cryptography.Primitives.dll",
        ],
        "net472": [
            "lib/net46/System.Security.Cryptography.Primitives.dll",
        ],
        "net48": [
            "lib/net46/System.Security.Cryptography.Primitives.dll",
        ],
        "netstandard1.3": [
            "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        ],
        "netstandard1.4": [
            "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        ],
        "netstandard1.5": [
            "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        ],
        "netstandard1.6": [
            "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        ],
        "netstandard2.0": [
            "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        ],
        "netstandard2.1": [
            "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll",
        ],
    },
    mono_files = [
        "lib/net46/System.Security.Cryptography.Primitives.dll",
    ],
)
nuget_package(
name = "system.security.cryptography.algorithms",
package = "system.security.cryptography.algorithms",
version = "4.3.0",
sha256 = "b4026f35295ccd8049dc4776e007b6edae79b6afe7e03ddfb9b35077070a580f",
core_lib = {
"netcoreapp2.0": "ref/netstandard1.6/System.Security.Cryptography.Algorithms.dll",
"netcoreapp2.1": "ref/netstandard1.6/System.Security.Cryptography.Algorithms.dll",
"netcoreapp3.0": "ref/netstandard1.6/System.Security.Cryptography.Algorithms.dll",
"netcoreapp3.1": "ref/netstandard1.6/System.Security.Cryptography.Algorithms.dll",
},
net_lib = {
"net46": "ref/net46/System.Security.Cryptography.Algorithms.dll",
"net461": "ref/net461/System.Security.Cryptography.Algorithms.dll",
"net462": "ref/net461/System.Security.Cryptography.Algorithms.dll",
"net47": "ref/net463/System.Security.Cryptography.Algorithms.dll",
"net471": "ref/net463/System.Security.Cryptography.Algorithms.dll",
"net472": "ref/net463/System.Security.Cryptography.Algorithms.dll",
"net48": "ref/net463/System.Security.Cryptography.Algorithms.dll",
"netstandard1.3": "ref/netstandard1.3/System.Security.Cryptography.Algorithms.dll",
"netstandard1.4": "ref/netstandard1.4/System.Security.Cryptography.Algorithms.dll",
"netstandard1.5": "ref/netstandard1.4/System.Security.Cryptography.Algorithms.dll",
"netstandard1.6": "ref/netstandard1.6/System.Security.Cryptography.Algorithms.dll",
"netstandard2.0": "ref/netstandard1.6/System.Security.Cryptography.Algorithms.dll",
"netstandard2.1": "ref/netstandard1.6/System.Security.Cryptography.Algorithms.dll",
},
mono_lib = "ref/net463/System.Security.Cryptography.Algorithms.dll",
core_deps = {
"netcoreapp2.0": [
"@microsoft.netcore.platforms//:netcoreapp2.0_core",
"@runtime.native.system.security.cryptography.apple//:netcoreapp2.0_core",
"@runtime.native.system.security.cryptography.openssl//:netcoreapp2.0_core",
],
"netcoreapp2.1": [
"@microsoft.netcore.platforms//:netcoreapp2.1_core",
"@runtime.native.system.security.cryptography.apple//:netcoreapp2.1_core",
"@runtime.native.system.security.cryptography.openssl//:netcoreapp2.1_core",
],
"netcoreapp3.0": [
"@microsoft.netcore.platforms//:netcoreapp3.0_core",
"@runtime.native.system.security.cryptography.apple//:netcoreapp3.0_core",
"@runtime.native.system.security.cryptography.openssl//:netcoreapp3.0_core",
],
"netcoreapp3.1": [
"@microsoft.netcore.platforms//:netcoreapp3.1_core",
"@runtime.native.system.security.cryptography.apple//:netcoreapp3.1_core",
"@runtime.native.system.security.cryptography.openssl//:netcoreapp3.1_core",
],
},
net_deps = {
"net46": [
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net46_system.security.cryptography.primitives.dll",
],
"net461": [
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net461_system.security.cryptography.primitives.dll",
],
"net462": [
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net462_system.security.cryptography.primitives.dll",
],
"net47": [
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net47_system.io.dll",
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net47_system.runtime.dll",
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net47_system.security.cryptography.encoding.dll",
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net47_system.security.cryptography.primitives.dll",
],
"net471": [
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net471_system.io.dll",
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net471_system.runtime.dll",
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net471_system.security.cryptography.encoding.dll",
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net471_system.security.cryptography.primitives.dll",
],
"net472": [
"@io_bazel_rules_dotnet//dotnet/stdlib.net:net472_system.io.dll",
| |
# Repository: USF-GT-Molecular-Modeling/hoomd-blue (0 GitHub stars)
# Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
r"""Neighbor list acceleration structures.
Pair forces (`hoomd.md.pair`) use neighbor list data structures to find
neighboring particle pairs (those within a distance of :math:`r_\mathrm{cut}`)
efficiently. HOOMD-blue provides several types of neighbor list construction
algorithms that you can select from: `Cell`, `Tree`, and `Stencil`.
Multiple pair force objects can share a single neighbor list, or use independent
neighbor list objects. When neighbor lists are shared, they find neighbors
within the maximum :math:`r_{\mathrm{cut},i,j}` over the associated pair
potentials.
.. rubric:: Buffer distance
Set the `NeighborList.buffer` distance to amortize the cost of the neighbor list
build. When ``buffer > 0``, a neighbor list computed on one step can be reused
on subsequent steps until a particle moves a distance ``buffer/2``. When
`NeighborList.check_dist` is `True`, `NeighborList` starts checking how far
particles have moved `NeighborList.rebuild_check_delay` time steps after the
last build and performs a rebuild when any particle has moved a distance
``buffer/2``. When `NeighborList.check_dist` is `False`, `NeighborList` always
rebuilds after `NeighborList.rebuild_check_delay` time steps.
Note:
With the default settings (``check_dist=True`` and
``rebuild_check_delay=1``), changing `NeighborList.buffer` only impacts
simulation performance and not correctness.
Set the buffer too small and the neighbor list will need to be updated
often, slowing simulation performance. Set the buffer too large, and
`hoomd.md.pair.Pair` will need to needlessly calculate many non-interacting
particle pairs and slow the simulation. There is an optimal value for
`NeighborList.buffer` between the two extremes that provides the best
performance.
.. rubric:: Exclusions
Neighbor lists nominally include all particles within the chosen cutoff
distances. The `NeighborList.exclusions` attribute defines which particles will
be excluded from the list, even if they are within the cutoff.
`NeighborList.exclusions` is a tuple of strings that enable one or more types
of exclusions. The valid exclusion types are:
* ``'angle'``: Exclude the first and third particles in each angle.
* ``'body'``: Exclude particles that belong to the same rigid body.
* ``'bond'``: Exclude particles that are directly bonded together.
* ``'meshbond'``: Exclude particles that are bonded together via a mesh.
* ``'constraint'``: Exclude particles that have a distance constraint applied
between them.
* ``'dihedral'``: Exclude the first and fourth particles in each dihedral.
* ``'special_pair'``: Exclude particles that are part of a special pair.
* ``'1-3'``: Exclude particles *i* and *k* whenever there is a bond (i,j) and
a bond (j,k).
* ``'1-4'``: Exclude particles *i* and *m* whenever there are bonds (i,j),
(j,k), and (k,m).
"""
import hoomd
from hoomd.data.parameterdicts import ParameterDict
from hoomd.data.typeconverter import OnlyFrom, OnlyTypes
from hoomd.logging import log
from hoomd.mesh import Mesh
from hoomd.md import _md
from hoomd.operation import _HOOMDBaseObject
class NeighborList(_HOOMDBaseObject):
    r"""Base class neighbor list.

    Note:
        `NeighborList` is the base class for all neighbor lists. Users should
        not instantiate this class directly.

    Attributes:
        buffer (float): Buffer width :math:`[\mathrm{length}]`.
        exclusions (tuple[str]): Defines which particles to exclude from the
            neighbor list, see more details above.
        rebuild_check_delay (int): How often to attempt to rebuild the neighbor
            list.
        check_dist (bool): Flag to enable / disable distance checking.
        mesh (Mesh): mesh data structure (optional)
    """

    def __init__(self, buffer, exclusions, rebuild_check_delay, check_dist,
                 mesh):
        # Restrict exclusion names to the set the implementations understand.
        validate_exclusions = OnlyFrom([
            'bond', 'angle', 'constraint', 'dihedral', 'special_pair', 'body',
            '1-3', '1-4', 'meshbond'
        ])
        # ``mesh`` is optional; ``None`` means no mesh-based exclusions.
        validate_mesh = OnlyTypes(Mesh, allow_none=True)
        # default exclusions
        params = ParameterDict(exclusions=[validate_exclusions],
                               buffer=float(buffer),
                               rebuild_check_delay=int(rebuild_check_delay),
                               check_dist=bool(check_dist))
        # Assign after construction so the user-supplied value is routed
        # through the ``OnlyFrom`` validator declared in the spec above.
        params["exclusions"] = exclusions
        self._param_dict.update(params)
        self._mesh = validate_mesh(mesh)

    def _attach(self):
        # Subclasses create ``self._cpp_obj`` before delegating here (see
        # ``Cell._attach``), so the mesh (when present) can be registered
        # with the C++ object prior to the base-class attach.
        if self._mesh is not None:
            self._cpp_obj.addMesh(self._mesh._cpp_obj)
        super()._attach()

    @log(requires_run=True)
    def shortest_rebuild(self):
        """int: The shortest period between neighbor list rebuilds.

        `shortest_rebuild` is the smallest number of time steps between neighbor
        list rebuilds during the previous `Simulation.run`.
        """
        return self._cpp_obj.getSmallestRebuild()

    def _remove_dependent(self, obj):
        """Detach and remove this neighbor list once nothing depends on it."""
        super()._remove_dependent(obj)
        if len(self._dependents) == 0:
            if self._attached:
                self._detach()
                self._remove()
                return
            if self._added:
                self._remove()
class Cell(NeighborList):
    r"""Neighbor list computed via a cell list.

    Args:
        buffer (float): Buffer width :math:`[\mathrm{length}]`.
        exclusions (tuple[str]): Defines which particles to exclude from the
            neighbor list, see more details in `NeighborList`.
        rebuild_check_delay (int): How often to attempt to rebuild the neighbor
            list.
        check_dist (bool): Flag to enable / disable distance checking.
        deterministic (bool): When `True`, sort neighbors to help provide
            deterministic simulation runs.
        mesh (Mesh): When a mesh object is passed, the neighbor list uses the
            mesh to determine the bond exclusions in addition to all other
            set exclusions.

    `Cell` bins particles into a regular grid of cells sized to the largest
    :math:`r_\mathrm{cut}`, giving *O(kN)* neighbor list construction where
    *k* is the typical number of particles per cell. It works best when all
    cutoffs are similar in size; once the ratio between the largest and
    smallest cutoff radius exceeds roughly 2:1, the per-cell particle count
    grows and performance suffers, at which point `Stencil` or `Tree` are
    better choices.

    .. image:: cell_list.png
       :width: 250 px
       :align: center
       :alt: Cell list schematic

    Note:
        The cell list allocates memory proportional to both the maximum
        per-cell occupancy (which non-uniform densities inflate) and the
        total number of cells (which large boxes with small cutoffs inflate),
        so `Cell` can consume significant memory, especially on GPU devices.
        `Stencil` and `Tree` may use less memory in those situations.

    Examples::

        cell = nlist.Cell()

    Attributes:
        deterministic (bool): When `True`, sort neighbors to help provide
            deterministic simulation runs.
    """

    def __init__(self,
                 buffer,
                 exclusions=('bond',),
                 rebuild_check_delay=1,
                 check_dist=True,
                 deterministic=False,
                 mesh=None):
        super().__init__(buffer, exclusions, rebuild_check_delay, check_dist,
                         mesh)
        # Cell contributes a single extra parameter beyond the base set.
        extra_params = ParameterDict(deterministic=bool(deterministic))
        self._param_dict.update(extra_params)

    def _attach(self):
        # Select the C++ implementation matching the execution device.
        on_cpu = isinstance(self._simulation.device, hoomd.device.CPU)
        cpp_cls = _md.NeighborListBinned if on_cpu else _md.NeighborListGPUBinned
        self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def,
                                self.buffer)
        super()._attach()
class Stencil(NeighborList):
"""Cell list based neighbor list using stencils.
Args:
cell_width (float): The underlying stencil bin width for the cell list
:math:`[\\mathrm{length}]`.
buffer (float): Buffer width :math:`[\\mathrm{length}]`.
exclusions (tuple[str]): Defines which particles to exclude from the
neighbor list, see more details in `NeighborList`.
rebuild_check_delay (int): How often to attempt to rebuild the neighbor
list.
check_dist (bool): Flag to enable / disable distance checking.
deterministic (bool): When `True`, sort neighbors to help provide
deterministic simulation runs.
mesh (Mesh): When a mesh object is passed, the neighbor list uses the
mesh to determine the bond exclusions in addition to all other
set exclusions.
`Stencil` finds neighboring particles using a fixed width cell list, for
*O(kN)* construction of the neighbor list where *k* is the number of
particles per cell. In contrast with `Cell`, `Stencil` allows the user to
choose the cell width: `cell_width` instead of fixing it to the largest
cutoff radius (`<NAME> et al. 2008
<http://dx.doi.org/10.1016/j.cpc.2008.03.005>`_):
.. image:: stencil_schematic.png
:width: 300 px
:align: center
:alt: Stenciled cell list schematic
This neighbor list style differs from `Cell` in how the adjacent cells are
searched for particles. One stencil is computed per particle type based on
the value of `cell_width` set by the user, which defines the bins that the
particle must search in. Distances to the bins in the stencil are
precomputed so that certain particles can be quickly excluded from the
neighbor list, leading to improved performance compared to `Cell` when there
is size disparity in the cutoff radius. The memory demands of `Stencil` can
also be lower than `Cell` if your system is large and has many small cells
in it; however, `Tree` is usually a better choice for these systems.
The performance of `Stencil` depends strongly on the choice of *cell_width*.
The best performance is obtained when the cutoff radii are multiples of the
*cell_width*, and when the *cell_width* covers the simulation box with a
roughly integer number of cells.
Examples::
nl_s = nlist.Stencil(cell_width=1.5)
Important:
`<NAME> et al. 2016 <http://dx.doi.org/10.1016/j.cpc.2016.02.003>`_
describes this neighbor list implementation. Cite it if you utilize
`Stencil` in your research.
Attributes:
cell_width (float): The underlying stencil bin width for the cell list
:math:`[\\mathrm{length}]`.
deterministic (bool): When `True`, sort neighbors to help provide
deterministic simulation runs.
"""
def __init__(self,
cell_width,
buffer,
exclusions=('bond',),
rebuild_check_delay=1,
check_dist=True,
deterministic=False,
mesh=None):
super().__init__(buffer, exclusions, rebuild_check_delay, check_dist,
mesh)
params = ParameterDict(deterministic=bool(deterministic),
cell_width=float(cell_width))
self._param_dict.update(params)
def _attach(self):
if isinstance(self._simulation.device, hoomd.device.CPU):
nlist_cls = _md.NeighborListStencil
else:
nlist_cls = _md.NeighborListGPUStencil
self._cpp_obj = nlist_cls(self._simulation.state._cpp_sys_def,
| |
fake_name,
fake_instance,
self.fake_marathon_app_config.config_dict,
{
'desired_state': 'start',
'force_bounce': '99999',
}
)
fake_service_config_3 = marathon_tools.MarathonServiceConfig(
fake_name,
fake_instance,
self.fake_marathon_app_config.config_dict,
{
'desired_state': 'stop',
'force_bounce': '99999',
}
)
with contextlib.nested(
mock.patch('marathon_tools.load_system_paasta_config',
autospec=True, return_value=fake_system_paasta_config),
mock.patch('marathon_tools.load_marathon_service_config', autospec=True),
mock.patch('marathon_tools.get_docker_url', autospec=True, return_value=fake_url),
mock.patch('marathon_tools.load_service_namespace_config', autospec=True,
return_value=self.fake_service_namespace_config),
mock.patch('marathon_tools.get_mesos_slaves_grouped_by_attribute',
autospec=True, return_value={'fake_region': {}})
) as (
load_system_paasta_config_patch,
read_service_config_patch,
docker_url_patch,
_,
__,
):
load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(return_value=fake_cluster)
read_service_config_patch.return_value = fake_service_config_1
first_id = marathon_tools.create_complete_config(fake_name,
fake_instance,
self.fake_marathon_config)['id']
first_id_2 = marathon_tools.create_complete_config(fake_name,
fake_instance,
self.fake_marathon_config)['id']
# just for sanity, make sure that the app_id is idempotent.
assert first_id == first_id_2
read_service_config_patch.return_value = fake_service_config_2
second_id = marathon_tools.create_complete_config(fake_name,
fake_instance,
self.fake_marathon_config)['id']
assert first_id != second_id
read_service_config_patch.return_value = fake_service_config_3
third_id = marathon_tools.create_complete_config(fake_name,
fake_instance,
self.fake_marathon_config)['id']
assert second_id != third_id
    def test_get_expected_instance_count_for_namespace(self):
        """Only instances whose nerve_ns matches the requested namespace
        contribute to the expected instance count."""
        service = 'red'
        namespace = 'rojo'
        soa_dir = 'que_esta'
        fake_instances = [(service, 'blue'), (service, 'green')]
        # 'blue' advertises in the requested 'rojo' namespace with 11
        # instances; 'green' (below) advertises elsewhere and is ignored.
        fake_srv_config = marathon_tools.MarathonServiceConfig(
            service=service,
            instance='blue',
            config_dict={'nerve_ns': 'rojo', 'instances': 11},
            branch_dict={},
        )

        def config_helper(name, inst, cluster, soa_dir=None):
            # side_effect for load_marathon_service_config: return the 'rojo'
            # config for 'blue' and an 'amarillo'-namespace config otherwise.
            if inst == 'blue':
                return fake_srv_config
            else:
                return marathon_tools.MarathonServiceConfig(service, 'green', {'nerve_ns': 'amarillo'}, {})

        with contextlib.nested(
            mock.patch('marathon_tools.get_service_instance_list',
                       autospec=True,
                       return_value=fake_instances),
            mock.patch('marathon_tools.load_marathon_service_config',
                       autospec=True,
                       side_effect=config_helper),
        ) as (
            inst_list_patch,
            read_config_patch,
        ):
            actual = marathon_tools.get_expected_instance_count_for_namespace(
                service,
                namespace,
                cluster='fake_cluster',
                soa_dir=soa_dir,
            )
            # Only 'blue' matches the 'rojo' namespace, so its count wins.
            assert actual == 11
            inst_list_patch.assert_called_once_with(service,
                                                    cluster='fake_cluster',
                                                    instance_type='marathon',
                                                    soa_dir=soa_dir)
            # Both instances must still be inspected to decide membership.
            read_config_patch.assert_any_call(service, 'blue', 'fake_cluster', soa_dir=soa_dir)
            read_config_patch.assert_any_call(service, 'green', 'fake_cluster', soa_dir=soa_dir)
def test_get_matching_appids(self):
fakeapp1 = mock.Mock(id='/fake--service.fake--instance---bouncingold')
fakeapp2 = mock.Mock(id='/fake--service.fake--instance---bouncingnew')
fakeapp3 = mock.Mock(id='/fake--service.other--instance--bla')
fakeapp4 = mock.Mock(id='/other--service')
apps = [fakeapp1, fakeapp2, fakeapp3, fakeapp4]
list_apps_mock = mock.Mock(return_value=apps)
fake_client = mock.Mock(list_apps=list_apps_mock)
expected = [
'/fake--service.fake--instance---bouncingold',
'/fake--service.fake--instance---bouncingnew',
]
actual = marathon_tools.get_matching_appids('fake_service', 'fake_instance', fake_client)
assert actual == expected
def test_get_healthcheck_cmd_happy(self):
fake_conf = marathon_tools.MarathonServiceConfig(
'fake_name',
'fake_instance',
{'healthcheck_cmd': 'test_cmd'},
{},
)
actual = fake_conf.get_healthcheck_cmd()
assert actual == 'test_cmd'
def test_get_healthcheck_cmd_raises_when_unset(self):
fake_conf = marathon_tools.MarathonServiceConfig(
'fake_name',
'fake_instance',
{},
{},
)
with raises(marathon_tools.InvalidInstanceConfig) as exc:
fake_conf.get_healthcheck_cmd()
assert "healthcheck mode 'cmd' requires a healthcheck_cmd to run" in str(exc.value)
    def test_get_healthcheck_for_instance_http(self):
        """In http mode the healthcheck is a full URL built from the local
        hostname, the given port, and the configured healthcheck URI."""
        fake_service = 'fake_service'
        fake_namespace = 'fake_namespace'
        fake_hostname = 'fake_hostname'
        fake_random_port = 666
        fake_path = '/fake_path'
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig(fake_service, fake_namespace, {}, {})
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig({
            'mode': 'http',
            'healthcheck_uri': fake_path,
        })
        with contextlib.nested(
            mock.patch('marathon_tools.load_marathon_service_config',
                       autospec=True,
                       return_value=fake_marathon_service_config),
            mock.patch('marathon_tools.load_service_namespace_config',
                       autospec=True,
                       return_value=fake_service_namespace_config),
            # Pin the fqdn so the expected URL is deterministic.
            mock.patch('socket.getfqdn', autospec=True, return_value=fake_hostname),
        ) as (
            read_config_patch,
            load_service_namespace_config_patch,
            hostname_patch
        ):
            expected = ('http', 'http://%s:%d%s' % (fake_hostname, fake_random_port, fake_path))
            actual = marathon_tools.get_healthcheck_for_instance(
                fake_service, fake_namespace, fake_marathon_service_config, fake_random_port)
            assert expected == actual
    def test_get_healthcheck_for_instance_tcp(self):
        """In tcp mode the healthcheck is a tcp:// URL on the given port."""
        fake_service = 'fake_service'
        fake_namespace = 'fake_namespace'
        fake_hostname = 'fake_hostname'
        fake_random_port = 666
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig(fake_service, fake_namespace, {}, {})
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig({
            'mode': 'tcp',
        })
        with contextlib.nested(
            mock.patch('marathon_tools.load_marathon_service_config',
                       autospec=True,
                       return_value=fake_marathon_service_config),
            mock.patch('marathon_tools.load_service_namespace_config',
                       autospec=True,
                       return_value=fake_service_namespace_config),
            # Pin the fqdn so the expected URL is deterministic.
            mock.patch('socket.getfqdn', autospec=True, return_value=fake_hostname),
        ) as (
            read_config_patch,
            load_service_namespace_config_patch,
            hostname_patch
        ):
            expected = ('tcp', 'tcp://%s:%d' % (fake_hostname, fake_random_port))
            actual = marathon_tools.get_healthcheck_for_instance(
                fake_service, fake_namespace, fake_marathon_service_config, fake_random_port)
            assert expected == actual
    def test_get_healthcheck_for_instance_cmd(self):
        """In cmd mode the configured command is returned unchanged."""
        fake_service = 'fake_service'
        fake_namespace = 'fake_namespace'
        fake_hostname = 'fake_hostname'
        fake_random_port = 666
        fake_cmd = '/bin/fake_command'
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig(fake_service, fake_namespace, {
            'healthcheck_mode': 'cmd',
            'healthcheck_cmd': fake_cmd
        }, {})
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig({})
        with contextlib.nested(
            mock.patch('marathon_tools.load_marathon_service_config',
                       autospec=True,
                       return_value=fake_marathon_service_config),
            mock.patch('marathon_tools.load_service_namespace_config',
                       autospec=True,
                       return_value=fake_service_namespace_config),
            mock.patch('socket.getfqdn', autospec=True, return_value=fake_hostname),
        ) as (
            read_config_patch,
            load_service_namespace_config_patch,
            hostname_patch
        ):
            expected = ('cmd', fake_cmd)
            actual = marathon_tools.get_healthcheck_for_instance(
                fake_service, fake_namespace, fake_marathon_service_config, fake_random_port)
            assert expected == actual
    def test_get_healthcheck_for_instance_other(self):
        """An explicit healthcheck_mode of None yields no healthcheck."""
        fake_service = 'fake_service'
        fake_namespace = 'fake_namespace'
        fake_hostname = 'fake_hostname'
        fake_random_port = 666
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig(fake_service, fake_namespace, {
            'healthcheck_mode': None,
        }, {})
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig({})
        with contextlib.nested(
            mock.patch('marathon_tools.load_marathon_service_config',
                       autospec=True,
                       return_value=fake_marathon_service_config),
            mock.patch('marathon_tools.load_service_namespace_config',
                       autospec=True,
                       return_value=fake_service_namespace_config),
            mock.patch('socket.getfqdn', autospec=True, return_value=fake_hostname),
        ) as (
            read_config_patch,
            load_service_namespace_config_patch,
            hostname_patch
        ):
            expected = (None, None)
            actual = marathon_tools.get_healthcheck_for_instance(
                fake_service, fake_namespace, fake_marathon_service_config, fake_random_port)
            assert expected == actual
class TestMarathonServiceConfig(object):
    """Unit tests for `MarathonServiceConfig` healthcheck behavior.

    Covers healthcheck mode resolution (explicit, namespace-derived, and
    invalid) and the Marathon healthcheck dicts generated for the http, tcp,
    and cmd modes.
    """

    def test_repr(self):
        actual = repr(marathon_tools.MarathonServiceConfig('foo', 'bar', {'baz': 'baz'}, {'bubble': 'gum'}))
        expected = """MarathonServiceConfig('foo', 'bar', {'baz': 'baz'}, {'bubble': 'gum'})"""
        assert actual == expected

    def test_get_healthcheck_mode_default(self):
        # No mode configured anywhere -> no healthcheck mode.
        namespace_config = marathon_tools.ServiceNamespaceConfig({})
        marathon_config = marathon_tools.MarathonServiceConfig("service", "instance", {}, {})
        assert marathon_config.get_healthcheck_mode(namespace_config) is None

    def test_get_healthcheck_mode_default_from_namespace_config(self):
        # A proxy_port in the namespace config implies 'http'.
        namespace_config = marathon_tools.ServiceNamespaceConfig({'proxy_port': 1234})
        marathon_config = marathon_tools.MarathonServiceConfig("service", "instance", {}, {})
        assert marathon_config.get_healthcheck_mode(namespace_config) == 'http'

    def test_get_healthcheck_mode_valid(self):
        namespace_config = marathon_tools.ServiceNamespaceConfig({})
        marathon_config = marathon_tools.MarathonServiceConfig("service", "instance", {'healthcheck_mode': 'tcp'}, {})
        assert marathon_config.get_healthcheck_mode(namespace_config) == 'tcp'

    def test_get_healthcheck_mode_invalid(self):
        namespace_config = marathon_tools.ServiceNamespaceConfig({})
        marathon_config = marathon_tools.MarathonServiceConfig("service", "instance", {'healthcheck_mode': 'udp'}, {})
        with raises(marathon_tools.InvalidMarathonHealthcheckMode):
            marathon_config.get_healthcheck_mode(namespace_config)

    def test_get_healthcheck_mode_explicit_none(self):
        # An explicit None disables healthchecks rather than falling back.
        namespace_config = marathon_tools.ServiceNamespaceConfig({})
        marathon_config = marathon_tools.MarathonServiceConfig("service", "instance", {'healthcheck_mode': None}, {})
        assert marathon_config.get_healthcheck_mode(namespace_config) is None

    def test_get_healthchecks_http_overrides(self):
        # Every healthcheck_* knob in the service config should override the
        # corresponding default in the generated Marathon healthcheck dict.
        fake_path = '/mycoolstatus'
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
            "service",
            "instance",
            {
                "healthcheck_mode": "http",  # Actually the default here, but I want to be specific.
                "healthcheck_uri": fake_path,
                "healthcheck_grace_period_seconds": 70,
                "healthcheck_interval_seconds": 12,
                "healthcheck_timeout_seconds": 13,
                "healthcheck_max_consecutive_failures": 7,
            },
            {},
        )
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig({
            'mode': 'http',
            'healthcheck_uri': fake_path,
        })
        expected = [
            {
                "protocol": "HTTP",
                "path": fake_path,
                "gracePeriodSeconds": 70,
                "intervalSeconds": 12,
                "portIndex": 0,
                "timeoutSeconds": 13,
                "maxConsecutiveFailures": 7,
            },
        ]
        actual = fake_marathon_service_config.get_healthchecks(fake_service_namespace_config)
        assert actual == expected

    def test_get_healthchecks_http_defaults(self):
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig("service", "instance", {}, {})
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig({'mode': 'http'})
        expected = [
            {
                "protocol": "HTTP",
                "path": '/status',
                "gracePeriodSeconds": 60,
                "intervalSeconds": 10,
                "portIndex": 0,
                "timeoutSeconds": 10,
                "maxConsecutiveFailures": 6
            },
        ]
        actual = fake_marathon_service_config.get_healthchecks(fake_service_namespace_config)
        assert actual == expected

    def test_get_healthchecks_tcp(self):
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig("service", "instance", {}, {})
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig({'mode': 'tcp'})
        expected = [
            {
                "protocol": "TCP",
                "gracePeriodSeconds": 60,
                "intervalSeconds": 10,
                "portIndex": 0,
                "timeoutSeconds": 10,
                "maxConsecutiveFailures": 6
            },
        ]
        actual = fake_marathon_service_config.get_healthchecks(fake_service_namespace_config)
        assert actual == expected

    def test_get_healthchecks_cmd(self):
        # The raw command is wrapped in paasta_execute_docker_command.
        fake_command = '/fake_cmd'
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
            "service", "instance", {'healthcheck_mode': 'cmd', 'healthcheck_cmd': fake_command}, {})
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig()
        expected_cmd = "paasta_execute_docker_command --mesos-id \"$MESOS_TASK_ID\" --cmd /fake_cmd --timeout '10'"
        expected = [
            {
                "protocol": "COMMAND",
                "command": {"value": expected_cmd},
                "gracePeriodSeconds": 60,
                "intervalSeconds": 10,
                "timeoutSeconds": 10,
                "maxConsecutiveFailures": 6
            },
        ]
        actual = fake_marathon_service_config.get_healthchecks(fake_service_namespace_config)
        assert actual == expected

    def test_get_healthchecks_cmd_quotes(self):
        # Commands containing spaces get single-quoted in the wrapper.
        fake_command = '/bin/fake_command with spaces'
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
            "service", "instance", {'healthcheck_mode': 'cmd', 'healthcheck_cmd': fake_command}, {})
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig()
        expected_cmd = "paasta_execute_docker_command " \
                       "--mesos-id \"$MESOS_TASK_ID\" --cmd '%s' --timeout '10'" % fake_command
        expected = [
            {
                "protocol": "COMMAND",
                "command": {"value": expected_cmd},
                "gracePeriodSeconds": 60,
                "intervalSeconds": 10,
                "timeoutSeconds": 10,
                "maxConsecutiveFailures": 6
            },
        ]
        actual = fake_marathon_service_config.get_healthchecks(fake_service_namespace_config)
        assert actual == expected

    def test_get_healthchecks_cmd_overrides(self):
        fake_command = '/bin/fake_command'
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
            "service", "instance", {'healthcheck_mode': 'cmd', 'healthcheck_cmd': fake_command}, {})
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig()
        expected_cmd = "paasta_execute_docker_command " \
                       "--mesos-id \"$MESOS_TASK_ID\" --cmd %s --timeout '10'" % fake_command
        expected = [
            {
                "protocol": "COMMAND",
                "command": {"value": expected_cmd},
                "gracePeriodSeconds": 60,
                "intervalSeconds": 10,
                "timeoutSeconds": 10,
                "maxConsecutiveFailures": 6
            },
        ]
        actual = fake_marathon_service_config.get_healthchecks(fake_service_namespace_config)
        assert actual == expected

    def test_get_healthchecks_cmd_overrides_timeout(self):
        # healthcheck_timeout_seconds feeds both the wrapper's --timeout and
        # the Marathon timeoutSeconds field.
        fake_command = '/bin/fake_command'
        fake_timeout = 4
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
            "service",
            "instance",
            {'healthcheck_mode': 'cmd', 'healthcheck_timeout_seconds': fake_timeout, 'healthcheck_cmd': fake_command},
            {}
        )
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig()
        expected_cmd = "paasta_execute_docker_command " \
                       "--mesos-id \"$MESOS_TASK_ID\" --cmd %s --timeout '%s'" % (fake_command, fake_timeout)
        expected = [
            {
                "protocol": "COMMAND",
                "command": {"value": expected_cmd},
                "gracePeriodSeconds": 60,
                "intervalSeconds": 10,
                "timeoutSeconds": fake_timeout,
                "maxConsecutiveFailures": 6
            },
        ]
        actual = fake_marathon_service_config.get_healthchecks(fake_service_namespace_config)
        assert actual == expected

    def test_get_healthchecks_empty(self):
        fake_marathon_service_config = marathon_tools.MarathonServiceConfig("service", "instance", {}, {})
        fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig({})
        assert fake_marathon_service_config.get_healthchecks(fake_service_namespace_config) == []

    def test_get_healthchecks_invalid_mode(self):
        # The string 'none' (unlike a real None) is not a valid mode.
        marathon_config = marathon_tools.MarathonServiceConfig("service", "instance", {'healthcheck_mode': 'none'}, {})
        namespace_config = marathon_tools.ServiceNamespaceConfig({})
        with raises(marathon_tools.InvalidMarathonHealthcheckMode):
            marathon_config.get_healthchecks(namespace_config)
class TestServiceNamespaceConfig(object):
    """Tests for the ServiceNamespaceConfig accessor helpers."""

    def test_get_mode_default(self):
        # With no keys set, mode is unset.
        empty_config = marathon_tools.ServiceNamespaceConfig()
        assert empty_config.get_mode() is None

    def test_get_mode_default_when_port_specified(self):
        # A proxy_port alone implies http mode.
        config_with_port = marathon_tools.ServiceNamespaceConfig({'proxy_port': 1234})
        assert config_with_port.get_mode() == 'http'

    def test_get_mode_valid(self):
        # An explicitly configured mode is returned as-is.
        tcp_config = marathon_tools.ServiceNamespaceConfig({'mode': 'tcp'})
        assert tcp_config.get_mode() == 'tcp'

    def test_get_mode_invalid(self):
        # Unknown modes raise InvalidSmartstackMode.
        bad_config = marathon_tools.ServiceNamespaceConfig({'mode': 'paasta'})
        with raises(marathon_tools.InvalidSmartstackMode):
            bad_config.get_mode()

    def test_get_healthcheck_uri_default(self):
        assert marathon_tools.ServiceNamespaceConfig().get_healthcheck_uri() == '/status'

    def test_get_discover_default(self):
        assert marathon_tools.ServiceNamespaceConfig().get_discover() == 'region'
def test_deformat_job_id():
    """deformat_job_id decodes '--' back into '_' in each job id component."""
    job_id = 'ser--vice.in--stance.git--hash.config--hash'
    assert marathon_tools.deformat_job_id(job_id) == (
        'ser_vice', 'in_stance', 'git_hash', 'config_hash')
def test_create_complete_config_no_smartstack():
    """Without smartstack (no proxy_port), create_complete_config emits no
    health checks and uses the default bridge port mapping."""
    service = "service"
    instance = "instance"
    fake_job_id = "service.instance.some.hash"
    fake_marathon_config = marathon_tools.MarathonConfig({}, 'fake_file.json')
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service,
        instance,
        {},
        {'docker_image': 'abcdef'},
    )
    fake_system_paasta_config = SystemPaastaConfig({
        'volumes': [],
        'docker_registry': 'fake_docker_registry:443'
    }, '/fake/dir/')
    fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig()
    fake_cluster = "clustername"
    with contextlib.nested(
        mock.patch('marathon_tools.load_marathon_service_config', return_value=fake_marathon_service_config),
        mock.patch('marathon_tools.load_service_namespace_config', return_value=fake_service_namespace_config),
        mock.patch('marathon_tools.format_job_id', return_value=fake_job_id),
        mock.patch('marathon_tools.load_system_paasta_config', return_value=fake_system_paasta_config),
        mock.patch('marathon_tools.get_mesos_slaves_grouped_by_attribute',
                   autospec=True, return_value={'fake_region': {}})
    ) as (
        mock_load_marathon_service_config,
        mock_load_service_namespace_config,
        mock_format_job_id,
        mock_system_paasta_config,
        _,
    ):
        mock_system_paasta_config.return_value.get_cluster = mock.Mock(return_value=fake_cluster)
        actual = marathon_tools.create_complete_config(service, instance, fake_marathon_config)
        # Expected Marathon app definition built entirely from defaults plus
        # the mocked registry/docker_image and job id.
        expected = {
            'container': {
                'docker': {
                    'portMappings': [{'protocol': 'tcp', 'containerPort': 8888, 'hostPort': 0}],
                    'image': 'fake_docker_registry:443/abcdef',
                    'network': 'BRIDGE'
                },
                'type': 'DOCKER',
                'volumes': [],
            },
            'instances': 1,
            'mem': 1024,
            'cmd': None,
            'args': [],
            'backoff_factor': 2,
            'cpus': 0.25,
            'uris': ['file:///root/.dockercfg'],
            'backoff_seconds': 1,
            'health_checks': [],
            'env': {},
            'id': fake_job_id,
            'constraints': [["region", "GROUP_BY", "1"]],
        }
        assert actual == expected
        # Assert that the complete config can be inserted into the MarathonApp model
        assert MarathonApp(**actual)
def test_create_complete_config_with_smartstack():
service = "service"
instance = "instance"
fake_job_id = "service.instance.some.hash"
fake_marathon_config = marathon_tools.MarathonConfig({}, 'fake_file.json')
fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
service,
instance,
{},
{'docker_image': 'abcdef'},
)
fake_system_paasta_config = SystemPaastaConfig({
'volumes': [],
'docker_registry': 'fake_docker_registry:443'
}, '/fake/dir/')
fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig({'proxy_port': 9001})
fake_cluster = "clustername"
with contextlib.nested(
mock.patch('marathon_tools.load_marathon_service_config', return_value=fake_marathon_service_config),
mock.patch('marathon_tools.load_service_namespace_config', | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['SensitivityLabelArgs', 'SensitivityLabel']
@pulumi.input_type
class SensitivityLabelArgs:
    def __init__(__self__, *,
                 column_name: pulumi.Input[str],
                 database_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 schema_name: pulumi.Input[str],
                 server_name: pulumi.Input[str],
                 table_name: pulumi.Input[str],
                 information_type: Optional[pulumi.Input[str]] = None,
                 information_type_id: Optional[pulumi.Input[str]] = None,
                 label_id: Optional[pulumi.Input[str]] = None,
                 label_name: Optional[pulumi.Input[str]] = None,
                 rank: Optional[pulumi.Input['SensitivityLabelRank']] = None,
                 sensitivity_label_source: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a SensitivityLabel resource.
        :param pulumi.Input[str] column_name: The name of the column.
        :param pulumi.Input[str] database_name: The name of the database.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        :param pulumi.Input[str] schema_name: The name of the schema.
        :param pulumi.Input[str] server_name: The name of the server.
        :param pulumi.Input[str] table_name: The name of the table.
        :param pulumi.Input[str] information_type: The information type.
        :param pulumi.Input[str] information_type_id: The information type ID.
        :param pulumi.Input[str] label_id: The label ID.
        :param pulumi.Input[str] label_name: The label name.
        :param pulumi.Input['SensitivityLabelRank'] rank: The rank of the sensitivity label.
        :param pulumi.Input[str] sensitivity_label_source: The source of the sensitivity label.
        """
        pulumi.set(__self__, "column_name", column_name)
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "schema_name", schema_name)
        pulumi.set(__self__, "server_name", server_name)
        pulumi.set(__self__, "table_name", table_name)
        # Optional arguments are only recorded when explicitly provided.
        if information_type is not None:
            pulumi.set(__self__, "information_type", information_type)
        if information_type_id is not None:
            pulumi.set(__self__, "information_type_id", information_type_id)
        if label_id is not None:
            pulumi.set(__self__, "label_id", label_id)
        if label_name is not None:
            pulumi.set(__self__, "label_name", label_name)
        if rank is not None:
            pulumi.set(__self__, "rank", rank)
        if sensitivity_label_source is not None:
            pulumi.set(__self__, "sensitivity_label_source", sensitivity_label_source)
    @property
    @pulumi.getter(name="columnName")
    def column_name(self) -> pulumi.Input[str]:
        """
        The name of the column.
        """
        return pulumi.get(self, "column_name")
    @column_name.setter
    def column_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "column_name", value)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        The name of the database.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="schemaName")
    def schema_name(self) -> pulumi.Input[str]:
        """
        The name of the schema.
        """
        return pulumi.get(self, "schema_name")
    @schema_name.setter
    def schema_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "schema_name", value)
    @property
    @pulumi.getter(name="serverName")
    def server_name(self) -> pulumi.Input[str]:
        """
        The name of the server.
        """
        return pulumi.get(self, "server_name")
    @server_name.setter
    def server_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "server_name", value)
    @property
    @pulumi.getter(name="tableName")
    def table_name(self) -> pulumi.Input[str]:
        """
        The name of the table.
        """
        return pulumi.get(self, "table_name")
    @table_name.setter
    def table_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "table_name", value)
    @property
    @pulumi.getter(name="informationType")
    def information_type(self) -> Optional[pulumi.Input[str]]:
        """
        The information type.
        """
        return pulumi.get(self, "information_type")
    @information_type.setter
    def information_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "information_type", value)
    @property
    @pulumi.getter(name="informationTypeId")
    def information_type_id(self) -> Optional[pulumi.Input[str]]:
        """
        The information type ID.
        """
        return pulumi.get(self, "information_type_id")
    @information_type_id.setter
    def information_type_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "information_type_id", value)
    @property
    @pulumi.getter(name="labelId")
    def label_id(self) -> Optional[pulumi.Input[str]]:
        """
        The label ID.
        """
        return pulumi.get(self, "label_id")
    @label_id.setter
    def label_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label_id", value)
    @property
    @pulumi.getter(name="labelName")
    def label_name(self) -> Optional[pulumi.Input[str]]:
        """
        The label name.
        """
        return pulumi.get(self, "label_name")
    @label_name.setter
    def label_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label_name", value)
    @property
    @pulumi.getter
    def rank(self) -> Optional[pulumi.Input['SensitivityLabelRank']]:
        """
        The rank of the sensitivity label.
        """
        return pulumi.get(self, "rank")
    @rank.setter
    def rank(self, value: Optional[pulumi.Input['SensitivityLabelRank']]):
        pulumi.set(self, "rank", value)
    @property
    @pulumi.getter(name="sensitivityLabelSource")
    def sensitivity_label_source(self) -> Optional[pulumi.Input[str]]:
        """
        The source of the sensitivity label.
        """
        return pulumi.get(self, "sensitivity_label_source")
    @sensitivity_label_source.setter
    def sensitivity_label_source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sensitivity_label_source", value)
class SensitivityLabel(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 column_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 information_type: Optional[pulumi.Input[str]] = None,
                 information_type_id: Optional[pulumi.Input[str]] = None,
                 label_id: Optional[pulumi.Input[str]] = None,
                 label_name: Optional[pulumi.Input[str]] = None,
                 rank: Optional[pulumi.Input['SensitivityLabelRank']] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 schema_name: Optional[pulumi.Input[str]] = None,
                 sensitivity_label_source: Optional[pulumi.Input[str]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 table_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A sensitivity label.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] column_name: The name of the column.
        :param pulumi.Input[str] database_name: The name of the database.
        :param pulumi.Input[str] information_type: The information type.
        :param pulumi.Input[str] information_type_id: The information type ID.
        :param pulumi.Input[str] label_id: The label ID.
        :param pulumi.Input[str] label_name: The label name.
        :param pulumi.Input['SensitivityLabelRank'] rank: The rank of the sensitivity label.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        :param pulumi.Input[str] schema_name: The name of the schema.
        :param pulumi.Input[str] sensitivity_label_source: The source of the sensitivity label.
        :param pulumi.Input[str] server_name: The name of the server.
        :param pulumi.Input[str] table_name: The name of the table.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SensitivityLabelArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A sensitivity label.
        This overload accepts a fully-populated SensitivityLabelArgs object
        instead of individual keyword arguments.
        :param str resource_name: The name of the resource.
        :param SensitivityLabelArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single
        # SensitivityLabelArgs object or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(SensitivityLabelArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 column_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 information_type: Optional[pulumi.Input[str]] = None,
                 information_type_id: Optional[pulumi.Input[str]] = None,
                 label_id: Optional[pulumi.Input[str]] = None,
                 label_name: Optional[pulumi.Input[str]] = None,
                 rank: Optional[pulumi.Input['SensitivityLabelRank']] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 schema_name: Optional[pulumi.Input[str]] = None,
                 sensitivity_label_source: Optional[pulumi.Input[str]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 table_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """Shared implementation behind both ``__init__`` overloads:
        validates options, checks required properties and registers the
        resource with the Pulumi engine."""
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SensitivityLabelArgs.__new__(SensitivityLabelArgs)
            # Required properties may be omitted only when looking up an
            # existing resource by URN.
            if column_name is None and not opts.urn:
                raise TypeError("Missing required property 'column_name'")
            __props__.__dict__["column_name"] = column_name
            if database_name is None and not opts.urn:
                raise TypeError("Missing required property 'database_name'")
            __props__.__dict__["database_name"] = database_name
            __props__.__dict__["information_type"] = information_type
            __props__.__dict__["information_type_id"] = information_type_id
            __props__.__dict__["label_id"] = label_id
            __props__.__dict__["label_name"] = label_name
            __props__.__dict__["rank"] = rank
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if schema_name is None and not opts.urn:
                raise TypeError("Missing required property 'schema_name'")
            __props__.__dict__["schema_name"] = schema_name
            __props__.__dict__["sensitivity_label_source"] = sensitivity_label_source
            if server_name is None and not opts.urn:
                raise TypeError("Missing required property 'server_name'")
            __props__.__dict__["server_name"] = server_name
            if table_name is None and not opts.urn:
                raise TypeError("Missing required property 'table_name'")
            __props__.__dict__["table_name"] = table_name
            # Output-only properties start as None and are resolved by the
            # provider.
            __props__.__dict__["is_disabled"] = None
            __props__.__dict__["managed_by"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Aliases allow this resource to be matched against other API
        # versions / legacy token names in existing stacks.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:SensitivityLabel"), pulumi.Alias(type_="azure-native:sql:SensitivityLabel"), pulumi.Alias(type_="azure-nextgen:sql:SensitivityLabel"), pulumi.Alias(type_="azure-native:sql/v20170301preview:SensitivityLabel"), pulumi.Alias(type_="azure-nextgen:sql/v20170301preview:SensitivityLabel"), pulumi.Alias(type_="azure-native:sql/v20200202preview:SensitivityLabel"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:SensitivityLabel"), pulumi.Alias(type_="azure-native:sql/v20201101preview:SensitivityLabel"), pulumi.Alias(type_="azure-nextgen:sql/v20201101preview:SensitivityLabel"), pulumi.Alias(type_="azure-native:sql/v20210201preview:SensitivityLabel"), pulumi.Alias(type_="azure-nextgen:sql/v20210201preview:SensitivityLabel")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SensitivityLabel, __self__).__init__(
            'azure-native:sql/v20200801preview:SensitivityLabel',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SensitivityLabel':
        """
        Get an existing SensitivityLabel resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from the
        # provider's recorded state for the given id.
        __props__ = SensitivityLabelArgs.__new__(SensitivityLabelArgs)
        __props__.__dict__["column_name"] = None
        __props__.__dict__["information_type"] = None
        __props__.__dict__["information_type_id"] = None
        __props__.__dict__["is_disabled"] = None
        __props__.__dict__["label_id"] = None
        __props__.__dict__["label_name"] = None
        __props__.__dict__["managed_by"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["rank"] = None
        __props__.__dict__["schema_name"] = None
        __props__.__dict__["table_name"] = None
        __props__.__dict__["type"] = None
        return SensitivityLabel(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="columnName")
    def column_name(self) -> pulumi.Output[str]:
        """
        The column name.
        """
        # Output property resolved by the provider after create/get.
        return pulumi.get(self, "column_name")
    @property
    @pulumi.getter(name="informationType")
    def information_type(self) -> pulumi.Output[Optional[str]]:
        """
        The information type.
        """
        # Optional output; None when no information type was assigned.
        return pulumi.get(self, "information_type")
    @property
    @pulumi.getter(name="informationTypeId")
    def information_type_id(self) -> pulumi.Output[Optional[str]]:
        """
        The information type ID.
        """
        # Optional output; None when no information type ID was assigned.
        return pulumi.get(self, "information_type_id")
    @property
    @pulumi.getter(name="isDisabled")
    def is_disabled(self) -> pulumi.Output[bool]:
        """
        Is sensitivity recommendation disabled. Applicable for recommended sensitivity label only. Specifies whether the sensitivity recommendation on this column is disabled (dismissed) or not.
        """
        # Read-only output populated by the provider (see _internal_init).
        return pulumi.get(self, "is_disabled")
    @property
    @pulumi.getter(name="labelId")
    def label_id(self) -> pulumi.Output[Optional[str]]:
        """
        The label ID.
        """
        # Optional output; None when no label ID was assigned.
        return pulumi.get(self, "label_id")
@property
@pulumi.getter(name="labelName")
def label_name(self) -> pulumi.Output[Optional[str]]:
"""
The label name.
| |
# Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation ..Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6f6,), # Scooter ..Canoe
(0x1f910, 0x1f91e,), # Zipper-mouth Face ..Hand With Index And Midd
(0x1f920, 0x1f927,), # Face With Cowboy Hat ..Sneezing Face
(0x1f930, 0x1f930,), # Pregnant Woman ..Pregnant Woman
(0x1f933, 0x1f93e,), # Selfie ..Handball
(0x1f940, 0x1f94b,), # Wilted Flower ..Martial Arts Uniform
(0x1f950, 0x1f95e,), # Croissant ..Pancakes
(0x1f980, 0x1f991,), # Crab ..Squid
(0x1f9c0, 0x1f9c0,), # Cheese Wedge ..Cheese Wedge
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # (nil) ..(nil)
),
'10.0.0': (
# Source: EastAsianWidth-10.0.0.txt
# Date: 2017-03-08, 02:00:00 GMT [KW, LI]
#
(0x01100, 0x0115f,), # <NAME> ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock ..Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing S..Hourglass With Flowing S
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol ..Wheelchair Symbol
(0x02693, 0x02693,), # Anchor ..Anchor
(0x026a1, 0x026a1,), # High Voltage Sign ..High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus ..Ophiuchus
(0x026d4, 0x026d4,), # No Entry ..No Entry
(0x026ea, 0x026ea,), # Church ..Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat ..Sailboat
(0x026fa, 0x026fa,), # Tent ..Tent
(0x026fd, 0x026fd,), # Fuel Pump ..Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark ..White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles ..Sparkles
(0x0274c, 0x0274c,), # Cross Mark ..Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross M..Negative Squared Cross M
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark S..Heavy Exclamation Mark S
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop ..Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop ..Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star ..White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle ..Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
(0x03105, 0x0312e,), # Bopomofo Letter B ..Bopomofo Letter O With D
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..(nil)
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe1,), # Tangut Iteration Mark ..Nushu Iteration Mark
(0x17000, 0x187ec,), # (nil) ..(nil)
(0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
(0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
(0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon ..Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker..Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab ..Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag ..Waving Black Flag
(0x1f3f8, 0x1f43e,), # Badminton Racquet And Sh..Paw Prints
(0x1f440, 0x1f440,), # Eyes ..Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # Man Dancing ..Man Dancing
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # Black Heart ..Black Heart
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation ..Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6f8,), # Scooter ..Flying Saucer
(0x1f910, 0x1f93e,), # Zipper-mouth Face ..Handball
(0x1f940, 0x1f94c,), # Wilted Flower ..Curling Stone
(0x1f950, 0x1f96b,), # Croissant ..Canned Food
(0x1f980, 0x1f997,), # Crab ..Cricket
(0x1f9c0, 0x1f9c0,), # Cheese Wedge ..Cheese Wedge
(0x1f9d0, 0x1f9e6,), # Face With Monocle ..Socks
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # (nil) ..(nil)
),
'11.0.0': (
# Source: EastAsianWidth-11.0.0.txt
# Date: 2018-05-14, 09:41:59 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock ..Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing S..Hourglass With Flowing S
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol ..Wheelchair Symbol
(0x02693, 0x02693,), # Anchor ..Anchor
(0x026a1, 0x026a1,), # High Voltage Sign ..High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus ..Ophiuchus
(0x026d4, 0x026d4,), # No Entry ..No Entry
(0x026ea, 0x026ea,), # Church ..Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat ..Sailboat
(0x026fa, 0x026fa,), # Tent ..Tent
(0x026fd, 0x026fd,), # Fuel Pump ..Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark ..White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles ..Sparkles
(0x0274c, 0x0274c,), # Cross Mark ..Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross M..Negative Squared Cross M
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark S..Heavy Exclamation Mark S
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop ..Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop ..Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star ..White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle ..Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A | |
False
if name.startswith('panda.um.'):
self.proxyLock.acquire()
retMer,resMer = taskBuffer.querySQLS("SELECT /*+ index(tab FILESTABLE4_DESTDBLOCK_IDX) */ PandaID FROM ATLAS_PANDA.filesTable4 tab WHERE destinationDBlock=:destinationDBlock AND status IN (:statusM,:statusF) ",
{':destinationDBlock':name,
':statusM':'merging',
':statusF':'failed'})
self.proxyLock.release()
if resMer is not None and len(resMer)>0:
mergeID = resMer[0][0]
# get merging jobs
self.proxyLock.acquire()
mergingJobs = taskBuffer.peekJobs([mergeID],fromDefined=False,fromArchived=False,fromWaiting=False)
self.proxyLock.release()
mergeJob = mergingJobs[0]
if mergeJob is not None:
tmpDestDBlocks = []
# get destDBlock
for tmpFile in mergeJob.Files:
if tmpFile.type in ['output','log']:
if tmpFile.destinationDBlock not in tmpDestDBlocks:
tmpDestDBlocks.append(tmpFile.destinationDBlock)
# run
_logger.debug("start JEDI closer for %s " % name)
self.proxyLock.acquire()
cThr = Closer(taskBuffer,tmpDestDBlocks,mergeJob)
cThr.start()
cThr.join()
self.proxyLock.release()
_logger.debug("end JEDI closer for %s " % name)
continue
else:
_logger.debug("failed to get merging job for %s " % name)
else:
_logger.debug("failed to get merging file for %s " % name)
status,out = True,''
elif dsExists:
# check if dataset exists
status,out = rucioAPI.getMetaData(name)
if status is True:
if out is not None:
try:
rucioAPI.closeDataset(name)
status = True
except Exception:
errtype,errvalue = sys.exc_info()[:2]
out = 'failed to freeze : {0} {1}'.format(errtype,errvalue)
status = False
else:
# dataset not exist
status,out = True,''
dsExists = False
else:
status,out = True,''
if not status:
_logger.error('{0} failed to freeze with {1}'.format(name,out))
else:
self.proxyLock.acquire()
varMap = {}
varMap[':vuid'] = vuid
varMap[':status'] = 'completed'
taskBuffer.querySQLS("UPDATE ATLAS_PANDA.Datasets SET status=:status,modificationdate=CURRENT_DATE WHERE vuid=:vuid",
varMap)
self.proxyLock.release()
if name.startswith('pandaddm_') or name.startswith('panda.um.') or not dsExists:
continue
# set tobedeleted to dis
setTobeDeletedToDis(name)
# count # of files
status,out = rucioAPI.getNumberOfFiles(name)
if status is not True:
if status is False:
_logger.error(out)
else:
_logger.debug(out)
try:
nFile = int(out)
_logger.debug(nFile)
if nFile == 0:
# erase dataset
_logger.debug('erase %s' % name)
status,out = rucioAPI.eraseDataset(name)
_logger.debug('OK with %s' % name)
except Exception:
pass
else:
_logger.debug("wait %s " % name)
self.proxyLock.acquire()
taskBuffer.querySQLS("UPDATE ATLAS_PANDA.Datasets SET modificationdate=CURRENT_DATE WHERE vuid=:vuid", {':vuid':vuid})
self.proxyLock.release()
_logger.debug("end %s " % name)
except Exception:
errStr = traceback.format_exc()
_logger.error(errStr)
self.pool.remove(self)
self.lock.release()
# freeze dataset
# Reset long-idle 'doing' sub datasets back to 'running', then repeatedly
# fetch batches of closable sub datasets and hand them to Freezer threads.
_logger.debug("==== freeze datasets ====")
# Time windows: RU/RL bound the reset query (3h-12h idle); U/L bound the
# freeze query (6h-14d idle).
timeLimitRU = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
timeLimitRL = datetime.datetime.utcnow() - datetime.timedelta(hours=12)
timeLimitU = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
timeLimitL = datetime.datetime.utcnow() - datetime.timedelta(days=14)
# reset doing so that Closer can update unmerged datasets
sql = "SELECT name FROM ATLAS_PANDA.Datasets "
sql += "WHERE type=:type AND (modificationdate BETWEEN :modificationdateRL AND :modificationdateRU) AND subType=:subType AND status=:oldStatus "
varMap = {}
varMap[':modificationdateRU'] = timeLimitRU
varMap[':modificationdateRL'] = timeLimitRL
varMap[':type'] = 'output'
varMap[':subType'] = 'sub'
varMap[':oldStatus'] = 'doing'
retReset,resReset = taskBuffer.querySQLS(sql,varMap)
sql = "UPDATE ATLAS_PANDA.Datasets SET status=:newStatus,modificationdate=:modificationdateU WHERE name=:name AND status=:oldStatus "
if resReset is not None:
    for name, in resReset:
        varMap = {}
        varMap[':name'] = name
        varMap[':oldStatus'] = 'doing'
        varMap[':newStatus'] = 'running'
        # Back-date modificationdate so the dataset falls inside the freeze
        # window selected below.
        varMap[':modificationdateU'] = timeLimitU
        _logger.debug("reset {0} to freeze".format(name))
        taskBuffer.querySQLS(sql,varMap)
# loop for freezer
freezeLock = threading.Semaphore(5)
freezeProxyLock = threading.Lock()
freezeThreadPool = ThreadPool()
maxRows = 100000
while True:
    # lock
    freezeLock.acquire()
    # get datasets
    sqlQuery = "type=:type AND status IN (:status1,:status2,:status3,:status4,:status5) " + \
               "AND (modificationdate BETWEEN :modificationdateL AND :modificationdateU) AND subType=:subType AND rownum <= %s" % maxRows
    varMap = {}
    varMap[':modificationdateU'] = timeLimitU
    varMap[':modificationdateL'] = timeLimitL
    varMap[':type'] = 'output'
    varMap[':status1'] = 'running'
    varMap[':status2'] = 'created'
    varMap[':status3'] = 'defined'
    varMap[':status4'] = 'locked'
    varMap[':status5'] = 'doing'
    varMap[':subType'] = 'sub'
    freezeProxyLock.acquire()
    # getLockDatasets also bumps modificationdate (modTimeOffset) so other
    # daemons don't pick up the same rows.
    res = taskBuffer.getLockDatasets(sqlQuery,varMap,modTimeOffset='90/24/60')
    if res is None:
        _logger.debug("# of datasets to be frozen: %s" % res)
    else:
        _logger.debug("# of datasets to be frozen: %s" % len(res))
    if res is None or len(res)==0:
        freezeProxyLock.release()
        freezeLock.release()
        break
    freezeProxyLock.release()
    # release
    freezeLock.release()
    # run freezer in chunks of nRows datasets per thread
    iRows = 0
    nRows = 500
    while iRows < len(res):
        freezer = Freezer(freezeLock,freezeProxyLock,res[iRows:iRows+nRows],freezeThreadPool)
        freezer.start()
        iRows += nRows
    freezeThreadPool.join()
    # A short batch means the table is exhausted; otherwise loop for more.
    if len(res) < maxRows:
        break
# delete dis datasets
class EraserThr (threading.Thread):
    """Worker thread that erases dispatch ('_dis') datasets via Rucio and
    marks them as deleted in ATLAS_PANDA.Datasets."""
    def __init__(self,lock,proxyLock,datasets,pool,operationType):
        """
        :param lock: semaphore bounding the number of concurrent eraser threads
        :param proxyLock: lock serializing access to the DB proxy (taskBuffer)
        :param datasets: list of (vuid, name, modificationdate) tuples to process
        :param pool: thread pool this thread registers itself with
        :param operationType: label used in log messages ('deleting' or 'shortening')
        """
        threading.Thread.__init__(self)
        self.datasets = datasets
        self.lock = lock
        self.proxyLock = proxyLock
        self.pool = pool
        self.pool.add(self)
        self.operationType = operationType
    def run(self):
        self.lock.acquire()
        try:
            # loop over all datasets
            for vuid,name,modDate in self.datasets:
                # only dis datasets; raw string fixes the invalid '\d' escape
                # sequence (DeprecationWarning on Python 3)
                if re.search(r'_dis\d+$',name) is None:
                    _logger.error("Eraser : non disDS %s" % name)
                    continue
                # delete
                _logger.debug("Eraser %s dis %s %s" % (self.operationType,modDate,name))
                # delete or shorten
                endStatus = 'deleted'
                status,out = rucioAPI.eraseDataset(name)
                if not status:
                    _logger.error(out)
                    continue
                _logger.debug('OK with %s' % name)
                # mark the dataset as deleted in the DB
                self.proxyLock.acquire()
                varMap = {}
                varMap[':vuid'] = vuid
                varMap[':status'] = endStatus
                taskBuffer.querySQLS("UPDATE ATLAS_PANDA.Datasets SET status=:status,modificationdate=CURRENT_DATE WHERE vuid=:vuid",
                                     varMap)
                self.proxyLock.release()
        except Exception:
            errStr = traceback.format_exc()
            _logger.error(errStr)
        # deregister from the pool and free a semaphore slot for the next worker
        self.pool.remove(self)
        self.lock.release()
# delete dis datasets
# Fetch batches of dispatch datasets in 'deleting'/'shortening' state that
# have been idle between 30 minutes and 3 days, and erase them with
# EraserThr workers.
_logger.debug("==== delete dis datasets ====")
timeLimitU = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
timeLimitL = datetime.datetime.utcnow() - datetime.timedelta(days=3)
disEraseLock = threading.Semaphore(5)
disEraseProxyLock = threading.Lock()
disEraseThreadPool = ThreadPool()
#maxRows = 100000
maxRows = 5000
for targetStatus in ['deleting','shortening']:
    # at most 10 batches per status
    for i in range(10):
        # lock
        disEraseLock.acquire()
        # get datasets
        varMap = {}
        varMap[':modificationdateU'] = timeLimitU
        varMap[':modificationdateL'] = timeLimitL
        varMap[':type'] = 'dispatch'
        varMap[':status'] = targetStatus
        sqlQuery = "type=:type AND status=:status AND (modificationdate BETWEEN :modificationdateL AND :modificationdateU) AND rownum <= %s" % maxRows
        disEraseProxyLock.acquire()
        res = taskBuffer.getLockDatasets(sqlQuery,varMap,modTimeOffset='90/24/60')
        if res is None:
            _logger.debug("# of dis datasets for %s: None" % targetStatus)
        else:
            _logger.debug("# of dis datasets for %s: %s" % (targetStatus,len(res)))
        if res is None or len(res)==0:
            disEraseProxyLock.release()
            disEraseLock.release()
            break
        disEraseProxyLock.release()
        # release
        disEraseLock.release()
        # run disEraser in chunks of nRows datasets per thread
        iRows = 0
        nRows = 500
        while iRows < len(res):
            disEraser = EraserThr(disEraseLock,disEraseProxyLock,res[iRows:iRows+nRows],
                                  disEraseThreadPool,targetStatus)
            disEraser.start()
            iRows += nRows
        disEraseThreadPool.join()
        # NOTE(review): this compares against 100, not maxRows (5000), unlike
        # the freezer loop above — so batches of 100-4999 rows trigger another
        # iteration even though more rows may not exist. Confirm intended.
        if len(res) < 100:
            break
_memoryCheck("finisher")
# finisher thread
class FinisherThr (threading.Thread):
def __init__(self,lock,proxyLock,ids,pool,timeNow):
threading.Thread.__init__(self)
self.ids = ids
self.lock = lock
self.proxyLock = proxyLock
self.pool = pool
self.timeNow = timeNow
self.pool.add(self)
def run(self):
self.lock.acquire()
try:
# get jobs from DB
ids = self.ids
self.proxyLock.acquire()
jobs = taskBuffer.peekJobs(ids,fromDefined=False,fromArchived=False,fromWaiting=False)
self.proxyLock.release()
upJobs = []
finJobs = []
for job in jobs:
if job is None or job.jobStatus == 'unknown':
continue
seList = ['dummy']
tmpNucleus = siteMapper.getNucleus(job.nucleus)
# get SEs
if job.prodSourceLabel == 'user' and job.destinationSE not in siteMapper.siteSpecList:
# using --destSE for analysis job to transfer output
seList = [job.destinationSE]
elif tmpNucleus is not None:
seList = list(tmpNucleus.allDdmEndPoints)
elif siteMapper.checkCloud(job.cloud):
# normal production jobs
if DataServiceUtils.checkJobDestinationSE(job) is None:
tmpDstID = siteMapper.getCloud(job.cloud)['dest']
else:
tmpDstID = job.destinationSE
tmpDstSite = siteMapper.getSite(tmpDstID)
scope_input, scope_output = select_scope(tmpDstSite, job.prodSourceLabel)
seList = tmpDstSite.ddm_endpoints_output[scope_output].getLocalEndPoints()
# get LFN list
lfns = []
guids = []
scopes = []
nTokens = 0
for file in job.Files:
# only output files are checked
if file.type == 'output' or file.type == 'log':
if file.status == 'nooutput':
continue
if DataServiceUtils.getDistributedDestination(file.destinationDBlockToken) is not None:
continue
lfns.append(file.lfn)
guids.append(file.GUID)
scopes.append(file.scope)
nTokens += len(file.destinationDBlockToken.split(','))
# get files in LRC
_logger.debug("%s Cloud:%s" % (job.PandaID,job.cloud))
tmpStat,okFiles = rucioAPI.listFileReplicas(scopes,lfns,seList)
if not tmpStat:
_logger.error("%s failed to get file replicas" % job.PandaID)
okFiles = {}
# count files
nOkTokens = 0
for okLFN in okFiles:
okSEs = okFiles[okLFN]
nOkTokens += len(okSEs)
# check all files are ready
_logger.debug("%s nToken:%s nOkToken:%s" % (job.PandaID,nTokens,nOkTokens))
if nTokens <= nOkTokens:
_logger.debug("%s Finisher : Finish" % job.PandaID)
for file in job.Files:
if file.type == 'output' or file.type == 'log':
if file.status != 'nooutput':
file.status = 'ready'
# append to run Finisher
finJobs.append(job)
else:
endTime = job.endTime
if endTime == 'NULL':
endTime = job.startTime
# priority-dependent timeout
tmpCloudSpec = siteMapper.getCloud(job.cloud)
if job.currentPriority >= 800 and (job.prodSourceLabel not in ['user']):
if 'transtimehi' in tmpCloudSpec:
timeOutValue = tmpCloudSpec['transtimehi']
else:
timeOutValue = 1
else:
if 'transtimelo' in tmpCloudSpec:
timeOutValue = tmpCloudSpec['transtimelo']
else:
timeOutValue = 2
# protection
if timeOutValue < 1:
timeOutValue = 1
timeOut = self.timeNow - datetime.timedelta(days=timeOutValue)
_logger.debug("%s Priority:%s Limit:%s End:%s" % (job.PandaID,job.currentPriority,str(timeOut),str(endTime)))
if endTime < timeOut:
# timeout
_logger.debug("%s Finisher : Kill" % job.PandaID)
strMiss = ''
for lfn in lfns:
if lfn not in okFiles:
strMiss += ' %s' % lfn
job.jobStatus = 'failed'
job.taskBufferErrorCode = pandaserver.taskbuffer.ErrorCode.EC_Transfer
job.taskBufferErrorDiag = 'transfer timeout for '+strMiss
guidMap = {}
for file in job.Files:
# set file status
if file.status == 'transferring' or file.type in ['log','output']:
file.status = 'failed'
# collect GUIDs to delete files from _tid datasets
if file.type == 'output' or file.type == 'log':
if file.destinationDBlock not in guidMap:
guidMap[file.destinationDBlock] = []
guidMap[file.destinationDBlock].append(file.GUID)
else:
# wait
_logger.debug("%s Finisher : Wait" % job.PandaID)
for lfn in lfns:
if lfn not in okFiles:
_logger.debug("%s -> %s" % (job.PandaID,lfn))
upJobs.append(job)
# update
_logger.debug("updating ...")
self.proxyLock.acquire()
taskBuffer.updateJobs(upJobs,False)
self.proxyLock.release()
# run Finisher
for | |
# tests for llc_rearrange.py
import pytest
import numpy as _np
import sys
sys.path.append('/Users/Mikejmnez/llc_transformations/llc_rearrange/')
from LLC_rearrange import LLCtransformation as LLC
from LLC_rearrange import make_chunks, make_array, pos_chunks, chunk_sizes, face_connect, arct_connect, init_vars
from LLC_rearrange import Dims
from oceanspy import open_oceandataset
# location of the test catalog
Datadir = './tests/' # .oceanspy/tests/Data/
ECCO_url = "{}catalog_ECCO.yaml".format(Datadir)
# open the LLC test dataset once at import time; shared by all tests below
od = open_oceandataset.from_catalog('LLC', ECCO_url)
Nx = od._ds.dims['X']  # per-face horizontal size along X
Ny = od._ds.dims['Y']  # per-face horizontal size along Y
@pytest.mark.parametrize(
    "od, var, expected", [
        (od, 'T', ('X', 'Y', 'face', 'Z', 'time')),
        (od, 'U', ('Xp1', 'Y', 'face', 'Z', 'time')),
        (od, 'V', ('X', 'Yp1', 'face', 'Z', 'time')),
    ]
)
def test_original_dims(od, var, expected):
    """Each variable must carry its expected (reversed) dimension order."""
    reversed_dims = list(od._ds[var].dims)[::-1]
    assert Dims(reversed_dims) == expected
# all 13 LLC faces (0-12); face 6 is the arctic cap
faces = [k for k in range(13)]
# faces expected in the non-rotated group
nrot_expected = [0, 1, 2, 3, 4, 5]
# faces expected in the rotated group
rot_expected = [7, 8, 9, 10, 11, 12]
@pytest.mark.parametrize(
    "od, faces, nrot_expected, rot_expected", [
        (od, faces, nrot_expected, rot_expected),
        (od, faces[3:6], nrot_expected[3:6], []),
        (od, faces[8:11], [], rot_expected[1:4])
    ]
)
def test_face_connect(od, faces, nrot_expected, rot_expected):
    """face_connect must split the faces into non-rotated and rotated groups."""
    dataset = od._ds
    nrot_faces, _, _, rot_faces, *_rest = face_connect(dataset, faces)
    assert nrot_faces == nrot_expected
    assert rot_faces == rot_expected
expected = [2, 5, 7, 10]  # faces that connect with arctic cap face=6
# shape of one arctic-crown chunk: half of Nx by full Ny
acshape = (Nx // 2, Ny)
@pytest.mark.parametrize(
    "od, faces, expected, acshape", [
        (od, faces, expected, acshape),
        (od, faces[:2], [], []),
        (od, faces[:6], [], []),
        (od, [0, 1, 2, 6], expected[:1], acshape),
        (od, faces[:7], expected[:2], acshape),
        (od, faces[6:], expected[2:], acshape)
    ]
)
def test_arc_connect(od, faces, expected, acshape):
    """arct_connect must report the faces that adjoin the arctic cap."""
    arc_faces, *_ignored, ARCT = arct_connect(od._ds, 'XG', faces)
    assert arc_faces == expected
    assert len(ARCT) == len(expected)
    if ARCT:
        # each arctic-crown piece has the expected half-face shape
        assert ARCT[0].shape == acshape
@pytest.mark.parametrize(
    "faces, Nx, Ny, rot, exp_tNX, exp_tNY", [
        # connected facets: expected total (tNx, tNy) of the merged domain
        (faces[:6], Nx, Ny, False, 180, 270),
        (faces[6:], Nx, Ny, True, 180, 270),
        (faces[:3], Nx, Ny, False, 90, 270),
        (faces[3:6], Nx, Ny, False, 90, 270),
        (faces[7:10], Nx, Ny, True, 90, 270),
        (faces[10:], Nx, Ny, True, 90, 270),
        # expectations of None mark face combinations that do not connect
        ([0, 2], Nx, Ny, False, None, None),
        ([1, 3], Nx, Ny, False, None, None),
        ([0, 4], Nx, Ny, False, None, None),
        ([0, 5], Nx, Ny, False, None, None),
        ([1, 3], Nx, Ny, False, None, None),
        ([1, 5], Nx, Ny, False, None, None),
        ([2, 3], Nx, Ny, False, None, None),
        ([2, 4], Nx, Ny, False, None, None),
        ([0, 1, 4, 5], Nx, Ny, False, None, None),
        ([1, 2, 3, 4], Nx, Ny, False, None, None),
        ([0, 4, 5], Nx, Ny, False, None, None),
        ([7, 10], Nx, Ny, True, 180, 90),
        ([7, 11], Nx, Ny, True, None, None),
        ([7, 12], Nx, Ny, True, None, None),
        ([8, 10], Nx, Ny, True, None, None),
        ([8, 12], Nx, Ny, True, None, None),
        ([9, 10], Nx, Ny, True, None, None),
        ([9, 11], Nx, Ny, True, None, None),
        ([7, 8, 11, 12], Nx, Ny, True, None, None),
        ([8, 9, 10, 11], Nx, Ny, True, None, None)
    ]
)
def test_chunk_sizes(faces, Nx, Ny, rot, exp_tNX, exp_tNY):
    """chunk_sizes returns (tNy, tNx) for connected facets and raises
    ValueError for disconnected ones."""
    if _is_connect(faces, rotated=rot):
        tNy, tNx = chunk_sizes(faces, [Nx], [Ny], rotated=rot)
        assert tNy == exp_tNY
        assert tNx == exp_tNX
    else:
        with pytest.raises(ValueError):
            tNy, tNx = chunk_sizes(faces, [Nx], [Ny], rotated=rot)
            # NOTE(review): the asserts below never execute because the call
            # above raises inside the with-block — presumably intentional
            assert tNy == exp_tNY
            assert tNx == exp_tNX
@pytest.mark.parametrize(
    "faces, rot, NX, NY, expCX, expCY, epx, epy, epax, epay", [
        # expCX/expCY: expected x/y chunk ranges; epx/epy: expected per-face
        # positions; epax/epay: expected arctic-crown positions
        (faces[:7], False, Nx, Ny, [[0, 90], [90, 180]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90], [0, 90], [0, 90],
          [90, 180], [90, 180], [90, 180]],
         [[0, 90], [90, 180], [180, 270],
          [0, 90], [90, 180], [180, 270]],
         [[0, 90], [90, 180]],
         [[270, 315], [270, 315]]),
        (faces[6:], True, Nx, Ny, [[0, 90], [90, 180]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90], [0, 90], [0, 90],
          [90, 180], [90, 180], [90, 180]],
         [[0, 90], [90, 180], [180, 270],
          [0, 90], [90, 180], [180, 270]],
         [[0, 90], [90, 180]],
         [[270, 315], [270, 315]]),
        (faces[:3] + [6], False, Nx, Ny, [[0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90], [0, 90], [0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90]], [[270, 315]]),
        (faces[:3], False, Nx, Ny, [[0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90], [0, 90], [0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [], []),
        (faces[3:7], False, Nx, Ny, [[0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90], [0, 90], [0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90]], [[270, 315]]),
        (faces[3:6], False, Nx, Ny, [[0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90], [0, 90], [0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [], []),
        (faces[6:10], True, Nx, Ny, [[0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90], [0, 90], [0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90]], [[270, 315]]),
        (faces[7:10], True, Nx, Ny, [[0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90], [0, 90], [0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [], []),
        ([6] + faces[10:], True, Nx, Ny, [[0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90], [0, 90], [0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90]], [[270, 315]],),
        (faces[10:], True, Nx, Ny, [[0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [[0, 90], [0, 90], [0, 90]],
         [[0, 90], [90, 180], [180, 270]],
         [], []),
        ([6, 7, 10], True, Nx, Ny, [[0, 90], [90, 180]], [[0, 90]],
         [[0, 90], [90, 180]],
         [[0, 90], [0, 90]],
         [[0, 90], [90, 180]],
         [[90, 135], [90, 135]]),
        ([2, 5, 6], False, Nx, Ny, [[0, 90], [90, 180]],
         [[0, 90]],
         [[0, 90], [90, 180]],
         [[0, 90], [0, 90]],
         [[0, 90], [90, 180]],
         [[90, 135], [90, 135]]),
    ]
)
def test_make_chunks(faces, rot, NX, NY, expCX, expCY, epx, epy, epax, epay):
    """make_chunks/pos_chunks must tile the faces into the expected index
    ranges, including the arctic crown when face 6 is present."""
    # pick faces belonging to the rotated (7-12) or non-rotated (0-5) facet
    if rot:
        fs = [k for k in faces if k in _np.arange(7, 13)]
    else:
        fs = [k for k in faces if k in _np.arange(6)]
    tNy, tNx = chunk_sizes(faces, [Nx], [Ny], rotated=rot)
    delNX = 0
    delNY = 0
    afs = []
    if 6 in faces:
        # arctic cap present: half a face of extra extent for the crown
        acnrot_fs = [k for k in faces if k in _np.array([2, 5])]
        acrot_fs = [k for k in faces if k in _np.array([7, 10])]
        if rot:
            delNX = int(Nx / 2)
            afs = acrot_fs
        else:
            delNY = int(Ny / 2)
            afs = acnrot_fs
    tNy = tNy + delNY
    tNx = tNx + delNX
    # chunk boundaries at multiples of the per-face sizes
    Nxc = _np.arange(0, tNx + 1, Nx)
    Nyc = _np.arange(0, tNy + 1, Ny)
    xChunk, yChunk = make_chunks(Nxc, Nyc)
    assert xChunk == expCX
    assert yChunk == expCY
    py, px, pyarc, pxarc = pos_chunks(fs, afs, yChunk, xChunk)
    assert epy == py
    assert epx == px
    assert epay == pyarc
    assert epax == pxarc
# supported transformation names
transf = ['arctic_crown', 'arctic_centered']
# supported centering options
cent = ['Atlantic', 'Pacific']
# variables exercised by the transformation tests
varlist = ['T', 'U', 'V']
@pytest.mark.parametrize(
    "od, faces, varlist, transf, centered, drop, expNX, expNY", [
        (od, [2, 6, 10], 'all', transf[0], cent[0], True, 179, 134),
        (od, [2, 5, 6, 7, 10], 'T', transf[0], cent[0], False, 360, 135),
        (od, faces[:6], 'T', transf[0], cent[0], False, 180, 270),
        (od, [2, 5, 6, 7, 10], 'T', transf[1], cent[0], True, 269, 269),
        (od, faces, 'T', transf[1], cent[0], True, 269, 269),
    ],
)
def test_transformation(od, faces, varlist, transf, centered, drop, expNX,
                        expNY):
    """A transformed dataset must have the expected horizontal extent."""
    dataset = od._ds.reset_coords()
    kwargs = {
        "ds": dataset,
        "varlist": varlist,
        "centered": centered,
        "faces": faces,
        "drop": drop,
    }
    # dispatch on the requested transformation name
    transform_map = {
        'arctic_crown': LLC.arctic_crown,
        'arctic_centered': LLC.arctic_centered,
    }
    result = transform_map[transf](**kwargs)
    assert result.dims['X'] == expNX
    assert result.dims['Y'] == expNY
@pytest.mark.parametrize(
    "od, tNX, tNY, X0", [
        (od, 100, 200, 0),
        (od, 200, 400, 100),
        (od, None, None, 'Five'),
        (od, 'Four', None, None),
        (od, 0, 0, 0),
    ]
)
def test_make_vars(od, tNX, tNY, X0):
    """make_array must build a template dataset of the requested size, or
    raise TypeError for non-integer arguments."""
    dataset = od._ds.reset_coords()
    all_ints = (isinstance(tNX, int) and isinstance(tNY, int)
                and isinstance(X0, int))
    if all_ints:
        template = make_array(dataset, tNX, tNY, X0)
        # no new dimensions beyond those of the source dataset
        assert set(template.dims) - set(dataset.dims) == set()
        assert template.dims['X'] == tNX
        assert template.dims['Y'] == tNY
        # vertical and time extents are carried over unchanged
        assert template.dims['Z'] == dataset.dims['Z']
        assert template.dims['time'] == dataset.dims['time']
    else:
        with pytest.raises(TypeError):
            make_array(dataset, tNX, tNY, X0)
@pytest.mark.parametrize(
    "od, tNX, tNY, X0, varlist", [
        (od, 100, 200, 0, ['T']),
        (od, 200, 400, 10, ['U']),
        (od, 200, 400, 0, ['T', 'U', 'V'])
    ]
)
def test_init_vars(od, tNX, tNY, X0, varlist):
    """init_vars must drop exactly the 'face' dimension from each variable."""
    dataset = od._ds.reset_coords()
    template = make_array(dataset, tNX, tNY, X0)
    template = init_vars(dataset, template, varlist)
    for name in varlist:
        assert set(dataset[name].dims) - set(template[name].dims) == {"face"}
def _is_connect(faces, rotated=False):
""" do faces in a facet connect? Not applicable to arc cap, | |
import os
import numpy as np
import random
from sklearn.utils import shuffle
import scipy.io
import matlab.engine
import time
import glob
import argparse
from utils_noisescope import *
import logging
import joblib
# fix the RNG so residual sampling is reproducible across runs
random.seed(6666)
# start a MATLAB engine session; used below for PCE matrix computation
eng = matlab.engine.start_matlab()
def extract_fingerpint_via_clustering(all_res_paths, ground_truth_label,
                                      thre_pce,
                                      cluster_list_with_image_idx,
                                      iter_round, img_dim, outlier_model_path, result_dir,
                                      reduce_matrix=None, merged_cluster=None):
    '''
    Fingerprint Step 2 + 3: recursively merge the most-correlated residual
    clusters until a fake (model) cluster is flagged or no pair exceeds the
    merging threshold.
    :param all_res_paths: noise residuals of the test set.
    :param ground_truth_label: ground truth labels for the test set.
    :param thre_pce: T merge calibrated using function 'correlation_between_real_fps' in pipeline.py
    :param cluster_list_with_image_idx: A list of residual clusters. Each cluster is a tuple, which includes residual indexes.
        NOTE: this list is mutated in place by the merging step.
    :param iter_round: clustering/merging iteration round
    :param img_dim: image/residual dimension
    :param outlier_model_path: fingerprint outlier detector
    :param result_dir: save log, middle products like .mat files
    :param reduce_matrix: previous pair-wise correlation reused for this round of merging iteration
    :param merged_cluster: Newly merged clusters from the last merging step
    :return: ret_fake_cluster_list: A list of fake (model) clusters flagged; ret_cluster_list_with_image_idx: residual indexs in the flagged clusters
    '''
    logging.info("++++++++++PERFORM THE NEXT MERGING ITERATION++++++++++++\n")
    logging.info('Currently, there are {} clusters\n'.format(len(
        cluster_list_with_image_idx)))  # cluster_list_with_image_idx show the latest cluster distribution and clusters
    # log the fake purity of every non-trivial cluster (diagnostics only)
    for cluster_with_img_idx in cluster_list_with_image_idx:
        if len(cluster_with_img_idx) > 10:
            fake_purity = compute_cluster_fake_purity(cluster_with_img_idx, ground_truth_label)
            logging.info(
                'This cluster has {} images with a fake purity: {} \n'.format(len(cluster_with_img_idx), fake_purity))
    num_cluster = len(cluster_list_with_image_idx)
    ### calculate PCE matrix ###
    if iter_round > 0:
        # later rounds: reuse the reduced matrix from the previous round and
        # let MATLAB compute only the rows for the newly merged clusters
        pce_matrix = np.full((num_cluster, num_cluster), 0, dtype=float)
        pce_matrix[0:num_cluster - len(merged_cluster), 0: num_cluster - len(merged_cluster)] = reduce_matrix
        eng.get_pce_matrix_iterate(all_res_paths, cluster_list_with_image_idx, len(merged_cluster), img_dim,
                                   result_dir, iter_round)
        new_pce_matrix = scipy.io.loadmat(result_dir + '{}_partial.mat'.format(iter_round))
        pce_matrix[num_cluster - len(merged_cluster):, :] = np.array(new_pce_matrix['matrix'])
    else:
        # first round: compute the full pair-wise PCE matrix in MATLAB
        t1 = time.time()
        eng.get_pce_matrix_noise_average(all_res_paths, cluster_list_with_image_idx, result_dir, iter_round,
                                         img_dim)
        t2 = time.time()
        logging.info('The first iteration takes {} seconds. \n'.format(t2 - t1))
        pce_matrix = scipy.io.loadmat(result_dir + '{}.mat'.format(iter_round))
        pce_matrix = np.array(pce_matrix['matrix'])
    # positions of cluster pairs whose correlation exceeds the threshold
    large_pce_pos_array = np.where(pce_matrix > thre_pce)
    x_axis_idx = large_pce_pos_array[0]
    y_axis_idx = large_pce_pos_array[1]
    logging.info("{} pairs in the matrix is larger than the threshold. \n".format(len(list(x_axis_idx))))
    # return cases for early stopping
    sorted_cluster_list_with_image_idx = sorted(cluster_list_with_image_idx, key=len, reverse=True)
    # if len(sorted_cluster_list_with_image_idx[0]) > 200: # if we have a big cluster >200, we test it
    if len(sorted_cluster_list_with_image_idx[
               0]) > 150:  # if we have a big cluster > 150, we start the early stopping strategy
        # feed every cluster of size > 50 to the outlier classifier
        feed_list = []
        for idx_tuple in sorted_cluster_list_with_image_idx:
            if len(idx_tuple) > 50:  # pick cluster size [50, X)
                feed_list.append(idx_tuple)
            else:
                break
        # return feed_list, tuple_tree_dict, cluster_list_with_image_idx # for skipping
        fake_cluster_list, fake_flagged = fingerprint_classifier(feed_list, all_res_paths,
                                                                 outlier_model_path, img_dim)
        if fake_flagged:
            logging.info(
                "We detected suspicious fake clusters, NoiseScope will perform fingerprint classifier next.")
            return fake_cluster_list, cluster_list_with_image_idx
        else:
            logging.info(
                "Available candidate clusters are not recognized outliers, NoiseScope continues to do clustering.")
    # another return case, when there is no more high correlated pairs
    if len(list(x_axis_idx)) == 0:
        fake_cluster_list, fake_flagged = fingerprint_classifier(sorted_cluster_list_with_image_idx, all_res_paths,
                                                                 outlier_model_path, img_dim)
        if fake_flagged:
            return fake_cluster_list, cluster_list_with_image_idx
        else:
            logging.info("No fake clusters are flagged, NoiseScope will stop the detection.")
            return fake_cluster_list, cluster_list_with_image_idx
    # confirm how many pairs can be merged
    idx_pairs = list(zip(x_axis_idx, y_axis_idx))  # idx_pairs includes all pair positions
    # attach the PCE value to each pair and sort by it, highest first
    idx_pairs_with_pce = list(map(lambda x: x + (pce_matrix[x[0], x[1]],), idx_pairs))
    sorted_idx_pairs_with_pce = sorted(idx_pairs_with_pce, key=lambda x: x[2], reverse=True)
    idx_pair_for_merge = []
    delete_idxs = []
    # greedily pick the highest-PCE pair, then discard every remaining pair
    # that shares a cluster with it, so each cluster is merged at most once
    while len(sorted_idx_pairs_with_pce) > 0:  # which means still having pairs to merge
        x_idx_max_pce = sorted_idx_pairs_with_pce[0][0]
        y_idx_max_pce = sorted_idx_pairs_with_pce[0][1]
        assert pce_matrix[x_idx_max_pce][y_idx_max_pce] == sorted_idx_pairs_with_pce[0][2]
        idx_pair_for_merge.append((x_idx_max_pce, y_idx_max_pce))
        logging.info(
            'Maximum pce value from current idx pairs is: {}\n'.format(pce_matrix[x_idx_max_pce][y_idx_max_pce]))
        delete_idxs.append(x_idx_max_pce)
        delete_idxs.append(y_idx_max_pce)
        sorted_idx_pairs_with_pce[:] = [idx_pair for idx_pair in sorted_idx_pairs_with_pce if
                                        (x_idx_max_pce not in idx_pair) and (y_idx_max_pce not in idx_pair)]
    ### merging rules ###
    merge_clusters_set = set([])  # contain merged tuples that should be added
    delete_clusters_set = set([])  # contain tuples that need to be deleted
    for idx_pair in idx_pair_for_merge:
        # record all the clusters need to be deleted from cluster_list_with_image_idx
        delete_clusters_set.add(cluster_list_with_image_idx[idx_pair[0]])
        delete_clusters_set.add(cluster_list_with_image_idx[idx_pair[1]])
        # record all the merged cluster need to be added into cluster_list_with_image_idx
        merge_tuple = cluster_list_with_image_idx[idx_pair[0]] + cluster_list_with_image_idx[idx_pair[1]]
        merge_clusters_set.add(merge_tuple)
    # here we remove clusters in delete_clusters_set
    for delete_tuple in delete_clusters_set:
        cluster_list_with_image_idx.remove(delete_tuple)
    # here we add merged clusters in all_merge_set
    for merge_tuple in merge_clusters_set:
        cluster_list_with_image_idx.append(merge_tuple)
    # shrink the PCE matrix by dropping the rows/columns of merged clusters;
    # the merged clusters' correlations are recomputed next round
    pce_values_for_next_iter = []
    for i in range(0, num_cluster):
        if i in delete_idxs:
            continue
        for j in range(0, num_cluster):
            if j in delete_idxs:
                continue
            pce_values_for_next_iter.append(pce_matrix[i, j])
    pce_matrix = np.reshape(pce_values_for_next_iter, (num_cluster - len(delete_idxs), num_cluster - len(delete_idxs)))
    # recurse into the next merging round
    ret_fake_cluster_list, ret_cluster_list_with_image_idx = extract_fingerpint_via_clustering(all_res_paths,
                                                                                              ground_truth_label,
                                                                                              thre_pce,
                                                                                              cluster_list_with_image_idx,
                                                                                              iter_round + 1,
                                                                                              img_dim,
                                                                                              outlier_model_path,
                                                                                              result_dir,
                                                                                              pce_matrix,
                                                                                              merge_clusters_set)
    return ret_fake_cluster_list, ret_cluster_list_with_image_idx
def fake_image_detector(fake_cluster_list, test_res_paths, ground_truth, img_dim, refer_dir):
    '''
    NoiseScope step 4: score every test residual against the flagged model
    fingerprints and classify by a threshold calibrated on the reference set.
    :param fake_cluster_list: A list of fake clusters. Each cluster includes all the residual indexes.
    :param test_res_paths: noise residual paths for test set.
    :param ground_truth: Ground truth label for the test residuals.
    :param img_dim: image/residual size
    :param refer_dir: reference dir
    :return: detection F1 score, or None when no fake cluster was found
    '''
    if len(fake_cluster_list) == 0:
        logging.info('No model fingerprint found! The detection will stop here! \n')
        return
    refer_res_paths = glob.glob(refer_dir + '*.mat')
    test_max_pce = []
    refer_max_pce = []
    all_test_pce = []
    all_refer_pce = []
    cluster_stat = []
    single_cluster_f1_scores = []
    for i, fake_cluster in enumerate(fake_cluster_list):
        logging.info('This fake cluster includes residual id: {}. \n'.format(fake_cluster))
        # adjust the index, because in matlab, index starts from 1.
        fake_cluster_idx_minus = list(map(lambda x: x - 1, fake_cluster))
        fake_pos = np.where(np.array(ground_truth) == 1)
        # fraction of the cluster's members that are truly fake
        fake_purity = len(set(fake_pos[0]).intersection(set(fake_cluster_idx_minus))) / len(fake_cluster)
        cluster_stat.append((len(fake_cluster), fake_purity))
        logging.info('This cluster has a fake purity of {}. \n'.format(fake_purity))
        logging.info('This cluster has image samples{} \n'.format(len(fake_cluster)))
        # build the model fingerprint from the cluster's residuals
        model_fingerprint = compute_fp_from_cluster(fake_cluster, test_res_paths, img_dim)
        logging.info('The shape of fake fingerprint: {}. \n'.format(np.shape(model_fingerprint)))
        # correlate every test/reference residual with this fingerprint
        test_pce_corr = compute_pce_with_fingerprint(test_res_paths, model_fingerprint)
        refer_pce_corr = compute_pce_with_fingerprint(refer_res_paths, model_fingerprint)
        all_test_pce.append(test_pce_corr[0])
        all_refer_pce.append(refer_pce_corr[0])
        # track the element-wise maximum PCE across all fake clusters
        if i == 0:
            test_max_pce = test_pce_corr[0]
            refer_max_pce = refer_pce_corr[0]
        else:
            test_max_pce = list(map(lambda x, y: max(x, y), test_max_pce, test_pce_corr[0]))
            refer_max_pce = list(map(lambda x, y: max(x, y), refer_max_pce, refer_pce_corr[0]))
    # detection threshold = 99.5th percentile of the reference-set PCE values
    calibrate_thres = np.percentile(refer_max_pce, 99.5)
    logging.info('Calibrated PCE threshold for fake image detector, {} \n'.format(calibrate_thres))
    # residuals above the calibrated threshold are labeled fake (1)
    label = list(map(lambda x: 1 if x > calibrate_thres else 0, test_max_pce))
    conf_matrix, metric_scores = compute_confusion_matrix(ground_truth, label)
    logging.info("Clustered with PCE threshold: {}. \n".format(calibrate_thres))
    logging.info("TN, FP, FN, TP: {} \n".format(conf_matrix))
    logging.info("+++++++++++++++++++++++++++++++ \n")
    logging.info("Accuracy: {0:.2f}% \n".format(metric_scores["accuracy"] * 100))
    logging.info("Precision: {0:.2f}% \n".format(metric_scores["precision"] * 100))
    logging.info("Recall: {0:.2f}% \n".format(metric_scores["recall"] * 100))
    logging.info("F1 score: {0:.2f}% \n".format(metric_scores["f1_score"] * 100))
    final_f1 = metric_scores["f1_score"]
    # also log each cluster's standalone detection performance
    for test_pce in all_test_pce:
        label = list(map(lambda x: 1 if x > calibrate_thres else 0, test_pce))
        conf_matrix, metric_scores = compute_confusion_matrix(ground_truth, label)
        logging.info("========Single cluster performance=========\n")
        logging.info("TN, FP, FN, TP: {} \n".format(conf_matrix))
        logging.info("+++++++++++++++++++++++++++++++ \n")
        logging.info("Accuracy: {0:.2f}% \n".format(metric_scores["accuracy"] * 100))
        logging.info("Precision: {0:.2f}% \n".format(metric_scores["precision"] * 100))
        logging.info("Recall: {0:.2f}% \n".format(metric_scores["recall"] * 100))
        logging.info("F1 score: {0:.2f}% \n".format(metric_scores["f1_score"] * 100))
        single_cluster_f1_scores.append(metric_scores["f1_score"])
    return final_f1
def fingerprint_classifier(cluster_list_with_image_idx, res_list, outlier_model_path, img_dim):
    '''
    NoiseScope Step 3: fingerprint classifier.
    :param cluster_list_with_image_idx: A list of residual clusters. Each cluster is a tuple of residual indexes.
    :param res_list: Noise residuals of the test set.
    :param outlier_model_path: Fingerprint outlier detector, which flags model fingerprints as outliers.
    :param img_dim: image/residual size
    :return: (list of flagged fake clusters, whether any cluster was flagged)
    '''
    flagged_clusters = []
    detector = joblib.load(outlier_model_path)
    for cluster in cluster_list_with_image_idx:
        # clusters are assumed sorted by size; stop at the first small one
        if len(cluster) <= 50:
            break
        # sample 50 residuals, average them into a candidate fingerprint,
        # and classify its texture features with the outlier detector
        sampled = random.sample(cluster, 50)
        candidate_fp = compute_fp_from_cluster(sampled, res_list, img_dim)
        features = extract_haralick_features(clip_fp(candidate_fp))
        prediction = detector.predict(np.array(features).reshape(1, -1))
        if prediction == -1:
            flagged_clusters.append(cluster)
            logging.info("One fake cluster is flagged, with {} images.\n".format(len(cluster)))
    logging.info("{} fake clusters have been flagged.".format(len(flagged_clusters)))
    return flagged_clusters, bool(flagged_clusters)
def detection_NoiseScope(args):
    """Run the full NoiseScope pipeline (clustering + fake image detector)
    on a sampled mix of real and fake residuals and return the F1 score
    (None when no fake cluster is flagged)."""
    # normalize the result directory path and set up file logging there
    if args.result_dir[-1] != '/': args.result_dir = args.result_dir + '/'
    if not os.path.exists(args.result_dir): os.mkdir(args.result_dir)
    logging.basicConfig(filename='{}detection.log'.format(args.result_dir), filemode='w', level=logging.DEBUG, format='%(levelname)s:%(message)s')
    # sample the requested number of real/fake residuals (seeded RNG above)
    real_res_list = random.sample(glob.glob(args.real_res_dir + '/*.mat'), args.num_real)
    fake_res_list = random.sample(glob.glob(args.fake_res_dir + '/*.mat'), args.num_fake)
    all_res_paths = real_res_list + fake_res_list
    # label 0 = real, 1 = fake; shuffled together with the paths
    ground_truth_label = [0] * len(real_res_list) + [1] * len(fake_res_list)
    shuffle_data = shuffle(list(zip(ground_truth_label, all_res_paths)))
    [ground_truth_label_, all_res_paths_] = zip(*shuffle_data)
    # logfile = open("{}logfile.txt".format(args.result_dir), "w")
    all_res_paths = list(all_res_paths_)
    ground_truth_label = ground_truth_label_
    # start with singleton clusters; 1-based indices for the MATLAB side
    cluster_list_with_image_idx = [tuple([i]) for i in range(1, len(all_res_paths) + 1)]
    ############ find fake indexs and compute the fake fingerprint ################
    logging.info('Merging threshold: {}\n'.format(args.pce_thre))
    fake_cluster_list, cluster_list_with_image_idx = extract_fingerpint_via_clustering(all_res_paths,
                                                                                      ground_truth_label,
                                                                                      args.pce_thre,
                                                                                      cluster_list_with_image_idx,
                                                                                      0,
                                                                                      args.img_dim,
                                                                                      args.outlier_model_path,
                                                                                      args.result_dir)
    f1_score = fake_image_detector(fake_cluster_list, all_res_paths, ground_truth_label, args.img_dim,
                                   args.refer_res_dir)
    return f1_score
if __name__ == '__main__':
'''
We grab 'num_real' samples from 'real_res_dir' and 'num_fake' samples from 'fake_res_dir'
specify the 'outlier_model_path' trained from prep_steps.py
| |
<gh_stars>10-100
# Source code is modified from and based off of
# old/original Appium Python implementation at
#
# https://github.com/hugs/appium-old
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from bottle import Bottle, request, response, redirect
from bottle import run, static_file
import ConfigParser
import json
import socket
import sys
import platform
import os
import subprocess
import base64
import urllib
import autopy
from time import time
from time import sleep
app = Bottle()
@app.get('/favicon.ico')
def get_favicon():
    # serve the favicon from the server's working directory
    return static_file('favicon.ico', root='.')
def get_platform():
    """Map the running OS to a WebDriver-style platform name.

    Returns one of 'VISTA', 'XP', 'WINDOWS', 'MAC', or 'LINUX'.
    """
    if sys.platform == "darwin":
        return "MAC"
    if sys.platform == "win32":
        release = platform.release()
        if release == "Vista":
            return "VISTA"
        if release == "XP":
            return "XP"
        return "WINDOWS"
    # anything else (linux, bsd, ...) is reported as LINUX
    return "LINUX"
@app.route('/wd/hub/status', method='GET')
def status():
    """Return the WebDriver /status payload: session id, build and OS info."""
    session = app.SESSION_ID if app.started else None
    return {
        'sessionId': session,
        'status': 0,
        'value': {
            'build': {'version': 'AutoPyDriverServer 0.1'},
            'os': {'arch': platform.machine(),
                   'name': get_platform(),
                   'version': platform.release()},
        },
    }
@app.route('/wd/hub/session', method='POST')
def create_session():
    """Create the (single) session, honoring recognized desired capabilities."""
    payload = request.body.read()
    desired = json.loads(payload).get('desiredCapabilities')
    if desired is not None:
        # apply each capability only when the client actually sent it
        tolerance = desired.get('imageRecognitionToleranceValue')
        if tolerance is not None:
            app.tolerance = tolerance
        image_folder = desired.get('defaultImageFolder')
        if image_folder is not None:
            app.image_path = image_folder
        config_file = desired.get('defaultElementImageMapConfigFile')
        if config_file is not None:
            app.element_locator_map_file = config_file
    # mark the session started and redirect to its URL
    app.started = True
    redirect('/wd/hub/session/%s' % app.SESSION_ID)
@app.route('/wd/hub/session/<session_id>', method='GET')
def get_session(session_id=''):
    """Report the capabilities of the session."""
    capabilities = {
        "version": "0.1",
        "browserName": "AutoPy",
        "platform": get_platform(),
        "takesScreenshot": True,
        "imageRecognitionToleranceValue": app.tolerance,
        "defaultImageFolder": app.image_path,
        "defaultElementImageMapConfigFile": app.element_locator_map_file,
    }
    return {'sessionId': session_id, 'status': 0, 'value': capabilities}
@app.route('/wd/hub/session/<session_id>', method='DELETE')
def delete_session(session_id=''):
    """End the session; subsequent /status calls report no active session."""
    app.started = False
    return {'sessionId': session_id, 'status': 0, 'value': {}}
@app.route('/wd/hub/session/<session_id>/execute', method='POST')
def execute_script(session_id=''):
    """Run the shell command in the request's 'script' field, return stdout.

    SECURITY: the command comes straight from the HTTP request and is run
    through a shell (os.popen) with no sanitization — this endpoint must
    only be exposed on trusted networks.
    """
    result = ''
    request_data = request.body.read()
    try:
        script = json.loads(request_data).get('script')
        # run via a shell and capture stdout (stderr is not captured)
        proc_handle = os.popen(script)
        result = proc_handle.read()
        proc_handle.close()
    # narrowed from a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt
    except Exception:
        response.status = 400
        # WebDriver 'unknown error' (status 13) with the exception type
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
    return {'sessionId': session_id,
            'status': 0,
            'value': result}
@app.route('/wd/hub/session/<session_id>/element/<element_id>/click', method='POST')
def element_click(session_id='', element_id=''):
    """Move the mouse to the element (located via image matching) and click."""
    try:
        _go_to_element(element_id)
        autopy.mouse.click()
    # narrowed from a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt
    except Exception:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
    return {'sessionId': session_id,
            'status': 0,
            'value': {}}
@app.route('/wd/hub/session/<session_id>/click', method='POST')
def mouse_click(session_id=''):
    """Click a mouse button: 0/absent = left, 1 = middle, 2 = right."""
    request_data = request.body.read()
    # empty or '{}' body means the default (left) button; membership test
    # replaces the chained '== None' comparisons
    if request_data in (None, '', "{}"):
        button = 0
    else:
        button = json.loads(request_data).get('button')
    try:
        if button == 1:
            autopy.mouse.click(autopy.mouse.CENTER_BUTTON)
        elif button == 2:
            autopy.mouse.click(autopy.mouse.RIGHT_BUTTON)
        else:  # default: left button
            autopy.mouse.click()
    # narrowed from a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt
    except Exception:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
    return {'sessionId': session_id,
            'status': 0,
            'value': {}}
@app.route('/wd/hub/session/<session_id>/buttonup', method='POST')
def mouse_up(session_id=''):
    # Release a mouse button at the current pointer position.
    # The optional request body carries {"button": n}: 1 = middle button,
    # 2 = right button, anything else (or an empty body) = left button.
    request_data = request.body.read()
    if request_data is None or request_data == '' or request_data == "{}":
        button = 0
    else:
        button = json.loads(request_data).get('button')
    try:
        if button == 1:
            autopy.mouse.toggle(False, autopy.mouse.CENTER_BUTTON)
        elif button == 2:
            autopy.mouse.toggle(False, autopy.mouse.RIGHT_BUTTON)
        else:  # default / left button
            autopy.mouse.toggle(False)
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
    return {'sessionId': session_id, 'status': 0, 'value': {}}
@app.route('/wd/hub/session/<session_id>/buttondown', method='POST')
def mouse_down(session_id=''):
    # Press (and hold) a mouse button at the current pointer position.
    # The optional request body carries {"button": n}: 1 = middle button,
    # 2 = right button, anything else (or an empty body) = left button.
    request_data = request.body.read()
    if request_data is None or request_data == '' or request_data == "{}":
        button = 0
    else:
        button = json.loads(request_data).get('button')
    try:
        if button == 1:
            autopy.mouse.toggle(True, autopy.mouse.CENTER_BUTTON)
        elif button == 2:
            autopy.mouse.toggle(True, autopy.mouse.RIGHT_BUTTON)
        else:  # default / left button
            autopy.mouse.toggle(True)
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
    return {'sessionId': session_id, 'status': 0, 'value': {}}
@app.route('/wd/hub/session/<session_id>/moveto', method='POST')
def move_to(session_id=''):
    # Move the mouse pointer. Three modes, per the JSON wire protocol:
    #   * no element, with offsets: move relative to the current position
    #   * element plus offsets: move relative to the element's found position
    #   * element only: move to the centre of the element (_go_to_element)
    request_data = request.body.read()
    if request_data == None or request_data == '' or request_data == "{}":
        element_id = None
        xoffset = None
        yoffset = None
    else:
        element_id = json.loads(request_data).get('element')
        xoffset = json.loads(request_data).get('xoffset')
        yoffset = json.loads(request_data).get('yoffset')
    try:
        if element_id == None and (xoffset != None or yoffset != None):
            # NOTE(review): if only one offset is supplied the other stays
            # None and the addition below raises TypeError, which the bare
            # except turns into a 400 response. Confirm whether a missing
            # offset should default to 0 instead.
            src = autopy.mouse.get_pos()
            autopy.mouse.smooth_move(src[0]+xoffset,src[1]+yoffset)
        else:
            if xoffset != None or yoffset != None:
                # The element id decodes to the path of its reference image;
                # locate that image on screen and offset from its position.
                path = decode_value_from_wire(element_id)
                elem = autopy.bitmap.Bitmap.open(path)
                pos = autopy.bitmap.capture_screen().find_bitmap(elem,app.tolerance)
                autopy.mouse.smooth_move(pos[0]+xoffset,pos[1]+yoffset)
            else: # just go to center of element
                _go_to_element(element_id)
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
    app_response = {'sessionId': session_id,
                    'status': 0,
                    'value': {}}
    return app_response
@app.route('/wd/hub/session/<session_id>/element/<element_id>/value', method='POST')
def set_value(session_id='', element_id=''):
    # Click the element to focus it, then type the requested text into it.
    request_data = request.body.read()
    try:
        text_to_type = ''.join(json.loads(request_data).get('value'))
        _go_to_element(element_id)
        autopy.mouse.click()
        autopy.key.type_string(text_to_type)
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
    return {'sessionId': session_id, 'status': 0, 'value': {}}
# The four WebDriver element-search endpoints all delegate to _find_element.
# "context" is either another element id (search within that element's image)
# or "root" (search the whole screen); "many" selects find-all vs find-one.
@app.route('/wd/hub/session/<session_id>/element/<element_id>/elements', method='POST')
def element_find_elements(session_id='', element_id=''):
    # Find all matches inside the given element.
    return _find_element(session_id, element_id, many=True)

@app.route('/wd/hub/session/<session_id>/elements', method='POST')
def find_elements(session_id=''):
    # Find all matches anywhere on screen.
    return _find_element(session_id, "root", many=True)

@app.route('/wd/hub/session/<session_id>/element/<element_id>/element', method='POST')
def element_find_element(session_id='', element_id=''):
    # Find the first match inside the given element.
    return _find_element(session_id, element_id)

@app.route('/wd/hub/session/<session_id>/element', method='POST')
def find_element(session_id=''):
    # Find the first match anywhere on screen.
    return _find_element(session_id, "root")
def _go_to_element(element_id):
    # Locate the element's reference image on the current screen and glide
    # the pointer to its centre. The element id decodes to the image path.
    path = decode_value_from_wire(element_id)
    elem = autopy.bitmap.Bitmap.open(path)
    pos = autopy.bitmap.capture_screen().find_bitmap(elem,app.tolerance)
    # NOTE(review): if the image is not on screen, pos is None here and the
    # subscript below raises TypeError; callers surface that as a 400 via
    # their except handlers.
    if autopy.mouse.get_pos() != pos:
        autopy.mouse.smooth_move(pos[0]+(elem.width/2),pos[1]+(elem.height/2))
def _find_element(session_id, context, many=False):
    # Core element lookup shared by the four /element(s) endpoints.
    # 'context' is "root" to search the whole screen, or an element id whose
    # decoded image becomes the search canvas. 'many' selects find-all.
    try:
        json_request_data = json.loads(request.body.read())
        locator_strategy = json_request_data.get('using')
        value = json_request_data.get('value')
        # Resolve the locator to the path of a reference image:
        #   id    -> looked up in the "Element Mapping" section of the config
        #   name  -> file name relative to the default image folder
        #   xpath -> treated as a literal file path
        if locator_strategy == "id":
            path = app.config.get("Element Mapping",value)
        elif locator_strategy == "name":
            path = os.path.join(app.image_path,value)
        elif locator_strategy == "xpath":
            path = value
        else:
            response.status = 501
            return {'sessionId': session_id, 'status': 32, 'value': 'Unsupported location strategy, use id, name, or XPath only. See docs for details.'}
        elem = autopy.bitmap.Bitmap.open(path)
        if not many:
            # Find-one: locate the image once; status 7 when absent.
            if context == "root":
                pos = autopy.bitmap.capture_screen().find_bitmap(elem,app.tolerance)
            else:
                canvas = autopy.bitmap.Bitmap.open(decode_value_from_wire(context))
                pos = canvas.find_bitmap(elem,app.tolerance)
            if pos is None:
                return {'sessionId': session_id, 'status': 7, 'value': 'Element not found'}
            found_elements = {'ELEMENT':encode_value_4_wire(path)}
        else:
            # Find-all: an empty list (not status 7) when nothing matches.
            # NOTE(review): each match wires back the same encoded image path,
            # so the returned element ids are indistinguishable from one
            # another; the individual match positions are discarded.
            if context == "root":
                if autopy.bitmap.capture_screen().count_of_bitmap(elem,app.tolerance) == 0:
                    found_elements = []
                else:
                    temp_elements = []
                    result = autopy.bitmap.capture_screen().find_every_bitmap(elem,app.tolerance)
                    for pos in result:
                        temp_elements.append({'ELEMENT':encode_value_4_wire(path)})
                    found_elements = temp_elements
            else:
                canvas = autopy.bitmap.Bitmap.open(decode_value_from_wire(context))
                if canvas.count_of_bitmap(elem,app.tolerance) == 0:
                    found_elements = []
                else:
                    temp_elements = []
                    result = canvas.find_every_bitmap(elem,app.tolerance)
                    for pos in result:
                        temp_elements.append({'ELEMENT':encode_value_4_wire(path)})
                    found_elements = temp_elements
        return {'sessionId': session_id, 'status': 0, 'value': found_elements}
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
@app.route('/wd/hub/session/<session_id>/screenshot', method='GET')
def get_screenshot(session_id=''):
    # Capture the whole screen, save it into the temp directory and return
    # the PNG base64-encoded, per the JSON wire protocol.
    try:
        # NOTE(review): os.tempnam is Python-2-only and race-prone
        # (tempfile would be the safer choice).
        path = os.path.join(os.path.dirname(os.tempnam()),'autopydriver_screenshot.png')
        autopy.bitmap.capture_screen().save(path)
        with open(path, 'rb') as screenshot:
            encoded_file = base64.b64encode(screenshot.read())
        return {'sessionId': session_id, 'status': 0, 'value': encoded_file}
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
@app.route('/wd/hub/session/<session_id>/keys', method='POST')
def keys(session_id=''):
    # Type the requested key sequence wherever the focus currently is.
    try:
        request_data = request.body.read()
        wired_keys = json.loads(request_data).get('value')
        autopy.key.type_string(''.join(wired_keys))
        return {'sessionId': session_id, 'status': 0}
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
@app.route('/wd/hub/session/<session_id>/element/<element_id>/location', method='GET')
def element_location(session_id='', element_id=''):
    # Report the on-screen coordinates where the element's image was found.
    try:
        bitmap = autopy.bitmap.Bitmap.open(decode_value_from_wire(element_id))
        found_at = autopy.bitmap.capture_screen().find_bitmap(bitmap,app.tolerance)
        return {'sessionId': session_id,
                'status': 0,
                'value': {'x': found_at[0], 'y': found_at[1]}}
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
@app.route('/wd/hub/session/<session_id>/element/<element_id>/size', method='GET')
def element_size(session_id='', element_id=''):
    # Report the pixel dimensions of the element's reference image.
    try:
        bitmap = autopy.bitmap.Bitmap.open(decode_value_from_wire(element_id))
        return {'sessionId': session_id,
                'status': 0,
                'value': {'width': bitmap.width, 'height': bitmap.height}}
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
@app.route('/wd/hub/session/<session_id>/element/<element_id>/displayed', method='GET')
def element_displayed(session_id='', element_id=''):
    # An element counts as "displayed" when its reference image can be
    # located anywhere on the current screen.
    try:
        path = decode_value_from_wire(element_id)
        elem = autopy.bitmap.Bitmap.open(path)
        pos = autopy.bitmap.capture_screen().find_bitmap(elem,app.tolerance)
        # find_bitmap yields None when the image is absent; the comparison
        # is already the boolean we need (no ternary required).
        return {'sessionId': session_id, 'status': 0, 'value': pos is not None}
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
@app.route('/wd/hub/session/<session_id>/file', method='POST')
def upload_file(session_id=''):
    # Receive a base64-encoded zip archive, write it to a temp file, and
    # extract it into that file's directory (WebDriver file-upload support).
    try:
        request_data = request.body.read()
        b64data = json.loads(request_data).get('file')
        byteContent = base64.b64decode(b64data)
        # NOTE(review): os.tempnam is Python-2-only and race-prone.
        path = os.tempnam()
        with open(path, 'wb') as f:
            f.write(byteContent)
        extracted_files = unzip(path,os.path.dirname(path))
    except:
        response.status = 400
        return {'sessionId': session_id, 'status': 13, 'value': str(sys.exc_info()[0])}
    # For (remote) file uploads - well currently AutoPyDriverServer will always be "remote"
    # we can't formally/technically support multiple file uploads yet, due to Selenium issue 2239
    # as the WebDriver/JSONWireProtocol spec doesn't define how to handle request/response
    # of multiple files uploaded. Therefore, we assume user upload single file for now
    result = "".join(extracted_files)
    app_response = {'sessionId': session_id,
                    'status': 0,
                    'value': result}
    return app_response
def unzip(source_filename, dest_dir):
    # Extract every member of the zip archive at source_filename into
    # dest_dir and return the list of extracted file paths.
    #
    # The inner loop sanitizes each directory component of the member name
    # (dropping drive letters, '.', '..' and empty parts) before extraction,
    # so hostile archives cannot escape dest_dir via path traversal.
    # NOTE(review): the *returned* paths are built from the raw
    # member.filename, so for a sanitized member the reported path can differ
    # from where the file actually landed - confirm whether callers care.
    import zipfile
    import os.path
    files_in_zip = []
    with zipfile.ZipFile(source_filename) as zf:
        for member in zf.infolist():
            words = member.filename.split('/')
            path = dest_dir
            for word in words[:-1]:
                drive, word = os.path.splitdrive(word)
                head, word = os.path.split(word)
                if word in (os.curdir, os.pardir, ''):
                    continue
                path = os.path.join(path, word)
            zf.extract(member, path)
            unzipped_file = os.path.join(dest_dir, member.filename)
            # Parenthesized form prints identically under Python 2 and 3,
            # unlike the original Python-2-only print statement.
            print("Unzipped a file: %s" % unzipped_file)
            files_in_zip.append(unzipped_file)
    return files_in_zip
@app.error(404)
def unsupported_command(error):
    # Bottle routes every unknown URL here; answer in plain text so
    # WebDriver clients surface a readable message.
    response.content_type = 'text/plain'
    message = 'Unrecognized command, or AutoPyDriverServer does not support/implement this: %s %s'
    return message % (request.method, request.path)
def encode_value_4_wire(value):
    # Turn an image path into a wire-safe WebDriver element id:
    # base64-encode it, then percent-escape the result for use in a URL.
    # NOTE(review): urllib.pathname2url is the Python 2 location; it moved
    # to urllib.request in Python 3.
    return urllib.pathname2url(base64.b64encode(value))
def | |
'''
Functions for calculating the relief along the crust-mantle interface of
a planet using gravity and topography.
'''
import numpy as np
import pyshtools as pysh
# ==== pyMoho ====
def pyMoho(pot, topo, lmax, rho_c, rho_m, thickave, filter_type=0, half=None,
           nmax=8, delta_max=5., lmax_calc=None, correction=None, quiet=False):
    """
    Calculate the relief along the crust-mantle interface assuming a
    constant density crust and mantle.

    Returns
    -------
    moho : SHCoeffs class instance
        The radius of the crust-mantle interface.

    Parameters
    ----------
    pot : SHGravCoeffs class instance
        Gravitational potential spherical harmonic coefficients.
    topo : SHCoeffs class instance
        Spherical harmonic coefficients of the surface relief.
    lmax : int
        Maximum spherical harmonic degree of the function, which determines the
        sampling interval of the internally computed grids.
    rho_c : float
        Crustal density in kg / m^3.
    rho_m : float
        Mantle density in kg / m^3.
    thickave : float
        Average thickness of the crust in meters.
    filter_type : int, optional, default = 0
        0 = no filtering, 1 = minimum amplitude filter, 2 = minimum
        curvature filter.
    half : float, optional, default = None
        The spherical harmonic degree where the filter is equal to 0.5. This
        must be set when filter_type is 1 or 2.
    nmax : int, optional, default = 8
        The maximum order used in the Taylor-series expansion when calculating
        the potential coefficients.
    delta_max : float, optional, default = 5.0
        The algorithm will continue to iterate until the maximum difference in
        relief between solutions is less than this value (in meters).
    lmax_calc : int
        Maximum spherical harmonic degree when evalulating the functions.
    correction : SHGravCoeffs class instance, optional, default = None
        If present, these coefficients will be added to the Bouguer correction
        (subtracted from the Bouguer anomaly) before performing the inversion.
        This could be used to account for the pre-computed gravitational
        attraction of the polar caps of Mars, which have a different density
        than the crust.
    quiet : boolean, optional, default = False
        If True, suppress printing output during the iterations.
    """
    if (filter_type == 1 or filter_type == 2) and half is None:
        raise ValueError("half must be set when filter_type is either 1 or 2.")

    if lmax_calc is None:
        lmax_calc = lmax

    # Mean radius of the crust-mantle interface: degree-0 surface radius
    # minus the requested mean crustal thickness.
    d = topo.coeffs[0, 0, 0] - thickave
    mass = pot.mass

    topo_grid = topo.expand(grid='DH2', lmax=lmax, extend=False)

    if quiet is False:
        print("Maximum radius (km) = {:f}".format(topo_grid.data.max() / 1.e3))
        print("Minimum radius (km) = {:f}".format(topo_grid.data.min() / 1.e3))

    # Potential coefficients of the surface relief (the Bouguer correction),
    # computed with an nmax-order finite-amplitude expansion; r0 is the
    # reference radius of that expansion.
    bc, r0 = pysh.gravmag.CilmPlusDH(topo_grid.data, nmax, mass,
                                     rho_c, lmax=lmax_calc)
    if correction is not None:
        bc += correction.change_ref(r0=r0).to_array(lmax=lmax_calc)

    # Bouguer anomaly: observed potential (re-referenced to r0) minus the
    # contribution of the surface relief.
    pot2 = pot.change_ref(r0=r0)
    ba = pot2.to_array(lmax=lmax_calc, errors=False) - bc

    # First estimate of the Moho relief: downward-continue the Bouguer
    # anomaly to the mean interface radius d (mass-sheet approximation),
    # optionally stabilized by a minimum-amplitude (1) or minimum-curvature
    # (2) filter.
    moho = pysh.SHCoeffs.from_zeros(lmax=lmax_calc)
    moho.coeffs[0, 0, 0] = d

    for l in range(1, lmax_calc + 1):
        if filter_type == 0:
            moho.coeffs[:, l, :l + 1] = ba[:, l, :l + 1] * mass * \
                (2 * l + 1) * ((r0 / d)**l) / \
                (4. * np.pi * (rho_m - rho_c) * d**2)
        elif filter_type == 1:
            moho.coeffs[:, l, :l + 1] = pysh.gravmag.DownContFilterMA(
                l, half, r0, d) * ba[:, l, :l + 1] * mass * \
                (2 * l + 1) * ((r0 / d)**l) / \
                (4. * np.pi * (rho_m - rho_c) * d**2)
        else:
            moho.coeffs[:, l, :l + 1] = pysh.gravmag.DownContFilterMC(
                l, half, r0, d) * ba[:, l, :l + 1] * mass * \
                (2 * l + 1) * ((r0 / d)**l) / \
                (4. * np.pi * (rho_m - rho_c) * d**2)

    moho_grid3 = moho.expand(grid='DH2', lmax=lmax, lmax_calc=lmax_calc,
                             extend=False)
    temp_grid = topo_grid - moho_grid3
    if quiet is False:
        print('Maximum Crustal thickness (km) = {:e}'.format(
            temp_grid.data.max() / 1.e3))
        print('Minimum Crustal thickness (km) = {:e}'.format(
            temp_grid.data.min() / 1.e3))

    # Second estimate: invert the Bouguer anomaly for the relief itself
    # using the full finite-amplitude expression.
    moho.coeffs = pysh.gravmag.BAtoHilmDH(ba, moho_grid3.data, nmax,
                                          mass, r0, (rho_m - rho_c),
                                          lmax=lmax,
                                          filter_type=filter_type,
                                          filter_deg=half,
                                          lmax_calc=lmax_calc)

    moho_grid2 = moho.expand(grid='DH2', lmax=lmax, lmax_calc=lmax_calc,
                             extend=False)
    temp_grid = topo_grid - moho_grid2
    if quiet is False:
        print('Delta (km) = {:e}'.format(abs(moho_grid3.data -
                                             moho_grid2.data).max() / 1.e3))
        print('Maximum Crustal thickness (km) = {:f}'
              .format(temp_grid.data.max() / 1.e3))
        print('Minimum Crustal thickness (km) = {:f}'
              .format(temp_grid.data.min() / 1.e3))

    # Iterate: average the two previous solutions to damp oscillations,
    # re-invert, and stop once successive reliefs agree to within delta_max
    # meters anywhere on the grid.
    iter = 0  # NOTE(review): shadows the builtin iter(); rename if touched.
    delta = 1.0e9

    while delta > delta_max:
        iter += 1

        if quiet is False:
            print('Iteration {:d}'.format(iter))

        # Averaged relief of the last two solutions.
        moho_grid = (moho_grid2 + moho_grid3) / 2.
        temp_grid = topo_grid - moho_grid

        if quiet is False:
            print("Delta (km) = {:e}".format(
                abs(moho_grid.data - moho_grid2.data).max() / 1.e3))
            print('Maximum Crustal thickness (km) = {:f}'.format(
                temp_grid.data.max() / 1.e3))
            print('Minimum Crustal thickness (km) = {:f}'.format(
                temp_grid.data.min() / 1.e3))

        moho_grid3 = moho_grid2
        moho_grid2 = moho_grid

        iter += 1

        if quiet is False:
            print('Iteration {:d}'.format(iter))

        # Re-invert the Bouguer anomaly starting from the averaged relief.
        moho.coeffs = pysh.gravmag.BAtoHilmDH(ba, moho_grid2.data, nmax,
                                              mass, r0,
                                              (rho_m - rho_c), lmax=lmax,
                                              filter_type=filter_type,
                                              filter_deg=half,
                                              lmax_calc=lmax_calc)

        moho_grid = moho.expand(grid='DH2', lmax=lmax, lmax_calc=lmax_calc,
                                extend=False)
        delta = abs(moho_grid.data - moho_grid2.data).max()
        temp_grid = topo_grid - moho_grid

        if quiet is False:
            print('Delta (km) = {:e}'.format(delta / 1.e3))
            print('Maximum Crustal thickness (km) = {:f}'.format(
                temp_grid.data.max() / 1.e3))
            print('Minimum Crustal thickness (km) = {:f}'.format(
                temp_grid.data.min() / 1.e3))

        moho_grid3 = moho_grid2
        moho_grid2 = moho_grid

        # Divergence guard: a crustal thickness beyond 500 km means the
        # iteration is blowing up.
        # NOTE(review): exit(1) kills the host interpreter; raising an
        # exception would be friendlier to library users - confirm before
        # changing.
        if abs(temp_grid.data).max() > 500.e3:
            print('Not converging')
            exit(1)

    return moho
# ==== pyMohoRho ====
def pyMohoRho(pot, topo, density, porosity, lmax, rho_m, thickave,
filter_type=0, half=None, nmax=8, delta_max=5., lmax_calc=None,
correction=None, quiet=False):
"""
Calculate the relief along the crust-mantle interface assuming a
constant density mantle and a laterally varying crustal density.
Returns
-------
moho : SHCoeffs class instance
The radius of the crust-mantle interface.
Parameters
----------
pot : SHGravCoeffs class instance
Gravitational potential spherical harmonic coefficients.
topo : SHCoeffs class instance
Spherical harmonic coefficients of the surface relief.
density : SHCoeffs class instance
Spherical harmonic coefficients of the crustal grain density.
porosity : float
Crustal porosity (from 0 to 1).
lmax : int
Maximum spherical harmonic degree of the function, which determines the
sampling interval of the internally computed grids.
rho_m : float
Mantle density in kg / m^3.
thickave : float
Average thickness of the crust in meters.
filter_type : int, optional, default = 0
0 = no filtering, 1 = minimum amplitude filter, 2 = minimum
curvature filter.
half : float, optional, default = None
The spherical harmonic degree where the filter is equal to 0.5. This
must be set when filter_type is 1 or 2.
nmax : int, optional, default = 8
The maximum order used in the Taylor-series expansion when calculating
the potential coefficients.
delta_max : float, optional, default = 5.0
The algorithm will continue to iterate until the maximum difference in
relief between solutions is less than this value (in meters).
lmax_calc : int
Maximum spherical harmonic degree when evalulating the functions.
correction : SHGravCoeffs class instance, optional, default = None
If present, these coefficients will be added to the Bouguer correction
(subtracted from the Bouguer anomaly) before performing the inversion.
This could be used to account for the pre-computed gravitational
attraction of the polar caps of Mars, which have a different density
than the crust.
quiet : boolean, optional, default = False
If True, suppress printing output during the iterations.
"""
if (filter_type == 1 or filter_type == 2) and half is None:
raise ValueError("half must be set when filter_type is either 1 or 2.")
if lmax_calc is None:
lmax_calc = lmax
d = topo.coeffs[0, 0, 0] - thickave
rho_crust_ave = density.coeffs[0, 0, 0] * (1. - porosity)
mass = pot.mass
topo_grid = topo.expand(grid='DH2', lmax=lmax, extend=False)
density_grid = density.expand(grid='DH2', lmax=lmax, extend=False)
if quiet is False:
print("Maximum radius (km) = {:f}".format(topo_grid.data.max() / 1.e3))
print("Minimum radius (km) = {:f}".format(topo_grid.data.min() / 1.e3))
print("Maximum density (kg/m3) = {:f}".format(
density_grid.data.max() / 1.e3))
print("Minimum desntiy (kg/m3) = {:f}".format(
density_grid.data.min() / 1.e3))
bc, r0 = pysh.gravmag.CilmPlusRhoHDH(
topo_grid.data, nmax, mass, density_grid.data * (1. - porosity),
lmax=lmax_calc)
if correction is not None:
bc += correction.change_ref(r0=r0).to_array(lmax=lmax_calc)
pot2 = pot.change_ref(r0=r0)
ba = pot2.to_array(lmax=lmax_calc, errors=False) - bc
# next subtract lateral variations in the crust without reflief
for l in range(1, lmax_calc + 1):
ba[:, l, :l + 1] = ba[:, l, :l + 1] \
- 4. * np.pi * density.coeffs[:, l, :l + 1] \
* (1. - porosity) \
* (r0**3 - (d**3)*(d/r0)**l) \
/ (2 * l + 1) / (l + 3) / | |
# Repository: dwillmer/pyquil
##############################################################################
# Copyright 2016-2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import re
import time
import warnings
from json.decoder import JSONDecodeError
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, cast
import numpy as np
import requests
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from pyquil.api._config import PyquilConfig
from pyquil.api._error_reporting import _record_call
from pyquil.api._errors import (
error_mapping,
ApiError,
UserMessageError,
UnknownApiError,
TooManyQubitsError,
)
from pyquil.api._logger import logger
from pyquil.quil import Program
from pyquil.version import __version__
from pyquil.wavefunction import Wavefunction
TYPE_EXPECTATION = "expectation"
TYPE_MULTISHOT = "multishot"
TYPE_MULTISHOT_MEASURE = "multishot-measure"
TYPE_WAVEFUNCTION = "wavefunction"
def get_json(session: requests.Session, url: str, params: Optional[Dict[Any, Any]] = None) -> Any:
    """
    Get JSON from a Forest endpoint.

    Raises the mapped ApiError when the server answers with a 4xx/5xx status.
    """
    logger.debug("Sending GET request to %s. Params: %s", url, params)
    reply = session.get(url, params=params)
    if reply.status_code >= 400:
        raise parse_error(reply)
    return reply.json()
def post_json(session: requests.Session, url: str, json: Any) -> requests.models.Response:
    """
    Post JSON to the Forest endpoint.

    Returns the raw response; raises the mapped ApiError on a 4xx/5xx status.
    """
    logger.debug("Sending POST request to %s. Body: %s", url, json)
    reply = session.post(url, json=json)
    if reply.status_code >= 400:
        raise parse_error(reply)
    return reply
def parse_error(res: requests.Response) -> ApiError:
    """
    Every server error should contain a "status" field with a human readable explanation of
    what went wrong as well as a "error_type" field indicating the kind of error that can be mapped
    to a Python type.

    There's a fallback error UnknownError for other types of exceptions (network issues, api
    gateway problems, etc.)
    """
    # A non-JSON body means the error did not come from Forest itself.
    try:
        payload = res.json()
    except JSONDecodeError:
        raise UnknownApiError(res.text)

    if "error_type" not in payload:
        raise UnknownApiError(str(payload))

    status = payload["status"]
    # Special-case the qubit-limit message, which shares a generic error_type.
    if re.search(r"[0-9]+ qubits were requested, but the QVM is limited to [0-9]+ qubits.", status):
        return TooManyQubitsError(status)

    error_cls = error_mapping.get(payload["error_type"], UnknownApiError)
    return error_cls(status)
def get_session(*args: Any, **kwargs: Any) -> "ForestSession":
    """
    Create a requests session to access the REST API

    :return: requests session
    :rtype: Session
    """
    session = ForestSession(*args, **kwargs)
    # Retry POSTs a few times on gateway/overload statuses with a short
    # backoff; raise_on_status=False leaves error handling to parse_error.
    # NOTE(review): urllib3 >= 1.26 renames method_whitelist to
    # allowed_methods - confirm the pinned urllib3 version before upgrading.
    retry_adapter = HTTPAdapter(
        max_retries=Retry(
            total=3,
            method_whitelist=["POST"],
            status_forcelist=[502, 503, 504, 521, 523],
            backoff_factor=0.2,
            raise_on_status=False,
        )
    )

    session.mount("http://", retry_adapter)
    session.mount("https://", retry_adapter)

    # We need this to get binary payload for the wavefunction call.
    session.headers.update({"Accept": "application/octet-stream"})

    session.headers.update({"Content-Type": "application/json; charset=utf-8"})

    return session
def validate_noise_probabilities(noise_parameter: Optional[List[float]]) -> None:
    """
    Is noise_parameter a valid specification of noise probabilities for depolarizing noise?

    A valid specification is either falsy (no noise requested) or a list of
    exactly three non-negative floats whose sum lies in [0, 1].

    :param list noise_parameter: List of noise parameter values to be validated.
    :raises TypeError: if noise_parameter is not a list of floats.
    :raises ValueError: if the list has the wrong length, its sum is outside
        [0, 1], or any entry is negative.
    """
    if not noise_parameter:
        # None or an empty list means "no noise"; nothing to validate.
        return
    if not isinstance(noise_parameter, list):
        raise TypeError("noise_parameter must be a list")
    # Generator form: no throwaway list inside any().
    if any(not isinstance(value, float) for value in noise_parameter):
        raise TypeError("noise_parameter values should all be floats")
    if len(noise_parameter) != 3:
        raise ValueError("noise_parameter lists must be of length 3")
    if sum(noise_parameter) > 1 or sum(noise_parameter) < 0:
        raise ValueError("sum of entries in noise_parameter must be between 0 and 1 (inclusive)")
    if any(value < 0 for value in noise_parameter):
        raise ValueError("noise_parameter values should all be non-negative")
def validate_qubit_list(qubit_list: Sequence[int]) -> Sequence[int]:
    """
    Check the validity of qubits for the payload.

    :param qubit_list: List of qubits to be validated.
    :return: The input sequence, unchanged, when it is valid.
    :raises TypeError: if qubit_list is not a Sequence of non-negative ints.
    """
    if not isinstance(qubit_list, Sequence):
        raise TypeError("'qubit_list' must be of type 'Sequence'")
    # Qubit indices are 0-based, so 0 itself is legal; the check rejects
    # negatives only (message fixed: it previously said "positive").
    if any(not isinstance(i, int) or i < 0 for i in qubit_list):
        raise TypeError("'qubit_list' must contain non-negative integer values")
    return qubit_list
def prepare_register_list(
    register_dict: Dict[str, Union[bool, Sequence[int]]]
) -> Dict[str, Union[bool, Sequence[int]]]:
    """
    Canonicalize classical addresses for the payload and ready MemoryReference instances
    for serialization.

    Keys mapped to True are left untouched (read out the whole region); any
    other value is coerced to a list of Python ints, which supports offsets
    given as ``range()`` or numpy arrays. The dictionary is canonicalized in
    place and also returned.

    :param register_dict: The classical memory to retrieve. Specified as a dictionary:
        the keys are the names of memory regions, and the values are either (1) a list of
        integers for reading out specific entries in that memory region, or (2) True, for
        reading out the entire memory region.
    """
    if not isinstance(register_dict, dict):
        raise TypeError("register_dict must be a dict but got " + repr(register_dict))

    for region_name, selection in register_dict.items():
        if isinstance(selection, bool):
            assert selection  # If boolean v must be True
            continue

        # Coerce int-likes (numpy scalars, range members, ...) to plain ints.
        offsets = [int(offset) for offset in selection]
        if not all(offset >= 0 for offset in offsets):
            raise TypeError("Negative indices into classical arrays are not allowed.")
        register_dict[region_name] = offsets

    return register_dict
def run_and_measure_payload(
    quil_program: Program, qubits: Sequence[int], trials: int, random_seed: int
) -> Dict[str, object]:
    """REST payload for :py:func:`ForestConnection._run_and_measure`"""
    # An empty program is a user error, not a degenerate request.
    if not quil_program:
        raise ValueError(
            "You have attempted to run an empty program."
            " Please provide gates or measure instructions to your program."
        )
    if not isinstance(quil_program, Program):
        raise TypeError("quil_program must be a Quil program object")
    checked_qubits = validate_qubit_list(qubits)
    if not isinstance(trials, int):
        raise TypeError("trials must be an integer")

    body: Dict[str, object] = {
        "type": TYPE_MULTISHOT_MEASURE,
        "qubits": list(checked_qubits),
        "trials": trials,
        "compiled-quil": quil_program.out(calibrations=False),
    }
    # The seed is optional; omit the key entirely when not supplied.
    if random_seed is not None:
        body["rng-seed"] = random_seed
    return body
def wavefunction_payload(quil_program: Program, random_seed: int) -> Dict[str, object]:
    """REST payload for :py:func:`ForestConnection._wavefunction`"""
    if not isinstance(quil_program, Program):
        raise TypeError("quil_program must be a Quil program object")

    body: Dict[str, object] = {"type": TYPE_WAVEFUNCTION}
    body["compiled-quil"] = quil_program.out(calibrations=False)
    # The seed is optional; omit the key entirely when not supplied.
    if random_seed is not None:
        body["rng-seed"] = random_seed
    return body
def expectation_payload(
    prep_prog: Program, operator_programs: Optional[Iterable[Program]], random_seed: int
) -> Dict[str, object]:
    """REST payload for :py:func:`ForestConnection._expectation`"""
    # No operators means "measure the identity".
    if operator_programs is None:
        operator_programs = [Program()]
    if not isinstance(prep_prog, Program):
        raise TypeError("prep_prog variable must be a Quil program object")

    body: Dict[str, object] = {
        "type": TYPE_EXPECTATION,
        "state-preparation": prep_prog.out(calibrations=False),
        "operators": [op.out(calibrations=False) for op in operator_programs],
    }
    # The seed is optional; omit the key entirely when not supplied.
    if random_seed is not None:
        body["rng-seed"] = random_seed
    return body
def qvm_run_payload(
    quil_program: Program,
    classical_addresses: Dict[str, Union[bool, Sequence[int]]],
    trials: int,
    measurement_noise: Optional[Tuple[float, float, float]],
    gate_noise: Optional[Tuple[float, float, float]],
    random_seed: Optional[int],
) -> Dict[str, object]:
    """REST payload for :py:func:`ForestConnection._qvm_run`"""
    # An empty program is a user error, not a degenerate request.
    if not quil_program:
        raise ValueError(
            "You have attempted to run an empty program."
            " Please provide gates or measure instructions to your program."
        )
    if not isinstance(quil_program, Program):
        raise TypeError("quil_program must be a Quil program object")
    addresses = prepare_register_list(classical_addresses)
    if not isinstance(trials, int):
        raise TypeError("trials must be an integer")

    body: Dict[str, object] = {
        "type": TYPE_MULTISHOT,
        "addresses": addresses,
        "trials": trials,
        "compiled-quil": quil_program.out(calibrations=False),
    }
    # Optional knobs are only serialized when explicitly provided.
    if measurement_noise is not None:
        body["measurement-noise"] = measurement_noise
    if gate_noise is not None:
        body["gate-noise"] = gate_noise
    if random_seed is not None:
        body["rng-seed"] = random_seed
    return body
class ForestSession(requests.Session):
"""
ForestSession inherits from requests.Session. It is responsible for adding
authentication headers to Forest server requests. Upon receiving a 401 or 403
response, it will attempt to refresh the auth credential and update the
PyquilConfig, which in turn writes the refreshed auth credential to file.
Encapsulates the operations required for authorization & encryption
with the QPU.
Two operations are involved in authorization:
* Requesting & storing a user authentication token, used to authenticate calls
to Forest, Dispatch, and other Rigetti services
* Requesting a Curve ZeroMQ keypair for connection to the QPU. The response to
this request also comes with service endpoints: compiler server and QPU
The authentication tokens are of the standard JWT format and are issued by Forest Server.
The refresh token is only used to renew the access token, which is used for all transactions
and is valid for a short period of time.
In wrapping the PyQuilConfig object, it provides that object with a callback to
retrieve a valid engagement when needed, because the engagement is maintained here
but is used by the config to provide service endpoints.
"""
def __init__(self, *, config: PyquilConfig, lattice_name: Optional[str] = None):
super().__init__()
self.config = config
self.config.get_engagement = self.get_engagement
self._engagement: Optional["Engagement"] = None
self.headers.update(self.config.qcs_auth_headers)
self.headers["User-Agent"] = f"PyQuil/{__version__}"
self.lattice_name = lattice_name
def _engage(self) -> Optional["Engagement"]:
"""
The heart of the QPU authorization process, ``engage`` makes a request to
the dispatch server for the information needed to communicate with the QPU.
This is a standard GraphQL request, authenticated using the access token
retrieved from Forest Server.
The response includes the endpoints to | |
accepts a 2d array with a *single entry* as its argument and returns a scalar. The single
entry is the TransferMechanism's current `value <Mechanism_Base.value>` (that is, its previous_value
is ignored). After each execution, the function is passed the Mechanism's current `value <Mechanism_Base.value>`,
and the scalar returned is compared to **termination_threshold** using the comparison operator specified by
**termination_comparison_op**. Execution continues until this returns True, as in the following example::
>>> my_mech = pnl.TransferMechanism(size=2,
... integrator_mode=True,
... termination_measure=max,
... termination_threshold=0.9,
... termination_comparison_op=pnl.GREATER_THAN_OR_EQUAL)
>>> my_mech.execute([0.5, 1])
array([[0.46875, 0.9375 ]])
>>> my_mech.num_executions_before_finished
4
Here, ``my_mech`` continued to execute for ``4`` times, until the element of the Mechanism's `value
<Mechanism_Base.value>` with the greatest value exceeded ``0.9``. Note that GREATER_THAN_OR_EQUAL is a keyword for
the string ">=", which is a key in the `comparison_operators` dict for the Python ``operator.ge``; any of these
can be used to specify **termination_comparison_op**.
.. _TransferMechanism_Examples_Termination_By_Time:
*Termination by time*. This terminates execution when the Mechanism has executed at least a number of times equal
to the **threshold** at a particular TimeScale (e.g., within a `RUN` or a `TRIAL <TimeScale.TRIAL>`). This is
specified by assigning a `TimeScale` to **termination_measure**; execution terminates when the number of
executions at that TimeScale equals the **termination_threshold**. Note that, in this case,
the **termination_comparison_op** argument is ignored (the `termination_comparison_op
<TransferMechanism.termination_comparison_op>` is automatically set to *GREATER_THAN_OR_EQUAL*). For example,
``my_mech`` is configured below to execute at least twice per trial::
>>> my_mech = pnl.TransferMechanism(size=2,
... integrator_mode=True,
... termination_measure=TimeScale.TRIAL,
... termination_threshold=2)
>>> my_mech.execute([0.5, 1])
array([[0.375, 0.75 ]])
>>> my_mech.num_executions_before_finished
2
As noted `above <TransferMechanism_Continued_Execution>`, it will continue to execute if it is called again,
but only once per call::
>>> my_mech.execute([0.5, 1])
array([[0.4375, 0.875 ]])
>>> my_mech.num_executions_before_finished
1
>>> my_mech.execute([0.5, 1])
array([[0.46875, 0.9375 ]])
>>> my_mech.num_executions_before_finished
1
In the following example, this behavior is exploited to allow a recurrent form of TransferMechanism (``attention``)
to integrate for a fixed number of steps (e.g., to simulate the time taken to encode an instruction regarding the
which feature of the stimulus should be attended) before a stimulus is presented, and then allowing that
Mechanism to continue to integrate the instruction and impact stimulus processing once the stimulus is presented::
>>> stim_input = pnl.ProcessingMechanism(size=2)
>>> stim_percept = pnl.TransferMechanism(size=2, function=pnl.Logistic)
>>> decision = pnl.TransferMechanism(name='Decision', size=2,
... integrator_mode=True,
... execute_until_finished=False,
... termination_threshold=0.65,
... termination_measure=max,
... termination_comparison_op=pnl.GREATER_THAN)
>>> instruction_input = pnl.ProcessingMechanism(size=2, function=pnl.Linear(slope=10))
>>> attention = pnl.LCAMechanism(name='Attention', size=2, function=pnl.Logistic,
... leak=8, competition=8, self_excitation=0, time_step_size=.1,
... termination_threshold=3,
... termination_measure = pnl.TimeScale.TRIAL)
>>> response = pnl.ProcessingMechanism(name='Response', size=2)
...
>>> comp = pnl.Composition()
>>> comp.add_linear_processing_pathway([stim_input, [[1,-1],[-1,1]], stim_percept, decision, response]) #doctest: +SKIP
>>> comp.add_linear_processing_pathway([instruction_input, attention, stim_percept]) #doctest: +SKIP
>>> comp.scheduler.add_condition(response, pnl.WhenFinished(decision)) #doctest: +SKIP
...
>>> stim_percept.set_log_conditions([pnl.RESULT])
>>> attention.set_log_conditions([pnl.RESULT])
>>> decision.set_log_conditions([pnl.RESULT])
>>> response.set_log_conditions(['OutputPort-0'])
...
>>> inputs = {stim_input: [[1, 1], [1, 1]],
... instruction_input: [[1, -1], [-1, 1]]}
>>> comp.run(inputs=inputs) # doctest: +SKIP
This example implements a simple model of attentional selection in perceptual decision making. In the model,
``stim_input`` represents the stimulus input, which is passed to ``stim_percept``, which also receives input
from the ``attention`` Mechanism. ``stim_percept`` passes its output to ``decision``, which integrates its input
until one of the state_features of the input (the first or second) reaches the threshold of 0.65, at which point
``response`` executes (specified by the condition ``(response, WhenFinished(decision))``). In addition to the
``stim_input``, the model receives an instruction on each trial in ``instruction_input`` that specifies which feature of
the stimulus (i.e., the first or second element) should be "attended". This is passed to the ``attention``
Mechanism, which uses it to select which feature of ``stim_percept`` should be passed to ``decision``, and thereby
determine the response. Like the ``decision`` Mechanism, the ``attention`` Mechanism integrates its input.
However, its **threshold_measure** is specified as ``TimeScale.TRIAL`` and its **threshold** as ``3``, so it
carries out 3 steps of integration the first time it is executed in each trial. Thus, when the input is presented
at the beginning of each trial, first ``stim_input`` and ``instruction_input`` execute. Then ``attention``
executes, but ``stim_percept`` does not yet do so, since it receives input from ``attention`` and thus must wait
for that to execute first. When ``attention`` executes, it carries out its three steps of integration,
(giving it a chance to "encode" the instruction before the stimulus is processed by ``stim_percept``). Then
``stim_percept`` executes, followed by ``decision``. However, the latter carries out only one step of integration,
since its **execute_until_finished** is set to False. If its output does not meet its termination condition after
that one step of integration, then ``response`` does not execute, since it has been assigned a condition that
requires ``decision`` to terminate before it does so. As a result, since ``response`` has not executed, the trial
continues. On the next pass, ``attention`` carries out only one step of integration, since its termination
condition has already been met, as does ``decision`` since its termination condition has *not* yet been met. If
it is met, then ``response`` executes and the trial ends (since all Mechanisms have now had an opportunity to
execute). The value of the ``attention`` and ``decision`` Mechanisms after each execution are shown below::
>>> attention.log.print_entries(display=[pnl.TIME, pnl.VALUE]) #doctest: +SKIP
Log for Attention:
Logged Item: Time Value
'RESULT' 0:0:0:1 [0.64565631 0.19781611] # Trial 0
'RESULT' 0:0:0:1 [0.72347147 0.1422746 ]
'RESULT' 0:0:0:1 [0.74621565 0.1258587 ]
'RESULT' 0:0:1:1 [0.75306362 0.1208305 ]
'RESULT' 0:0:2:1 [0.75516272 0.11926922]
'RESULT' 0:0:3:1 [0.75581168 0.11878318]
'RESULT' 0:0:4:1 [0.75601306 0.11863188]
'RESULT' 0:1:0:1 [0.2955214 0.49852489] # Trial 1
'RESULT' 0:1:0:1 [0.17185129 0.68187518]
'RESULT' 0:1:0:1 [0.13470156 0.73399742]
'RESULT' 0:1:1:1 [0.1235536 0.74936691]
'RESULT' 0:1:2:1 [0.12011584 0.75402671]
>>> decision.log.print_entries(display=[pnl.TIME, pnl.VALUE]) #doctest: +SKIP
Log for Decision:
Logged Item: Time Value
'RESULT' 0:0:0:3 [0.33917677 0.2657116 ] # Trial 0
'RESULT' 0:0:1:3 [0.50951133 0.39794126]
'RESULT' 0:0:2:3 [0.59490696 0.46386164]
'RESULT' 0:0:3:3 [0.63767534 0.49676128]
'RESULT' 0:0:4:3 [0.65908142 0.51319226]
'RESULT' 0:1:0:3 [0.59635299 0.59443706] # Trial 1
'RESULT' 0:1:1:3 [0.56360108 0.6367389 ]
'RESULT' 0:1:2:3 [0.54679699 0.65839718]
>>> response.log.print_entries(display=[pnl.TIME, pnl.VALUE]) #doctest: +SKIP
Log for Response:
Logged Item: Time Value
'OutputPort-0' 0:0:4:4 [0.65908142 0.51319226] # Trial 0
'OutputPort-0' 0:1:2:4 [0.54679699 0.65839718] # Trial 1
The `Time` signatures are ``run:trial:pass:time_step``. Note that ``attention`` always executes in `time_step` 1
(after ``stim_input`` and ``instruction_input`` which execute in time_step 0). In trial 0, ``attention``
executes three times in pass 0 (to reach its specified threshold), and then again in passes 1, 2 and 3 and 4
along with ``decision`` (which executes in time_step 3, after ``stim_percept`` in time_step 2),
as the trial continues and ``decision`` executes until reaching its threshold. Note that ``response``
executed only in pass 4, since it depends on the termination of ``decision``. Note also that in trial 1
``attention`` executes 3 times in pass 0 as it did in trial 0; however, ``decision`` executes only 3 times
since it begins closer to its threshold in that trial.
.. _TransferMechanism_Class_Reference:
Class Reference
---------------
"""
import copy
import inspect
import logging
import numbers
import types
import warnings
from collections.abc import Iterable
import numpy as np
import typecheck as tc
from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination, SUM
from psyneulink.core.components.functions.nonstateful.distributionfunctions import DistributionFunction
from psyneulink.core.components.functions.function import Function, is_function_type
from psyneulink.core.components.functions.nonstateful.objectivefunctions import Distance
from psyneulink.core.components.functions.nonstateful.selectionfunctions import SelectionFunction
from psyneulink.core.components.functions.stateful.integratorfunctions import AdaptiveIntegrator
from psyneulink.core.components.functions.stateful.integratorfunctions import IntegratorFunction
from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear, Logistic, TransferFunction
from psyneulink.core.components.functions.userdefinedfunction import UserDefinedFunction
from psyneulink.core.components.mechanisms.mechanism import Mechanism, MechanismError
from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import _is_control_spec
from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base
from psyneulink.core.components.ports.inputport import InputPort
from psyneulink.core.components.ports.outputport import OutputPort
from psyneulink.core.globals.context import ContextFlags, handle_external_context
from psyneulink.core.globals.keywords import \
COMBINE, comparison_operators, EXECUTION_COUNT, FUNCTION, GREATER_THAN_OR_EQUAL, \
CURRENT_VALUE, LESS_THAN_OR_EQUAL, MAX_ABS_DIFF, \
NAME, NOISE, NUM_EXECUTIONS_BEFORE_FINISHED, OWNER_VALUE, RESET, RESULT, RESULTS, \
SELECTION_FUNCTION_TYPE, TRANSFER_FUNCTION_TYPE, TRANSFER_MECHANISM, VARIABLE
from psyneulink.core.globals.parameters import Parameter, FunctionParameter
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.core.globals.utilities import \
all_within_range, append_type_to_name, iscompatible, is_comparison_operator, convert_to_np_array, safe_equals
from psyneulink.core.scheduling.time import TimeScale
# Public API of this module (names exported by `from ... import *`).
__all__ = [
    'INITIAL_VALUE', 'CLIP', 'INTEGRATOR_FUNCTION', 'INTEGRATION_RATE',
    'TERMINATION_THRESHOLD', 'TERMINATION_MEASURE', 'TERMINATION_MEASURE_VALUE',
    'Transfer_DEFAULT_BIAS', 'Transfer_DEFAULT_GAIN', 'Transfer_DEFAULT_LENGTH', 'Transfer_DEFAULT_OFFSET',
    'TransferError', 'TransferMechanism',
]
# TransferMechanism parameter keywords:
# string keys used in specification dicts / Parameter names for
# TransferMechanism-specific parameters.
CLIP = "clip"
INTEGRATOR_FUNCTION = 'integrator_function'
INTEGRATION_RATE = "integration_rate"
INITIAL_VALUE = 'initial_value'
TERMINATION_THRESHOLD = 'termination_threshold'
TERMINATION_MEASURE = 'termination_measure'
TERMINATION_MEASURE_VALUE = 'termination_measure_value'
# termination_measure specifications that count executions rather than
# applying a Function to the Mechanism's value.
termination_keywords = [EXECUTION_COUNT, NUM_EXECUTIONS_BEFORE_FINISHED]
# TransferMechanism default parameter values:
Transfer_DEFAULT_LENGTH = 1  # default length of the Mechanism's variable
Transfer_DEFAULT_GAIN = 1    # default gain for the transfer function
Transfer_DEFAULT_BIAS = 0    # default bias for the transfer function
Transfer_DEFAULT_OFFSET = 0  # default offset added to the function's output
# Transfer_DEFAULT_RANGE = np.array([])
# Module-level logger for this file.
logger = logging.getLogger(__name__)
class TransferError(Exception):
    """Exception raised for errors in configuration or execution of a TransferMechanism.

    The offending value (typically an error message) is available as
    ``error_value`` and is also passed to ``Exception``.
    """

    def __init__(self, error_value):
        # Pass the value to Exception.__init__ so that `args`, pickling,
        # and generic exception handling behave normally (the original
        # implementation skipped this, leaving `args` empty).
        super().__init__(error_value)
        self.error_value = error_value

    def __str__(self):
        # Preserve the historical string form: repr of the stored value.
        return repr(self.error_value)
def _integrator_mode_setter(value, owning_component=None, context=None):
if value:
if not owning_component.parameters.integrator_mode._get(context):
# when first creating parameters, integrator_function is not
# instantiated yet
if (
not owning_component.is_initializing
and owning_component.integrator_function.parameters.execution_count._get(context) > 0
):
# force, because integrator_mode is currently False
# (will be set after exiting this method)
if owning_component.on_resume_integrator_mode == CURRENT_VALUE:
owning_component.reset(
| |
calendar = property(fget=get_calendar)
@abc.abstractmethod
def can_search_schedule_slots(self):
"""Tests if this user can perform ``ScheduleSlots`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
:return: ``false`` if search methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def use_federated_calendar_view(self):
"""Federates the view for methods in this session.
A federated view will include schedules in calendars which are
children of this calendar in the calendar hierarchy.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
@abc.abstractmethod
def use_isolated_calendar_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts searches to this calendar only.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
@abc.abstractmethod
def use_sequestered_schedule_slot_view(self):
"""The returns from the search methods omit sequestered schedule slots.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
@abc.abstractmethod
def use_unsequestered_schedule_slot_view(self):
"""All schedule slots are returned including sequestered schedule slots.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
@abc.abstractmethod
def get_schedule_slot_query(self):
"""Gets a schedule slot query.
:return: the schedule slot query
:rtype: ``osid.calendaring.ScheduleSlotQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.ScheduleSlotQuery
schedule_slot_query = property(fget=get_schedule_slot_query)
@abc.abstractmethod
def get_schedule_slots_by_query(self, schedule_slot_query):
"""Gets a list of ``ScheduleSlots`` matching the given schedule slot query.
:param schedule_slot_query: the schedule slot query
:type schedule_slot_query: ``osid.calendaring.ScheduleSlotQuery``
:return: the returned ``ScheduleSlotList``
:rtype: ``osid.calendaring.ScheduleSlotList``
:raise: ``NullArgument`` -- ``schedule_slot_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``schedule_slot_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.ScheduleSlotList
class ScheduleSlotSearchSession:
    """Session for performing searches over ``ScheduleSlot`` objects.

    Queries are built with the ``ScheduleSlotQuery``; the schedule slot
    record ``Type`` also determines the record for the query. The basic
    search method, ``get_schedule_slots_by_query()``, returns a list of
    ``ScheduleSlots``. A more advanced search can be performed with
    ``get_schedule_slots_by_search()``, which additionally accepts a
    ``ScheduleSlotSearch`` carrying options that affect the whole search
    (such as ordering) and returns ``ScheduleSlotSearchResults``, from
    which the resulting ``ScheduleSlotsList`` can be obtained or a
    search within the result set performed via ``ScheduleSlotSearch``.

    Two views control the scope of a search:

      * federated calendar view: results include schedule slots in
        calendars that are descendants of this calendar in the calendar
        hierarchy
      * isolated calendar view: searches are restricted to schedule
        slots in this calendar only

    Schedule slots may carry a query record, indicated by their
    respective record types and accessed through the
    ``ScheduleSlotQuery``.

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_schedule_slot_search(self):
        """Gets a schedule slot search.

        :return: the schedule slot search
        :rtype: ``osid.calendaring.ScheduleSlotSearch``

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.calendaring.ScheduleSlotSearch

    schedule_slot_search = property(fget=get_schedule_slot_search)

    @abc.abstractmethod
    def get_schedule_slot_search_order(self):
        """Gets a schedule slot search order.

        The ``ScheduleSlotSearchOrder`` is handed to a
        ``ScheduleSlotSearch`` to specify how results are ordered.

        :return: the schedule slot search order
        :rtype: ``osid.calendaring.ScheduleSlotSearchOrder``

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.calendaring.ScheduleSlotSearchOrder

    schedule_slot_search_order = property(fget=get_schedule_slot_search_order)

    @abc.abstractmethod
    def get_schedule_slots_by_search(self, schedule_slot_query, schedule_slot_search):
        """Gets the results that match the given query, using the given search options.

        :param schedule_slot_query: the schedule slot query
        :type schedule_slot_query: ``osid.calendaring.ScheduleSlotQuery``
        :param schedule_slot_search: the schedule slot search
        :type schedule_slot_search: ``osid.calendaring.ScheduleSlotSearch``
        :return: the schedule slot search results
        :rtype: ``osid.calendaring.ScheduleSearchResults``
        :raise: ``NullArgument`` -- ``schedule_slot_query`` or ``schedule_slot_search`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        :raise: ``Unsupported`` -- ``schedule_slot_search`` or ``schedule_slot_query`` is not of this service

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.calendaring.ScheduleSearchResults

    @abc.abstractmethod
    def get_schedule_slot_query_from_inspector(self, schedule_slot_query_inspector):
        """Gets a schedule slot query from an inspector.

        The inspector is obtained from a ``ScheduleSlotSearchResults``.

        :param schedule_slot_query_inspector: a schedule slot query inspector
        :type schedule_slot_query_inspector: ``osid.calendaring.ScheduleSlotQueryInspector``
        :return: the schedule query
        :rtype: ``osid.calendaring.ScheduleSlotQuery``
        :raise: ``NullArgument`` -- ``schedule_slot_query_inspector`` is ``null``
        :raise: ``Unsupported`` -- ``schedule_slot_query_inspector`` is not of this service

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.calendaring.ScheduleSlotQuery
class ScheduleSlotAdminSession:
"""This session creates, updates, and deletes ``Schedule Slots``.
The data for create and update is provided by the consumer via the
form object. ``OsidForms`` are requested for each create or update
and may not be reused.
Create and update operations differ in their usage. To create a
``ScheduleSlot,`` a ``ScheduleSlotForm`` is requested using
``get_schedule_slot_form_for_create()`` specifying the desired
record ``Types`` or none if no record ``Types`` are needed. The
returned ``ScheduleSlotForm`` will indicate that it is to be used
with a create operation and can be used to examine metadata or
validate data prior to creation. Once the ``ScheduleSlotForm`` is
submitted to a create operation, it cannot be reused with another
create operation unless the first operation was unsuccessful. Each
``ScheduleSlotForm`` corresponds to an attempted transaction.
For updates, ``ScheduleSlotForms`` are requested to the
``ScheduleSlot`` ``Id`` that is to be updated using
``getScheduleSlotFormForUpdate()``. Similarly, the
``ScheduleSlotForm`` has metadata about the data that can be updated
and it can perform validation before submitting the update. The
``ScheduleSlotForm`` can only be used once for a successful update
and cannot be reused.
The delete operations delete ``Schedule Slots``. To unmap a
``ScheduleSlot`` from the current ``Calendar,`` the
``ScheduleSlotCalendarAssignmentSession`` should be used. These
delete operations attempt to remove the ``ScheduleSlot`` itself thus
removing it from all known ``Calendar`` catalogs.
This session includes an ``Id`` aliasing mechanism to assign an
external ``Id`` to an internally assigned Id.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_calendar_id(self):
"""Gets the ``Calendar`` ``Id`` associated with this session.
:return: the ``Calendar Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
calendar_id = property(fget=get_calendar_id)
@abc.abstractmethod
def get_calendar(self):
"""Gets the ``Calendar`` associated with this session.
:return: the ``Calendar`` associated with this session
:rtype: ``osid.calendaring.Calendar``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.Calendar
calendar = property(fget=get_calendar)
@abc.abstractmethod
def can_create_schedule_slots(self):
"""Tests if this user can create ``ScheduleSlots``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating a
``ScheduleSlot`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
create operations to an unauthorized user.
:return: ``false`` if ``ScheduleSlot`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def can_create_schedule_slot_with_record_types(self, schedule_slot_record_types):
"""Tests if this user can create a single ``ScheduleSlot`` using the desired record types.
While ``CalendaringManager.getScheduleSlotRecordTypes()`` can be
used to examine which records are supported, this method tests
which record(s) are required for creating a specific
``ScheduleSlot``. Providing an empty array tests if a
``ScheduleSlot`` can be created with no records.
:param schedule_slot_record_types: array of schedule slot record types
:type schedule_slot_record_types: ``osid.type.Type[]``
:return: ``true`` if ``ScheduleSlot`` creation using the specified record ``Types`` is supported, ``false``
otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``schedule_slot_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_schedule_slot_form_for_create(self, schedule_slot_record_types):
"""Gets the schedule slot form for creating new schedule slots.
A new form should be requested for each create transaction.
:param schedule_slot_record_types: array of schedule slot record types
:type schedule_slot_record_types: ``osid.type.Type[]``
:return: the schedule slot form
:rtype: ``osid.calendaring.ScheduleSlotForm``
:raise: ``NullArgument`` -- ``schedule_slot_record_types`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.ScheduleSlotForm
@abc.abstractmethod
def | |
treatment is required
e1 = ie1
e2 = ie2
else:
if (isinstance(ie1, unicode)):
e1 = ie1.encode('utf-8')
else:
e1 = ie1
if (isinstance(ie2, unicode)):
e2 = ie2.encode('utf-8')
else:
e2 = ie2
# The keys in key_translate represent a concatenated string. We should split
# these strings and then compare the values
key_translate = ['MEMBER_INTERFACES', 'CONF', 'PEER1_MEMBER_INTERFACES', 'PEER2_MEMBER_INTERFACES', 'PEER1_PO_CONF', 'PEER2_PO_CONF']
# Some keys have values given as a list which is encoded into a
# string. So split that up into list and then use 'set' to process
# the same irrespective of the order of elements
if (k in key_translate):
# CONF, PEER1_PO_CONF and PEER2_PO_CONF has '\n' joining the commands
# MEMBER_INTERFACES, PEER1_MEMBER_INTERFACES, and PEER2_MEMBER_INTERFACES
# have ',' joining differnet elements. So use a multi-delimiter split
# to split with any delim
t_e1 = set(re.split(r'[\n,]', e1.strip()))
t_e2 = set(re.split(r'[\n,]', e2.strip()))
else:
if (isinstance(e1, str)):
t_e1 = e1.lower()
else:
t_e1 = e1
if (isinstance(e2, str)):
t_e2 = e2.lower()
else:
t_e2 = e2
if (t_e1 != t_e2):
if ((state == 'replaced') or (state == 'overridden')):
return 'add'
elif (state == 'merged'):
# If the key is included in config, then use the value from want.
# If the key is not included in config, then use the value from
# have.
# Match and find the corresponding PB input.
match_pb = [pb for pb in self.pb_input if ((name.lower() == pb['ifname'].lower()) and
(sno == pb['sno']) and
(fabric == pb['fabric']))]
pb_keys = list(match_pb[0].keys())
if (self.keymap[k] not in pb_keys):
# Copy the value from have, because for 'merged' state we
# should leave values that are not specified in config as is.
# We copy 'have' because, the validate input would have defaulted the
# values for non-mandatory objects.
return 'copy_and_add'
else:
return 'add'
return 'dont_add'
def dcnm_intf_can_be_added (self, want):
name = want['interfaces'][0]['ifName']
sno = want['interfaces'][0]['serialNumber']
fabric = want['interfaces'][0]['fabricName']
match_have = [have for have in self.have_all if ((name.lower() == have['ifName'].lower()) and
(sno == have['serialNo']) and
(fabric == have['fabricName']))]
if (match_have):
if ((match_have[0]['complianceStatus'] != 'In-Sync') and
(match_have[0]['complianceStatus'] != 'Pending')):
return True
else:
return False
return True
    def dcnm_intf_compare_want_and_have (self, state):
        """Compare self.want against self.have and build the diff lists.

        For each desired interface in self.want, decide whether it must be
        created (no matching entry in self.have), updated (matching entry
        exists but at least one element differs), or left untouched, and
        merge the corresponding payload into self.diff_create or
        self.diff_replace.  Interfaces whose playbook 'deploy' flag is set
        are additionally queued in self.diff_deploy.

        state is the module state being processed ('merged', 'replaced' or
        'overridden'); it is forwarded to dcnm_intf_compare_elements, which
        uses it to decide whether a differing element warrants an update.
        """
        for want in self.want:
            delem = {}
            action = ''
            new = False   # NOTE(review): unused in this method
            add = False   # NOTE(review): unused in this method

            name = want['interfaces'][0]['ifName']
            sno = want['interfaces'][0]['serialNumber']
            fabric = want['interfaces'][0]['fabricName']
            deploy = want['deploy']
            intf_changed = False

            # 'deploy' is playbook metadata, not part of the DCNM payload.
            want.pop('deploy')

            # Locate the corresponding 'have' entries by interface name
            # (case-insensitive) and switch serial number.
            match_have = [d for d in self.have if ((name.lower() == d['interfaces'][0]['ifName'].lower()) and
                                                   (sno == d['interfaces'][0]['serialNumber']))]
            if (not match_have):
                changed_dict = copy.deepcopy(want)

                # A brand-new interface is created for all three states.
                if ((state == 'merged') or (state == 'replaced') or (state == 'overridden')):
                    action = 'add'
            else:
                wkeys = list(want.keys())
                # These keys are bookkeeping only and are never compared.
                if ('skipResourceCheck' in wkeys):
                    wkeys.remove('skipResourceCheck')
                if ('interfaceType' in wkeys):
                    wkeys.remove('interfaceType')

                for d in match_have:
                    changed_dict = copy.deepcopy(want)
                    if ('skipResourceCheck' in changed_dict.keys()):
                        changed_dict.pop('skipResourceCheck')

                    # First check if the policies are the same for want and have. If they
                    # are different, we cannot compare the profiles because each profile
                    # will have different elements. As per PRD, if policies are different
                    # we should not merge the information; overwrite with whatever is in
                    # want instead, and don't compare the rest of the structure.
                    if (want['policy'] != d['policy']):
                        action = 'update'
                        continue
                    else :
                        for k in wkeys:
                            if (k == 'interfaces'):
                                if_keys = list(want[k][0].keys())

                                if_keys.remove('interfaceType')
                                changed_dict[k][0].pop('interfaceType')

                                # 'have' will not contain the fabric name object, so do not
                                # try to compare it. This is especially true for Ethernet
                                # interfaces. Since a switch can belong to only one fabric,
                                # the serial number is unique across all fabrics.
                                if_keys.remove('fabricName')
                                changed_dict[k][0].pop('fabricName')

                                for ik in if_keys:
                                    if (ik == 'nvPairs'):
                                        nv_keys = list(want[k][0][ik].keys())
                                        for nk in nv_keys:
                                            # HAVE may have an entry with a list of
                                            # interfaces. Check all the interface entries
                                            # for a match; even if only one entry matches,
                                            # do not add the interface.
                                            for index in range (len(d[k])):
                                                res = self.dcnm_intf_compare_elements (name, sno, fabric,
                                                                                       want[k][0][ik][nk],
                                                                                       d[k][index][ik][nk], nk, state)

                                                if (res == 'dont_add'):
                                                    break
                                            if (res == 'copy_and_add'):
                                                want[k][0][ik][nk] = d[k][0][ik][nk]
                                                changed_dict[k][0][ik][nk] = d[k][0][ik][nk]

                                            if (res != 'dont_add'):
                                                action = 'update'
                                            else:
                                                # Keys and values match. Remove from changed_dict.
                                                changed_dict[k][0][ik].pop(nk)
                                    else:
                                        # Same list-of-interfaces handling as above, for the
                                        # non-nvPairs elements of the interface dict.
                                        # NOTE(review): `index` is not used inside this loop
                                        # (d[k][0][ik] is always compared), which looks
                                        # inconsistent with the nvPairs branch -- confirm.
                                        for index in range (len(d[k])):
                                            res = self.dcnm_intf_compare_elements (name, sno, fabric,
                                                                                   want[k][0][ik],
                                                                                   d[k][0][ik], ik, state)
                                            if (res == 'dont_add'):
                                                break
                                        if (res == 'copy_and_add'):
                                            want[k][0][ik] = d[k][0][ik]
                                            changed_dict[k][0][ik] = d[k][0][ik]

                                        if (res != 'dont_add'):
                                            action = 'update'
                                        else:
                                            # Keys and values match. Remove from changed_dict,
                                            # except ifName, which identifies the interface.
                                            if (ik != 'ifName'):
                                                changed_dict[k][0].pop(ik)
                            else:
                                # Top-level (non-'interfaces') elements are compared directly.
                                res = self.dcnm_intf_compare_elements (name, sno, fabric,
                                                                       want[k], d[k], k, state)

                                if (res == 'copy_and_add'):
                                    want[k] = d[k]
                                    changed_dict[k] = d[k]

                                if (res != 'dont_add'):
                                    action = 'update'
                                else:
                                    # Keys and values match. Remove from changed_dict.
                                    changed_dict.pop(k)

            if (action == 'add'):
                self.dcnm_intf_merge_intf_info(want, self.diff_create)
                # Record what changed for the module's result output.
                self.changed_dict[0][state].append(changed_dict)
                intf_changed = True
            elif (action == 'update'):
                # Remove the 'interfaceType' key from 'want'. It is not required for 'replace'.
                if (want.get('interfaceType', None) != None):
                    want.pop('interfaceType')
                self.dcnm_intf_merge_intf_info(want, self.diff_replace)
                # Record what changed for the module's result output.
                self.changed_dict[0][state].append(changed_dict)
                intf_changed = True

            # If the deploy flag is set to True, add the information so that this
            # interface will be deployed.
            if (str(deploy) == 'True'):
                # Add to diff_deploy,
                # 1. if intf_changed is True
                # 2. if intf_changed is False, then only if the interface's
                #    compliance status indicates it is out of sync
                # 3. Do not add otherwise
                if (False is intf_changed):
                    rc = self.dcnm_intf_can_be_added (want)
                else:
                    rc = True
                if (True is rc):
                    delem['serialNumber'] = sno
                    delem['ifName'] = name
                    self.diff_deploy.append(delem)
                    self.changed_dict[0]['deploy'].append(copy.deepcopy(delem))
def dcnm_intf_get_diff_replaced(self):
self.diff_create = []
self.diff_delete = [[],[],[],[],[]]
self.diff_deploy = []
self.diff_replace = []
for cfg in self.config:
self.dcnm_intf_process_config(cfg)
# Compare want[] and have[] and build a list of dicts containing interface information that
# should be sent to DCNM for updation. The list can include information on interfaces which
# are already presnt in self.have and which differ in the values for atleast one of the keys
self.dcnm_intf_compare_want_and_have ('replaced')
def dcnm_intf_get_diff_merge(self):
    """Build the diff lists for the 'merged' state.

    Resets every diff accumulator, normalizes each entry from the playbook
    config, then compares want[] and have[] to collect new interfaces or
    interfaces that gain new information.

    NOTE: the merge diff is updated only when there is information that does
    not already exist; to update existing values use the 'replaced' state.
    """
    self.diff_create = []
    self.diff_delete = [[] for _ in range(5)]
    self.diff_deploy = []
    self.diff_replace = []

    for cfg in self.config:
        self.dcnm_intf_process_config(cfg)

    self.dcnm_intf_compare_want_and_have('merged')
def dcnm_compare_default_payload (self, intf, have):
    """Compare a default interface payload against the device state.

    Checks 'policy' and the nvPairs that make up a default ethernet payload.
    Replaces the original run of nine copy-pasted if-blocks with a single
    loop over the compared keys (same keys, same order, same result).

    Parameters:
        intf: payload dict with 'policy' and 'interfaces'[0]['nvPairs']
        have: corresponding dict describing the existing state on DCNM

    Returns:
        'DCNM_INTF_MATCH' if all compared values agree,
        'DCNM_INTF_NOT_MATCH' otherwise.
    """
    if (intf.get('policy') != have.get('policy')):
        return 'DCNM_INTF_NOT_MATCH'

    intf_nv = intf.get('interfaces')[0].get('nvPairs')
    have_nv = have.get('interfaces')[0].get('nvPairs')

    # Keys of the default ethernet payload that must match.
    for key in ('INTF_VRF', 'IP', 'PREFIX', 'ROUTING_TAG', 'MTU',
                'SPEED', 'DESC', 'CONF', 'ADMIN_STATE'):
        if (intf_nv.get(key) != have_nv.get(key)):
            return 'DCNM_INTF_NOT_MATCH'
    return 'DCNM_INTF_MATCH'
def dcnm_intf_get_default_eth_payload(self, ifname, sno, fabric):
    """Return the default ethernet interface payload used for 'overridden'.

    Fix: the original filled ifName/serialNumber/fabricName with
    placeholders (or duplicates) in the literal and then immediately
    reassigned them; the values are now set directly in the literal.
    The returned dict is byte-for-byte the same as before.

    Parameters:
        ifname: interface name, e.g. 'Ethernet1/1'
        sno:    switch serial number
        fabric: fabric name
    """
    eth_payload = {
        "policy": "int_routed_host_11_1",
        "interfaces": [
            {
                "interfaceType": "INTERFACE_ETHERNET",
                "serialNumber": sno,
                "ifName": ifname,
                "fabricName": fabric,
                "nvPairs": {
                    "interfaceType": "INTERFACE_ETHERNET",
                    "INTF_VRF": "",
                    "IP": "",
                    "PREFIX": "",
                    "ROUTING_TAG": "",
                    "MTU": "9216",
                    "SPEED": "Auto",
                    "DESC": "",
                    "CONF": "no shutdown",
                    "ADMIN_STATE": "true",
                    "INTF_NAME": ifname
                }
            }]
    }
    return eth_payload
def dcnm_intf_can_be_replaced(self, have):
for item in self.pb_input:
# For overridden state, we will not touch anything that is present in incoming config,
# because those interfaces will anyway be modified in the current run
if ((self.module.params['state'] == 'overridden') and
(item['ifname'] == have['ifName'])):
return False, | |
# <gh_stars>0
import numpy as np
import networkx as nx
import copy
from math import log2
"""
Implementation of the hierarchy coordinates via networkX from
Hierarchy in Complex Networks: The Possible and the Actual [B Corominas-Murtra - 2013] [*] - Supporting Information
Though implemented for unweighted networkX graphs, in the context of its original application,
these are applied to weighted graphs by averaging over the unweighted graphs resulting from
applying thresholds to normalized weighted graphs.
"""
# General Functions:
def distribute(n, end_value_range=None, dist=1, sampled_range_of_dist=(0, 1)):
    """Return n floats sampled from `dist` over `sampled_range_of_dist`,
    rescaled so they span `end_value_range`.

    Defaults to an exponential distribution e^(dist*x) sampled on (0, 1).

    Parameters
    ----------
    n : int
        Number of distributed points returned.
    end_value_range : tuple, optional
        Range the final values span. Defaults to [0, 1] after normalization.
    dist : float or callable, default: 1
        Coefficient A in np.exp(A*x), or an alternate 1d distribution function.
    sampled_range_of_dist : tuple, default: (0, 1)
        Range of the distribution's domain that is sampled.

    Returns
    -------
    pts : numpy array of n floats

    Notes
    -----
    Two fixes vs the original implementation, both bringing the code in
    line with this documented contract:
    * sampling now starts at min(sampled_range_of_dist) instead of always 0,
      so e.g. sampled_range_of_dist=(1, 2) actually samples x in [1, 2);
    * normalization shifts the minimum to 0 before scaling, so the output
      truly spans end_value_range (previously only the span was rescaled,
      leaving an offset).
    """
    if isinstance(dist, float) or isinstance(dist, int):
        distribution = lambda x: np.exp(dist * x)
    else:
        distribution = dist

    x_start = min(sampled_range_of_dist)
    x_increment = np.abs(max(sampled_range_of_dist) - x_start) / n
    pts = np.array([distribution(x_start + x_increment * i) for i in range(n)])
    # Affine-map the sampled values onto [0, 1].
    pts = (pts - min(pts)) / abs(max(pts) - min(pts))
    if end_value_range is not None:
        pts = pts * (max(end_value_range) - min(end_value_range)) + min(end_value_range)
    return pts
def matrix_normalize(matrix, row_normalize=False):
    """Normalize the columns (default) or rows of a 2d matrix to sum to 1.

    Parameters
    ----------
    matrix : square 2d numpy array or nested list
        Matrix to be normalized.
    row_normalize : bool
        Normalizes rows *instead* of the default columns if True.

    Returns
    -------
    numpy array
        Column- or row-normalized array; all-zero rows/columns stay zero.

    Notes
    -----
    Fix: the docstring always advertised nested-list input, but ``.sum(axis=…)``
    only exists on arrays — the input is now coerced with ``np.asarray`` first.
    """
    matrix = np.asarray(matrix)
    if row_normalize:
        row_sums = matrix.sum(axis=1)
        return np.array([matrix[index, :] / row_sums[index] if row_sums[index] != 0 else [0] * row_sums.size for index in range(row_sums.size)])
    else:
        column_sums = matrix.sum(axis=0)
        return np.array([matrix[:, index] / column_sums[index] if column_sums[index] != 0 else [0]*column_sums.size for index in range(column_sums.size)]).T
# Hierarchy Coordinate Functions
def weakly_connected_component_subgraphs(G, copy=True):
    """Generate weakly connected components as subgraphs.

    Re-imported from an earlier NetworkX release (where it was later
    dropped) to keep this module compatible with newer NetworkX versions.

    Parameters
    ----------
    G : NetworkX Graph
        A directed graph.
    copy : bool
        If True, graph, node, and edge attributes are copied to the
        yielded subgraphs; otherwise views into G are yielded.
    """
    for component in nx.weakly_connected_components(G):
        subgraph = G.subgraph(component)
        yield subgraph.copy() if copy else subgraph
def node_weighted_condense(A, num_thresholds=8, threshold_distribution=None):
    """Returns a series of node-weighted condensed graphs (DAGs) [1]_ and the
    binary graphs they were condensed from.

    Parameters
    ----------
    A: numpy array
        Adjacency matrix, as square 2d numpy array
    num_thresholds: int, default: 8
        Number of thresholds and resultant sets of node-weighted DAGs
    threshold_distribution: float, optional
        If a float, distributes the thresholds exponentially with that
        exponent (via ``distribute``); None gives a linear distribution.

    Returns
    -------
    largest_condensed_graphs: list of networkX Graphs
        node-weighted condensations of the thresholded digraphs; only the
        largest weakly connected component of each is kept (see note)
    nx_graphs: list of networkX Graphs
        unweighted graphs produced by applying the thresholds to A

    Notes
    ------
    Fixes vs the original:
    * ``np.isclose(range, 0, 1e-2)`` passed 1e-2 as *rtol*, which is a no-op
      when comparing against 0 (tolerance stayed at the 1e-8 default atol);
      the clearly intended absolute tolerance is now passed as ``atol``.
    * the bare ``except:`` is narrowed to ``except Exception``.

    TODO: As multiple independent graphs may form under threshold cutoffs,
    only the largest is considered (see pages 7, 8 of [1]_, supplement).

    .. [1] "On the origins of hierarchy in complex networks."
       Corominas-Murtra, Bernat, et al.
       Proceedings of the National Academy of Sciences 110, no. 33 (2013)
    """
    # Establishing thresholds. A (near-)constant matrix gets a single graph
    # built from A itself rather than from binary thresholded copies.
    if num_thresholds == 1 or np.isclose(np.max(A) - np.min(A), 0, atol=1e-2):
        nx_graphs = [nx.from_numpy_matrix(A, create_using=nx.DiGraph)]
    else:
        if threshold_distribution is None:
            try:
                thresholds = list(np.round(np.arange(np.min(A), np.max(A), (np.max(A) - np.min(A)) / num_thresholds), 4))  # linear distribution
            except Exception:
                # degenerate range (e.g. zero step): fall back to a constant list
                thresholds = [np.max(A)] * num_thresholds
        else:
            thresholds = distribute(dist=threshold_distribution, end_value_range=(np.min(A), np.max(A)), n=num_thresholds)

        # Converting to binary nx_graphs according to thresholds:
        nx_graphs = [nx.from_numpy_matrix(np.where(A > threshold, 1, 0), create_using=nx.DiGraph) for threshold in thresholds]
        nx_graphs = [graph for graph in nx_graphs if not nx.is_empty(graph)]  # eliminates empty graphs
        # TODO: Possibly better to count empty graphs as a 0

    condensed_graphs = [nx.condensation(graph) for graph in nx_graphs]
    largest_condensed_graphs = []
    for condensed_graph in condensed_graphs:
        # Keep only the largest weakly connected component (see docstring).
        # weakly_connected_component_subgraphs is a local copy of the helper
        # dropped after NetworkX 1.10, kept for forward compatibility.
        largest_condensed_graphs.append(nx.convert_node_labels_to_integers(
            max(weakly_connected_component_subgraphs(condensed_graph, copy=True), key=len)))
        # Weight each condensed node by how many original nodes it absorbed.
        members = nx.get_node_attributes(largest_condensed_graphs[-1], 'members')
        node_weights = [len(w) for w in members.values()]
        for node_index in range(len(node_weights)):
            largest_condensed_graphs[-1].nodes[node_index]["weight"] = node_weights[node_index]
    return largest_condensed_graphs, nx_graphs
def weight_nodes_by_condensation(condensed_graph):
    """Attach a 'weight' attribute to every node of a condensation graph,
    equal to the number of original nodes it absorbed. Proposed in _[1].

    e.g. a 3-node cycle condenses to a single node of weight 3.

    Parameters
    ----------
    condensed_graph: NetworkX Graph
        Result of a networkx.condensation call, so that the 'members'
        attribute holds the constituent (cyclical) nodes.

    Return
    ------
    condensed_graph: NetworkX Graph
        The same graph object, node-weighted in place (no copy is made).

    .. [1] "On the origins of hierarchy in complex networks."
       Corominas-Murtra, Bernat, et al.
       Proceedings of the National Academy of Sciences 110, no. 33 (2013)
    """
    members = nx.get_node_attributes(condensed_graph, 'members')
    for node_index, constituents in enumerate(members.values()):
        condensed_graph.nodes[node_index]["weight"] = len(constituents)
    return condensed_graph
def max_min_layers(G, max_layer=True):
    """Return the maximal layer (nodes with in-degree 0, top of hierarchy)
    or the minimal layer (nodes with out-degree 0) of a directed graph.

    Parameters
    ----------
    G: NetworkX Graph
        A directed graph.
    max_layer: bool, default: True
        If True, return the maximal layer (k_in = 0); otherwise the
        minimal layer (k_out = 0).

    Return
    ------
    list
        Node indices (ints) in the requested layer.
    """
    degree_of = G.in_degree if max_layer else G.out_degree
    return [node for node in G.nodes() if degree_of(node) == 0]
def leaf_removal(G, forward=True):
"""Returns a pruned network, with either maximal (k_in=0)
or minimal (k_out = 0) nodes removed upon call.
Parameters
-----------
G: NetworkX Graph
A directed graph.
forward: bool, default: True
if True, prunes from | |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import enum
import functools
from edb import errors
from edb.edgeql import qltypes
from edb.schema import objtypes as s_objtypes
from edb.schema import pointers as s_pointers
from edb.ir import ast as irast
from edb.ir import utils as irutils
from .. import context
if TYPE_CHECKING:
from edb.schema import constraints as s_constr
AT_MOST_ONE = qltypes.Cardinality.AT_MOST_ONE
ONE = qltypes.Cardinality.ONE
MANY = qltypes.Cardinality.MANY
AT_LEAST_ONE = qltypes.Cardinality.AT_LEAST_ONE
class CardinalityBound(int, enum.Enum):
    '''This enum is used to perform some of the cardinality operations.'''
    ZERO = 0
    ONE = 1
    MANY = 2

    def as_required(self) -> bool:
        # A lower bound of ONE means the value is required.
        return self is CB_ONE

    def as_schema_cardinality(self) -> qltypes.SchemaCardinality:
        # MANY maps to MANY; ZERO and ONE both collapse to schema ONE.
        if self is CB_MANY:
            return qltypes.SchemaCardinality.MANY
        return qltypes.SchemaCardinality.ONE

    @classmethod
    def from_required(cls, required: bool) -> CardinalityBound:
        return CB_ONE if required else CB_ZERO

    @classmethod
    def from_schema_value(
        cls,
        card: qltypes.SchemaCardinality
    ) -> CardinalityBound:
        return CB_MANY if card is qltypes.SchemaCardinality.MANY else CB_ONE
# Short aliases for the CardinalityBound members, used throughout the
# bound arithmetic below (and by CardinalityBound's own methods).
CB_ZERO = CardinalityBound.ZERO
CB_ONE = CardinalityBound.ONE
CB_MANY = CardinalityBound.MANY


def _card_to_bounds(
    card: qltypes.Cardinality
) -> Tuple[CardinalityBound, CardinalityBound]:
    """Decompose a cardinality into its (lower, upper) bound pair."""
    lower, upper = card.to_schema_value()
    return (
        CardinalityBound.from_required(lower),
        CardinalityBound.from_schema_value(upper),
    )


def _bounds_to_card(
    lower: CardinalityBound,
    upper: CardinalityBound,
) -> qltypes.Cardinality:
    """Recompose a cardinality from its (lower, upper) bound pair."""
    return qltypes.Cardinality.from_schema_value(
        lower.as_required(),
        upper.as_schema_cardinality(),
    )
def _get_set_scope(
        ir_set: irast.Set,
        scope_tree: irast.ScopeTreeNode) -> irast.ScopeTreeNode:
    """Resolve the scope subtree for *ir_set*, or fall back to *scope_tree*."""
    if not ir_set.path_scope_id:
        return scope_tree
    new_scope = scope_tree.root.find_by_unique_id(ir_set.path_scope_id)
    if new_scope is None:
        raise errors.InternalServerError(
            f'dangling scope pointer to node with uid'
            f':{ir_set.path_scope_id} in {ir_set!r}'
        )
    return new_scope
def _split_bounds(args):
    """Unzip cardinalities into parallel (lowers, uppers) tuples.

    Returns a pair of empty tuples when *args* is empty.
    """
    bounds = [_card_to_bounds(a) for a in args]
    if not bounds:
        return (), ()
    lowers, uppers = zip(*bounds)
    return lowers, uppers


def cartesian_cardinality(
    args: Iterable[qltypes.Cardinality],
) -> qltypes.Cardinality:
    '''Cardinality of Cartesian product of multiple args.'''
    lowers, uppers = _split_bounds(args)
    if not lowers:
        # no args is indicative of a empty set
        return AT_MOST_ONE
    return _bounds_to_card(min(lowers), max(uppers))


def max_cardinality(
    args: Iterable[qltypes.Cardinality],
) -> qltypes.Cardinality:
    '''Maximum lower and upper bound of specified cardinalities.'''
    lowers, uppers = _split_bounds(args)
    if not lowers:
        # no args is indicative of a empty set
        return AT_MOST_ONE
    return _bounds_to_card(max(lowers), max(uppers))


def _union_cardinality(
    args: Iterable[qltypes.Cardinality],
) -> qltypes.Cardinality:
    '''Cardinality of UNION of multiple args.'''
    lowers, uppers = _split_bounds(args)
    if not lowers:
        # no args is indicative of a empty set
        return AT_MOST_ONE
    # More than one operand makes the upper bound MANY.
    upper = CB_MANY if len(uppers) > 1 else uppers[0]
    return _bounds_to_card(max(lowers), upper)


def _coalesce_cardinality(
    args: Iterable[qltypes.Cardinality],
) -> qltypes.Cardinality:
    '''Cardinality of ?? of multiple args.'''
    lowers, uppers = _split_bounds(args)
    if not lowers:
        # no args is indicative of a empty set
        return AT_MOST_ONE
    return _bounds_to_card(max(lowers), max(uppers))


def _common_cardinality(
    args: Iterable[irast.Base],
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """Cartesian cardinality of the inferred cardinalities of *args*."""
    inferred = (
        infer_cardinality(
            a, scope_tree=scope_tree, singletons=singletons, env=env)
        for a in args
    )
    return cartesian_cardinality(inferred)
@functools.singledispatch
def _infer_cardinality(
    ir: irast.Expr,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """Single-dispatch fallback: no handler registered for this IR node."""
    raise ValueError(f'infer_cardinality: cannot handle {ir!r}')


@_infer_cardinality.register
def __infer_none(
    ir: None,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    # Here for debugging purposes.
    raise ValueError('invalid infer_cardinality(None, schema) call')


@_infer_cardinality.register
def __infer_statement(
    ir: irast.Statement,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """A statement's cardinality is that of its wrapped expression."""
    return infer_cardinality(
        ir.expr, scope_tree=scope_tree, singletons=singletons, env=env)


@_infer_cardinality.register
def __infer_config_insert(
    ir: irast.ConfigInsert,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """CONFIGURE INSERT delegates to its inner expression."""
    return infer_cardinality(
        ir.expr, scope_tree=scope_tree, singletons=singletons, env=env)


@_infer_cardinality.register
def __infer_emptyset(
    ir: irast.EmptySet,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """An empty set has zero or one (i.e. at most one) elements."""
    return AT_MOST_ONE


@_infer_cardinality.register
def __infer_typeref(
    ir: irast.TypeRef,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """A type reference contributes at most one value."""
    return AT_MOST_ONE


@_infer_cardinality.register
def __infer_type_introspection(
    ir: irast.TypeIntrospection,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """Introspection of a type yields exactly one object."""
    return ONE
def _find_visible(
    ir: irast.Set,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> Optional[irast.ScopeTreeNode]:
    """Find the scope node where *ir*'s path is visible, if any."""
    parent_fence = scope_tree.parent_fence
    if parent_fence is None:
        return None
    path_id = ir.path_id
    if scope_tree.namespaces:
        # Strip this scope's namespaces so the lookup key matches.
        path_id = path_id.strip_namespace(scope_tree.namespaces)
    return parent_fence.find_visible(path_id)
@_infer_cardinality.register
def __infer_set(
    ir: irast.Set,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """Infer the cardinality of a Set node.

    Order of checks: known singletons first, then visibility in the
    enclosing scope, then pointer traversal (with special handling for
    type intersections), then a computed expression, else MANY.
    """
    # Declared singletons are exactly ONE by construction.
    if ir.path_id in singletons:
        return ONE
    # A set visible in an enclosing scope binds a single element
    # (optional visibility allows the empty case).
    if (node := _find_visible(ir, scope_tree, env)) is not None:
        return AT_MOST_ONE if node.optional else ONE

    rptr = ir.rptr
    if rptr is not None:
        rptrref = rptr.ptrref
        if isinstance(rptrref, irast.TypeIntersectionPointerRef):
            # Type intersection (e.g. [IS Type]) filters the prefix set.
            ind_prefix, ind_ptrs = irutils.collapse_type_intersection(ir)
            new_scope = _get_set_scope(ir, scope_tree)
            if ind_prefix.rptr is None:
                # Intersection applied to a non-path prefix: the filter can
                # only reduce each element to at-most-one.
                prefix_card = infer_cardinality(
                    ind_prefix,
                    scope_tree=new_scope,
                    singletons=singletons,
                    env=env,
                )
                return cartesian_cardinality([prefix_card, AT_MOST_ONE])
            else:
                # Expression before type intersection is a path,
                # i.e Foo.<bar[IS Type]. In this case we must
                # take possible intersection specialization of the
                # link union into account.
                # We're basically restating the body of this function
                # in this block, but with extra conditions.
                if _find_visible(ind_prefix, new_scope, env) is not None:
                    return AT_MOST_ONE
                else:
                    rptr_spec: Set[irast.PointerRef] = set()
                    for ind_ptr in ind_ptrs:
                        rptr_spec.update(ind_ptr.ptrref.rptr_specialization)

                    rptr_spec_card = _union_cardinality(
                        s.dir_cardinality for s in rptr_spec)
                    base_card = infer_cardinality(
                        rptr.source,
                        scope_tree=new_scope,
                        singletons=singletons,
                        env=env,
                    )

                    # The resulting cardinality is the cartesian
                    # product of the base to which the type
                    # intersection is applied and the cardinality due
                    # to type intersection itself.
                    return cartesian_cardinality([base_card, rptr_spec_card])
        else:
            if rptrref.union_components:
                # We use cartesian cardinality instead of union cardinality
                # because the union of pointers in this context is disjoint
                # in a sense that for any specific source only a given union
                # component is used.
                rptrref_card = cartesian_cardinality(
                    c.dir_cardinality for c in rptrref.union_components
                )
            else:
                rptrref_card = rptrref.dir_cardinality

            if rptrref_card.is_single():
                # Single-valued pointer: combine with the source cardinality.
                new_scope = _get_set_scope(ir, scope_tree)
                source_card = infer_cardinality(
                    rptr.source,
                    scope_tree=new_scope,
                    singletons=singletons,
                    env=env,
                )
                return cartesian_cardinality((source_card, rptrref_card))
            else:
                return MANY
    elif ir.expr is not None:
        # Computed set: infer from the defining expression in its own scope.
        new_scope = _get_set_scope(ir, scope_tree)
        return infer_cardinality(
            ir.expr,
            scope_tree=new_scope,
            singletons=singletons,
            env=env,
        )
    else:
        return MANY
@_infer_cardinality.register
def __infer_func_call(
    ir: irast.FunctionCall,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """Infer the cardinality of a function call."""
    # the cardinality of the function call depends on the cardinality
    # of non-SET_OF arguments AND the cardinality of the function
    # return value
    SET_OF = qltypes.TypeModifier.SET_OF
    if ir.typemod is SET_OF:
        # A SET OF-returning function can produce any number of results.
        return MANY
    else:
        args = []
        # process positional args
        for arg, typemod in zip(ir.args, ir.params_typemods):
            if typemod is not SET_OF:
                args.append(arg.expr)
        if args:
            # Element-wise call: cardinality is the Cartesian product of
            # the non-SET_OF arguments' cardinalities.
            return _common_cardinality(
                args,
                scope_tree=scope_tree,
                singletons=singletons,
                env=env,
            )
        else:
            # No element-wise arguments: the result cardinality is that of
            # the return type modifier alone.
            if ir.typemod is qltypes.TypeModifier.OPTIONAL:
                return AT_MOST_ONE
            else:
                return ONE
@_infer_cardinality.register
def __infer_oper_call(
    ir: irast.OperatorCall,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """Infer the cardinality of an operator call.

    UNION and ?? get dedicated bound arithmetic; all other operators are
    handled like element-wise function calls.
    """
    if ir.func_shortname == 'std::UNION':
        # UNION needs to "add up" cardinalities.
        return _union_cardinality(
            infer_cardinality(
                a.expr,
                scope_tree=scope_tree,
                singletons=singletons,
                env=env
            ) for a in ir.args
        )
    elif ir.func_shortname == 'std::??':
        # Coalescing takes the maximum of both lower and upper bounds.
        return _coalesce_cardinality(
            infer_cardinality(
                a.expr,
                scope_tree=scope_tree,
                singletons=singletons,
                env=env
            ) for a in ir.args
        )
    else:
        args: List[irast.Base] = []
        all_optional = False
        if ir.typemod is qltypes.TypeModifier.SET_OF:
            # this is DISTINCT and IF..ELSE
            args = [a.expr for a in ir.args]
        else:
            all_optional = True
            # Collect non-SET_OF args; track whether every one is OPTIONAL.
            for arg, typemod in zip(ir.args, ir.params_typemods):
                if typemod is not qltypes.TypeModifier.SET_OF:
                    all_optional &= typemod is qltypes.TypeModifier.OPTIONAL
                    args.append(arg.expr)
        if args:
            card = _common_cardinality(
                args,
                scope_tree=scope_tree,
                singletons=singletons,
                env=env,
            )
            if all_optional:
                # An operator that has all optional arguments and
                # doesn't return a SET OF returns at least ONE result
                # (we currently don't have operators that return
                # OPTIONAL). So we upgrade the lower bound.
                _, upper = _card_to_bounds(card)
                card = _bounds_to_card(CB_ONE, upper)
            return card
        else:
            return AT_MOST_ONE
@_infer_cardinality.register
def __infer_const(
    ir: irast.BaseConstant,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """A literal constant is exactly one value."""
    return ONE


@_infer_cardinality.register
def __infer_param(
    ir: irast.Parameter,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """A query parameter is ONE if required, else optionally absent."""
    return ONE if ir.required else AT_MOST_ONE


@_infer_cardinality.register
def __infer_const_set(
    ir: irast.ConstantSet,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """A literal set {a, b, ...} has as many elements as it lists."""
    return ONE if len(ir.elements) == 1 else AT_LEAST_ONE


@_infer_cardinality.register
def __infer_typecheckop(
    ir: irast.TypeCheckOp,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """IS/IS NOT yields one boolean per element of its left operand."""
    return infer_cardinality(
        ir.left,
        scope_tree=scope_tree,
        singletons=singletons,
        env=env,
    )


@_infer_cardinality.register
def __infer_typecast(
    ir: irast.TypeCast,
    *,
    scope_tree: irast.ScopeTreeNode,
    singletons: Collection[irast.PathId],
    env: context.Environment,
) -> qltypes.Cardinality:
    """A cast is element-wise: same cardinality as the cast expression."""
    return infer_cardinality(
        ir.expr,
        scope_tree=scope_tree,
        singletons=singletons,
        env=env,
    )
def _is_ptr_or_self_ref(
ir_expr: irast.Base,
result_expr: irast.Set,
env: context.Environment,
) -> bool:
if not isinstance(ir_expr, irast.Set):
return False
else:
ir_set = ir_expr
srccls = env.set_types[result_expr]
return (
isinstance(srccls, s_objtypes.ObjectType) and
ir_set.expr | |
# <reponame>shinying/SA
import copy
import math
import numpy as np
import pandas as pd
from typing import List, Dict
from datetime import timedelta, datetime, time
from sys import stderr
from random import random, randint, choice
from collections import deque
from .model import Task, Tug, Ship, TmpTask, TaskState, ShipState, TugState, ChargeType, Company
from .event import Event, ConfirmTask, ChangeTypes, StartWork, StartTimeDelay, Canceled
from .event import WorkTimeDelay, TempNeed, EndWork, Routine
from .simu_params import *
from .settings import WINDOW_SIZE, PENALTY, CALL_HELP_THR, ExecState
from .utils.utility import count_move_time, get_pier_latlng, calculate_revenue
class Simulator:
def __init__(self, tasks: List[Task], tugs: List[Tug], help_tugs=None, subject=None, verbose=True):
    """Set up simulator state.

    Fixes vs the original:
    * ``help_tugs=[]`` was a mutable default argument shared across
      instances; it now defaults to None (equivalent for all callers).
    * removed the leftover debug loop that printed "here" and every tug id
      to stdout on each construction.

    Parameters:
        tasks:     scheduled tasks, sorted internally by start time
        tugs:      tugs owned by the subject company
        help_tugs: tugs that may be borrowed when overloaded (optional)
        subject:   company under simulation; must be a Company member if given
        verbose:   print event/dispatch progress when True
    """
    self.all_tasks = tasks
    # Tasks are consumed left-to-right by segment(), ordered by start time.
    self.tasks_que = deque(sorted(tasks, key=lambda task: task.start_time))
    self.tugs = tugs
    self.help_tugs = [] if help_tugs is None else help_tugs
    self.all_tugs = tugs
    self.system_time = self.tasks_que[0].start_time
    self.subject = subject
    self.verbose = verbose
    self.call_help = bool(self.help_tugs)
    self.pre_duty_tugs = []
    if subject and subject not in Company:
        raise ValueError("Wrong company.")

    self.events: List[Event] = []
    self.change_events: List[ChangeTypes] = []
    self.start_events: List[StartWork] = []
    self.confirm_events: List[ConfirmTask] = []
    self.tmp_tasks: List[TmpTask] = []
    self.done_tmp_tasks: List[TmpTask] = []
    self.tasks = []
    self.result = {}
    self.n_calls = 0
def segment(self, time):
    """Move every queued task starting within WINDOW_SIZE of *time* into
    the active task list and generate its initial events.

    Returns True when at least one task was activated, else False.
    """
    fresh = []
    while self.tasks_que and self.tasks_que[0].start_time <= time + WINDOW_SIZE:
        fresh.append(self.tasks_que.popleft())
    if not fresh:
        return False
    if self.verbose:
        print("[Add Tasks]", [task.id for task in fresh])
    self.tasks.extend(fresh)
    self.gen_init_events(fresh)
    return True
def get_duty_period(self, systime):
    """Return (start, end) datetimes of the 12-hour shift containing *systime*.

    Day shift runs 08:00-20:00; night shift runs 20:00-08:00 (+1 day).

    Fixes vs the original:
    * the evening branch tested ``20:00 <= t <= 23:00``, so times in
      (23:00, 24:00) fell through to the after-midnight branch and produced
      a shift that had already ended (yesterday 20:00 -> today 08:00);
    * the branch condition read ``self.system_time`` while the replace()
      calls used *systime* — every caller in this file passes
      self.system_time, but the method now consistently uses its argument.
    """
    now = systime.time()
    if time(hour=8) <= now <= time(hour=20):
        start_time = systime.replace(hour=8, minute=0, second=0, microsecond=0)
        end_time = systime.replace(hour=20, minute=0, second=0, microsecond=0)
    elif now > time(hour=20):
        # Evening: shift started at 20:00 today, ends 08:00 tomorrow.
        start_time = systime.replace(hour=20, minute=0, second=0, microsecond=0)
        end_time = (systime + timedelta(days=1)).replace(hour=8, minute=0, second=0, microsecond=0)
    else:
        # After midnight: shift started at 20:00 yesterday, ends 08:00 today.
        start_time = (systime - timedelta(days=1)).replace(hour=20, minute=0, second=0, microsecond=0)
        end_time = systime.replace(hour=8, minute=0, second=0, microsecond=0)
    return start_time, end_time
def get_duty_tugs(self):
    """Return the tugs on the current duty shift.

    Busy tugs from the previous shift are carried over (together with their
    next_available_time), tugs whose duty_period falls in the current shift
    are added, and the pool is padded from all_tugs when too small.
    """
    # NOTE(review): unconditional debug print on every call — presumably
    # leftover instrumentation; confirm before removing.
    for i in self.all_tugs:
        print("all tugs",i.tug_id,i.next_available_time)
    start_time , end_time = self.get_duty_period(self.system_time)
    # tug of previous duty
    ori_tugs = [tug for tug in self.all_tugs if tug.state == TugState.BUSY]
    # tug of present duty
    # NOTE(review): compares tug.duty_period (a single value) against the
    # shift's start and end — assumes duty_period is a datetime inside the
    # shift window; confirm against the Tug model.
    tugs = [tug for tug in self.all_tugs if start_time <= tug.duty_period and tug.duty_period <= end_time]
    unique_tug_id = list(set([ i.tug_id for i in tugs ])) # tug of present duty
    collect_tug_id = list(set([ i.tug_id for i in ori_tugs])) # tug of previous duty did not finish work
    unique_tugs = ori_tugs # tug of previous duty did not finish work
    pre_tugs_id = list(set([ i.tug_id for i in self.pre_duty_tugs]))
    ## select the tug didn't finish work and other tug on duty now
    for t in tugs:
        # Carry the availability time over from the previous shift's record
        # of the same tug, when that tug is not already in the busy set.
        if t.tug_id in unique_tug_id and t.tug_id in pre_tugs_id and t.tug_id not in collect_tug_id:
            for pre_t in self.pre_duty_tugs:
                if t.tug_id == pre_t.tug_id:
                    t.next_available_time = pre_t.next_available_time
        if t.tug_id in unique_tug_id and t.tug_id not in collect_tug_id and t not in set(unique_tugs):
            ## update the tugs next_available time if keep on duty
            unique_tugs.append(t)
            collect_tug_id.append(t.tug_id)
    # Pad the pool from all_tugs when it is too small.
    # NOTE(review): the guard tests < 3 but the loop fills to 4, and the
    # while loop never terminates if all_tugs has fewer than 4 distinct
    # ids not already in the pool — confirm intended thresholds.
    if len(unique_tugs) < 3:
        while len(unique_tugs) < 4:
            for i in range(len(self.all_tugs)):
                if self.all_tugs[i].tug_id not in unique_tug_id:
                    unique_tugs.append(self.all_tugs[i])
            unique_tug_id = list(set([ i.tug_id for i in unique_tugs]))
    # for i in unique_tugs:
    #     print("tug on duty",i.tug_id,"next_available_time",i.next_available_time)
    self.pre_duty_tugs = unique_tugs
    assert len(unique_tugs) >= 2, 'not enough tug to dispatch, if ask for three tugs {}'.format(unique_tugs)
    return unique_tugs
def run(self, method) -> Dict:
    """Simulator's main function, handle events and schedule the tugs

    *method* is the dispatching algorithm invoked by schedule().
    Outer loop drains the task queue in windows; inner loop pops events in
    (time, order) sequence, handles each, and re-dispatches when needed.
    Returns self.result as produced by collect_result().
    """
    self.method = method
    while self.tasks_que:
        # Activate the next window of tasks and do an initial dispatch.
        self.segment(self.tasks_que[0].start_time)
        self.tugs = self.get_duty_tugs()
        self.schedule()
        self.events.sort(key=lambda event: (event.time, event.order))
        while self.events:
            event = self.events.pop(0)
            self.system_time = event.time
            add_new = self.segment(event.time)
            self.tugs = self.get_duty_tugs()
            # Events for canceled tasks are skipped (Routine has no task).
            if type(event) is not Routine and event.task.task_state is TaskState.CANCELED:
                continue
            if self.verbose:
                print(event)
            handle_state = event.handle()
            # additional process according to event type
            if type(event) is ConfirmTask and type(event.task) is TmpTask:
                event.task.ori_task.tugs.extend(event.task.tugs)
                self.done_tmp_tasks.append(event.task)
                self.tmp_tasks.remove(event.task)
            elif type(event) is ChangeTypes or type(event) is StartTimeDelay:
                # A problem during the change re-queues the confirmation.
                if handle_state is ExecState.PROBLEM:
                    confirm = next(eve for eve in event.task.events if type(eve) is ConfirmTask)
                    self.events.append(confirm)
            elif type(event) is StartWork:
                self.insert_event(self.gen_work_delay_event(event.task))
                need_event = self.gen_temp_need_event(event.task)
                if need_event is not None:
                    self.insert_event(need_event)
                self.start_events.remove(event)
            elif type(event) is WorkTimeDelay:
                self.insert_event(self.gen_end_event(event.task))
            elif type(event) is TempNeed:
                if event.task.task_state is TaskState.PROCESSED:
                    continue
                self.handle_tmp_task(event)
            elif type(event) is EndWork:
                self.tasks.remove(event.task)
            # Call dispatch algorithm
            if (type(event) in [Routine, WorkTimeDelay, StartTimeDelay,
                                ChangeTypes, TempNeed, Canceled]) or add_new \
                    or (type(event) is ConfirmTask and event.task.id < 0):
                self.schedule()
            # Check if gap between events is too large
            if any(task.task_state is TaskState.UNPROCESSED_UNASSIGNED \
                   for task in self.all_tasks):
                for eve in self.events:
                    if type(eve) in [StartTimeDelay, WorkTimeDelay, \
                                     Canceled, ChangeTypes, TempNeed, Routine]:
                        break
                    if type(eve) is ConfirmTask and \
                            eve.task.task_state is TaskState.UNPROCESSED_UNASSIGNED and \
                            eve.time - event.time > timedelta(minutes=ROUTINE_DISPATCH):
                        # Inject a Routine event so dispatch runs again soon.
                        self.insert_event(Routine(None, event.time+timedelta(hours=1)))
                        break
            # if i != 0 and (i == len(self.events) or \
            #         self.events[i].time - event.time > timedelta(minutes=ROUTINE_DISPATCH)):
            #     self.insert_event(Routine(None, event.time+timedelta(hours=1)))
            self.events.sort(key=lambda event: (event.time, event.order))
    self.collect_result()
    return self.result
## ------------ Methods dispatching tasks and assign tugs to tasks ------------
    def schedule(self):
        """Execute the dispatching algorithm.

        Gathers every task still UNPROCESSED_UNASSIGNED plus pending
        temporary tasks, reverts the extra waiting time that temp needs
        added at the previous dispatch, runs ``self.method`` (optionally
        calling in help tugs), assigns the resulting tug sets, and finally
        refreshes the times of confirm/start events.
        """
        task_dp = [t for t in self.tasks if t.task_state == TaskState.UNPROCESSED_UNASSIGNED]
        task_dp.sort(key=lambda task: task.start_time)
        all_tasks = self.tmp_tasks + task_dp
        # [print(tug) for tug in self.tugs]
        # modify extra worktime delay increased by temp need at the last dispatching
        for task in all_tasks:
            # id < 0 marks a temporary task (it carries an ori_task); undo the
            # delay applied to the original task at the previous dispatch.
            if task.id < 0 and task.ori_task.extra_wait > timedelta(0):
                task.ori_task.work_time -= task.ori_task.extra_wait
                for tug in task.ori_task.tugs:
                    tug.next_available_time -= task.ori_task.extra_wait
        if all_tasks:
            self.n_calls += 1
            # Duty tugs are resolved lazily, on the first dispatch call only.
            if self.n_calls == 1:
                self.tugs = self.get_duty_tugs()
            if self.verbose:
                print("\n[Scheduling] Dispatch {} tasks with {}...".format(len(all_tasks),
                      self.method.__name__), file=stderr)
            if self.call_help:
                tug_sets, times = self.method(all_tasks, self.tugs, self.help_tugs,
                                              True, CALL_HELP_THR, self.system_time)
                # Calling for help may fail at random; redo the dispatch
                # with our own tugs only.
                if random() < HELP_FAILED_PROB:
                    if self.verbose: print("[Call Help Failed!]")
                    tug_sets, times = self.method(all_tasks, self.tugs, [],
                                                  False, CALL_HELP_THR, self.system_time)
                self.assign(all_tasks, tug_sets, times)
            else:
                tug_sets, times = self.method(all_tasks, self.tugs, [],
                                              False, CALL_HELP_THR, self.system_time)
                self.assign(all_tasks, tug_sets, times)
            # Update confirming and starting time
            self.update_tasks_time(all_tasks)
            # If a tug which is serving a task delayed by temp need,
            # its next available time will be updated in update_tmp_task
            # and the starting time of the undone tasks served by the tug
            # will also be delayed, so we need to redispatch
            # => should be implemented in dispatching algorithms
            for event in self.confirm_events:
                event.time = event.task.last_tug_move
            for event in self.start_events:
                event.time = event.task.start_time_real
            if self.verbose:
                print("")
def assign(self, tasks, tugss, times):
"""Assign tugs and generate new ComfirmTask if tugs change after comfirmation
"""
for task, tugs, start_time in zip(tasks, tugss, times):
if type(task) is TmpTask:
for tug in task.tugs:
assert tug not in task.ori_task.tugs, \
"Tugs for TmpTask should be different from the original task's tugs"
task.assign_tugs(tugs, start_time)
## ------------ Methods updating times of events and tasks ------------
    def update_tasks_time(self, tasks):
        """Rearrange ConfirmTask and StartWork according to new starting time.

        Replays tug positions in order of real start time to compute, for
        each task, when its last assigned tug must start moving
        (``task.last_tug_move``); confirmation must happen no later than that.
        """
        tasks.sort(key=lambda task: task.start_time_real)
        # Initial tugs with their current state
        poses = {}
        # piers = {}
        for tug in self.tugs:
            poses[tug.tug_id] = tug.pos
            # if tug.tasks:
            #     piers[tug.tug_id] = tug.tasks[-1].to
            # else:
            #     piers[tug.tug_id] = 0
        # rearrange events so that ConfirmTask happens
        # no later than the last tug starts moving
        for task in tasks:
            # No tugs assigned yet: confirm at the task's nominal start time.
            if not task.tugs:
                task.last_tug_move = task.start_time
                continue
            start_move_times = []
            for tug in task.tugs:
                # Travel time from the tug's current position to the task start.
                move = count_move_time(poses[tug.tug_id], task.start)
                start = task.start_time_real - move
                start_move_times.append(start)
                # After serving this task the tug sits at the destination pier.
                poses[tug.tug_id] = get_pier_latlng(task.to)
                # piers[tug.tug_id] = task.to
            task.last_tug_move = max(start_move_times)
            if task.id < 0:
                # Temporary task: propagate its delay back to the original task.
                self.update_task_end_by_tmp_need(task.ori_task, task)
## ---------------- Methods dealing with temp tasks ----------------
def handle_tmp_task(self, event):
# Create a temporary task for the new requirement
tmp_task = TmpTask(event.task, event.req_types, event.time)
self.tmp_tasks.append(tmp_task)
new_cf = ConfirmTask(tmp_task, event.time)
self.insert_event(new_cf)
self.confirm_events.append(new_cf)
    def update_task_end_by_tmp_need(self, task, tmp_task):
        """Modify the working time by the waiting time caused by the temp need tugs.

        :param task: the original task being delayed
        :param tmp_task: the temporary task whose real start time slipped
        """
        # How long the temp task had to wait beyond its nominal start.
        extra_wait = tmp_task.start_time_real - tmp_task.start_time
        past_extra = task.extra_wait
        task.extra_wait = extra_wait
        for tug in task.tugs:
            tug.next_available_time += extra_wait
        # Shift the task's EndWork event by the *change* in extra wait,
        # since the previous extra wait was already applied to it.
        for eve in task.events:
            if type(eve) is EndWork:
                eve.time += (extra_wait - past_extra)
                break
        task.work_time += extra_wait
        # NOTE: computed after work_time is extended above, so the temp task
        # runs until the (delayed) end of the original task.
        tmp_task.work_time = task.start_time_real + task.work_time \
                             - tmp_task.start_time_real
## ------ Methods generating events before the all tasks being processed ------
def gen_init_events(self, tasks):
ch, cf, st = self.gen_change_events(tasks), self.gen_confirm_events(tasks), \
self.gen_start_events(tasks)
self.change_events.extend(ch)
self.confirm_events.extend(cf)
self.start_events.extend(st)
self.events.extend(ch+cf+self.gen_start_delay_events(tasks)+
st+self.gen_canceled_events(tasks))
self.events.sort(key=lambda event: event.time)
def gen_confirm_events(self, tasks) -> List[ConfirmTask]:
events = []
for task in tasks:
event = | |
returns object of type '_Application'
"Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
"Class": (61450, 2, (3, 0), (), "Class", None),
"Count": (80, 2, (3, 0), (), "Count", None),
"Parent": (61441, 2, (9, 0), (), "Parent", None),
# Method 'Session' returns object of type '_NameSpace'
"Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
}
_prop_map_put_ = {
}
#This class has Item property/method which may take args - allow indexed access
def __getitem__(self, item):
return self._get_good_object_(self._oleobj_.Invoke(*(81, LCID, 1, 1, item)), "Item")
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(80, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class PropertyPage(DispatchBaseClass):
    """win32com makepy-generated wrapper for the Outlook ``PropertyPage``
    dispatch interface (exposes only the read-only ``Dirty`` property)."""
    CLSID = IID('{0006307E-0000-0000-C000-000000000046}')
    coclass_clsid = None

    # Property map, per makepy convention:
    # name -> (dispid, invoke kind, (VT type, flags), arg types, name, result CLSID)
    _prop_map_get_ = {
        "Dirty": (8449, 2, (3, 0), ((16395, 10),), "Dirty", None),
    }
    _prop_map_put_ = {
    }
class PropertyPageSite(DispatchBaseClass):
    """win32com makepy-generated wrapper for the Outlook ``PropertyPageSite``
    dispatch interface."""
    CLSID = IID('{0006307F-0000-0000-C000-000000000046}')
    coclass_clsid = None

    def OnStatusChange(self):
        """Invoke ``OnStatusChange`` (dispid 8448); no result."""
        return self._oleobj_.InvokeTypes(8448, LCID, 1, (24, 0), (),)

    # Property map, per makepy convention:
    # name -> (dispid, invoke kind, (VT type, flags), arg types, name, result CLSID)
    _prop_map_get_ = {
        # Method 'Application' returns object of type '_Application'
        "Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
        "Class": (61450, 2, (3, 0), (), "Class", None),
        "Parent": (61441, 2, (9, 0), (), "Parent", None),
        # Method 'Session' returns object of type '_NameSpace'
        "Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
    }
    _prop_map_put_ = {
    }
class PropertyPages(DispatchBaseClass):
    """win32com makepy-generated wrapper for the Outlook ``PropertyPages``
    collection (Add/Item/Remove plus indexed and len() access)."""
    CLSID = IID('{00063080-0000-0000-C000-000000000046}')
    coclass_clsid = None

    def Add(self, Page=defaultNamedNotOptArg, Title=defaultNamedNotOptArg):
        """Invoke ``Add`` (dispid 95); no result."""
        return self._oleobj_.InvokeTypes(95, LCID, 1, (24, 0), ((12, 1), (8, 17)),Page
            , Title)

    def Item(self, Index=defaultNamedNotOptArg):
        """Invoke ``Item`` (dispid 81); wraps any returned IDispatch."""
        ret = self._oleobj_.InvokeTypes(81, LCID, 1, (9, 0), ((12, 1),),Index
            )
        if ret is not None:
            ret = Dispatch(ret, 'Item', None)
        return ret

    def Remove(self, Index=defaultNamedNotOptArg):
        """Invoke ``Remove`` (dispid 84); no result."""
        return self._oleobj_.InvokeTypes(84, LCID, 1, (24, 0), ((12, 1),),Index
            )

    # Property map, per makepy convention:
    # name -> (dispid, invoke kind, (VT type, flags), arg types, name, result CLSID)
    _prop_map_get_ = {
        # Method 'Application' returns object of type '_Application'
        "Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
        "Class": (61450, 2, (3, 0), (), "Class", None),
        "Count": (80, 2, (3, 0), (), "Count", None),
        "Parent": (61441, 2, (9, 0), (), "Parent", None),
        # Method 'Session' returns object of type '_NameSpace'
        "Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
    }
    _prop_map_put_ = {
    }
    #This class has Item property/method which may take args - allow indexed access
    def __getitem__(self, item):
        return self._get_good_object_(self._oleobj_.Invoke(*(81, LCID, 1, 1, item)), "Item")
    #This class has Count() property - allow len(ob) to provide this
    def __len__(self):
        return self._ApplyTypes_(*(80, 2, (3, 0), (), "Count", None))
    #This class has a __len__ - this is needed so 'if object:' always returns TRUE.
    # NOTE: __nonzero__ is the Python 2 truth protocol (Python 3 uses __bool__).
    def __nonzero__(self):
        return True
class Recipient(DispatchBaseClass):
    """win32com makepy-generated wrapper for the Outlook ``Recipient``
    dispatch interface."""
    CLSID = IID('{00063045-0000-0000-C000-000000000046}')
    coclass_clsid = None

    def Delete(self):
        """Invoke ``Delete`` (dispid 110); no result."""
        return self._oleobj_.InvokeTypes(110, LCID, 1, (24, 0), (),)

    def FreeBusy(self, Start=defaultNamedNotOptArg, MinPerChar=defaultNamedNotOptArg, CompleteFormat=defaultNamedOptArg):
        """Invoke ``FreeBusy`` (dispid 111); returns a string result."""
        # Result is a Unicode object - return as-is for this version of Python
        return self._oleobj_.InvokeTypes(111, LCID, 1, (8, 0), ((7, 1), (3, 1), (12, 17)),Start
            , MinPerChar, CompleteFormat)

    def Resolve(self):
        """Invoke ``Resolve`` (dispid 113); returns a boolean result."""
        return self._oleobj_.InvokeTypes(113, LCID, 1, (11, 0), (),)

    # Property map, per makepy convention:
    # name -> (dispid, invoke kind, (VT type, flags), arg types, name, result CLSID)
    _prop_map_get_ = {
        "Address": (12291, 2, (8, 0), (), "Address", None),
        # Method 'AddressEntry' returns object of type 'AddressEntry'
        "AddressEntry": (121, 2, (9, 0), (), "AddressEntry", '{0006304B-0000-0000-C000-000000000046}'),
        # Method 'Application' returns object of type '_Application'
        "Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
        "AutoResponse": (106, 2, (8, 0), (), "AutoResponse", None),
        "Class": (61450, 2, (3, 0), (), "Class", None),
        "DisplayType": (14592, 2, (3, 0), (), "DisplayType", None),
        "EntryID": (61470, 2, (8, 0), (), "EntryID", None),
        "Index": (91, 2, (3, 0), (), "Index", None),
        "MeetingResponseStatus": (102, 2, (3, 0), (), "MeetingResponseStatus", None),
        "Name": (12289, 2, (8, 0), (), "Name", None),
        "Parent": (109, 2, (9, 0), (), "Parent", None),
        "Resolved": (100, 2, (11, 0), (), "Resolved", None),
        # Method 'Session' returns object of type '_NameSpace'
        "Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
        "TrackingStatus": (118, 2, (3, 0), (), "TrackingStatus", None),
        "TrackingStatusTime": (119, 2, (7, 0), (), "TrackingStatusTime", None),
        "Type": (3093, 2, (3, 0), (), "Type", None),
    }
    _prop_map_put_ = {
        "AddressEntry": ((121, LCID, 8, 0),()),
        "AutoResponse": ((106, LCID, 4, 0),()),
        "TrackingStatus": ((118, LCID, 4, 0),()),
        "TrackingStatusTime": ((119, LCID, 4, 0),()),
        "Type": ((3093, LCID, 4, 0),()),
    }
class Recipients(DispatchBaseClass):
    """win32com makepy-generated wrapper for the Outlook ``Recipients``
    collection (Add/Item/Remove/ResolveAll plus indexed and len() access)."""
    CLSID = IID('{0006303B-0000-0000-C000-000000000046}')
    coclass_clsid = None

    # Result is of type Recipient
    def Add(self, Name=defaultNamedNotOptArg):
        """Invoke ``Add`` (dispid 111); wraps the result as a Recipient."""
        ret = self._oleobj_.InvokeTypes(111, LCID, 1, (9, 0), ((8, 1),),Name
            )
        if ret is not None:
            ret = Dispatch(ret, 'Add', '{00063045-0000-0000-C000-000000000046}')
        return ret

    # Result is of type Recipient
    def Item(self, Index=defaultNamedNotOptArg):
        """Invoke ``Item`` (dispid 81); wraps the result as a Recipient."""
        ret = self._oleobj_.InvokeTypes(81, LCID, 1, (9, 0), ((12, 1),),Index
            )
        if ret is not None:
            ret = Dispatch(ret, 'Item', '{00063045-0000-0000-C000-000000000046}')
        return ret

    def Remove(self, Index=defaultNamedNotOptArg):
        """Invoke ``Remove`` (dispid 84); no result."""
        return self._oleobj_.InvokeTypes(84, LCID, 1, (24, 0), ((3, 1),),Index
            )

    def ResolveAll(self):
        """Invoke ``ResolveAll`` (dispid 126); returns a boolean result."""
        return self._oleobj_.InvokeTypes(126, LCID, 1, (11, 0), (),)

    # Property map, per makepy convention:
    # name -> (dispid, invoke kind, (VT type, flags), arg types, name, result CLSID)
    _prop_map_get_ = {
        # Method 'Application' returns object of type '_Application'
        "Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
        "Class": (61450, 2, (3, 0), (), "Class", None),
        "Count": (80, 2, (3, 0), (), "Count", None),
        "Parent": (61441, 2, (9, 0), (), "Parent", None),
        # Method 'Session' returns object of type '_NameSpace'
        "Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
    }
    _prop_map_put_ = {
    }
    #This class has Item property/method which may take args - allow indexed access
    def __getitem__(self, item):
        return self._get_good_object_(self._oleobj_.Invoke(*(81, LCID, 1, 1, item)), "Item")
    #This class has Count() property - allow len(ob) to provide this
    def __len__(self):
        return self._ApplyTypes_(*(80, 2, (3, 0), (), "Count", None))
    #This class has a __len__ - this is needed so 'if object:' always returns TRUE.
    # NOTE: __nonzero__ is the Python 2 truth protocol (Python 3 uses __bool__).
    def __nonzero__(self):
        return True
class RecurrencePattern(DispatchBaseClass):
    """win32com makepy-generated wrapper for the Outlook ``RecurrencePattern``
    dispatch interface."""
    CLSID = IID('{00063044-0000-0000-C000-000000000046}')
    coclass_clsid = None

    # Result is of type AppointmentItem
    def GetOccurrence(self, StartDate=defaultNamedNotOptArg):
        """Invoke ``GetOccurrence`` (dispid 4111); wraps the result as an
        AppointmentItem when it supports IDispatch."""
        ret = self._oleobj_.InvokeTypes(4111, LCID, 1, (13, 0), ((7, 1),),StartDate
            )
        if ret is not None:
            # See if this IUnknown is really an IDispatch
            try:
                ret = ret.QueryInterface(pythoncom.IID_IDispatch)
            except pythoncom.error:
                return ret
            ret = Dispatch(ret, 'GetOccurrence', '{00061030-0000-0000-C000-000000000046}')
        return ret

    # Property map, per makepy convention:
    # name -> (dispid, invoke kind, (VT type, flags), arg types, name, result CLSID)
    _prop_map_get_ = {
        # Method 'Application' returns object of type '_Application'
        "Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
        "Class": (61450, 2, (3, 0), (), "Class", None),
        "DayOfMonth": (4096, 2, (3, 0), (), "DayOfMonth", None),
        "DayOfWeekMask": (4097, 2, (3, 0), (), "DayOfWeekMask", None),
        "Duration": (4109, 2, (3, 0), (), "Duration", None),
        "EndTime": (4108, 2, (7, 0), (), "EndTime", None),
        # Method 'Exceptions' returns object of type 'Exceptions'
        "Exceptions": (4110, 2, (9, 0), (), "Exceptions", '{0006304C-0000-0000-C000-000000000046}'),
        "Instance": (4099, 2, (3, 0), (), "Instance", None),
        "Interval": (4100, 2, (3, 0), (), "Interval", None),
        "MonthOfYear": (4102, 2, (3, 0), (), "MonthOfYear", None),
        "NoEndDate": (4107, 2, (11, 0), (), "NoEndDate", None),
        "Occurrences": (4101, 2, (3, 0), (), "Occurrences", None),
        "Parent": (61441, 2, (9, 0), (), "Parent", None),
        "PatternEndDate": (4098, 2, (7, 0), (), "PatternEndDate", None),
        "PatternStartDate": (4104, 2, (7, 0), (), "PatternStartDate", None),
        "RecurrenceType": (4103, 2, (3, 0), (), "RecurrenceType", None),
        "Regenerate": (4106, 2, (11, 0), (), "Regenerate", None),
        # Method 'Session' returns object of type '_NameSpace'
        "Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
        "StartTime": (4105, 2, (7, 0), (), "StartTime", None),
    }
    _prop_map_put_ = {
        "DayOfMonth": ((4096, LCID, 4, 0),()),
        "DayOfWeekMask": ((4097, LCID, 4, 0),()),
        "Duration": ((4109, LCID, 4, 0),()),
        "EndTime": ((4108, LCID, 4, 0),()),
        "Instance": ((4099, LCID, 4, 0),()),
        "Interval": ((4100, LCID, 4, 0),()),
        "MonthOfYear": ((4102, LCID, 4, 0),()),
        "NoEndDate": ((4107, LCID, 4, 0),()),
        "Occurrences": ((4101, LCID, 4, 0),()),
        "PatternEndDate": ((4098, LCID, 4, 0),()),
        "PatternStartDate": ((4104, LCID, 4, 0),()),
        "RecurrenceType": ((4103, LCID, 4, 0),()),
        "Regenerate": ((4106, LCID, 4, 0),()),
        "StartTime": ((4105, LCID, 4, 0),()),
    }
class Selection(DispatchBaseClass):
    """win32com makepy-generated wrapper for the Outlook ``Selection``
    collection (Item plus indexed and len() access)."""
    CLSID = IID('{00063087-0000-0000-C000-000000000046}')
    coclass_clsid = None

    def Item(self, Index=defaultNamedNotOptArg):
        """Invoke ``Item`` (dispid 81); wraps any returned IDispatch."""
        ret = self._oleobj_.InvokeTypes(81, LCID, 1, (9, 0), ((12, 1),),Index
            )
        if ret is not None:
            ret = Dispatch(ret, 'Item', None)
        return ret

    # Property map, per makepy convention:
    # name -> (dispid, invoke kind, (VT type, flags), arg types, name, result CLSID)
    _prop_map_get_ = {
        # Method 'Application' returns object of type '_Application'
        "Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
        "Class": (61450, 2, (3, 0), (), "Class", None),
        "Count": (80, 2, (3, 0), (), "Count", None),
        "Parent": (61441, 2, (9, 0), (), "Parent", None),
        # Method 'Session' returns object of type '_NameSpace'
        "Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
    }
    _prop_map_put_ = {
    }
    #This class has Item property/method which may take args - allow indexed access
    def __getitem__(self, item):
        return self._get_good_object_(self._oleobj_.Invoke(*(81, LCID, 1, 1, item)), "Item")
    #This class has Count() property - allow len(ob) to provide this
    def __len__(self):
        return self._ApplyTypes_(*(80, 2, (3, 0), (), "Count", None))
    #This class has a __len__ - this is needed so 'if object:' always returns TRUE.
    # NOTE: __nonzero__ is the Python 2 truth protocol (Python 3 uses __bool__).
    def __nonzero__(self):
        return True
class SyncObjectEvents:
    """win32com makepy-generated event sink for SyncObject events.

    Advises itself on the source object's connection point at construction
    time and un-advises on ``close()`` / ``__del__``.
    """
    CLSID = CLSID_Sink = IID('{00063085-0000-0000-C000-000000000046}')
    coclass_clsid = IID('{00063084-0000-0000-C000-000000000046}')
    _public_methods_ = [] # For COM Server support
    # Maps source dispids to handler method names defined on this sink.
    _dispid_to_func_ = {
        61443 : "OnError",
        61442 : "OnProgress",
        61441 : "OnSyncStart",
        61444 : "OnSyncEnd",
        }

    def __init__(self, oobj = None):
        if oobj is None:
            self._olecp = None
        else:
            import win32com.server.util
            from win32com.server.policy import EventHandlerPolicy
            # Connect this sink to the source's connection point.
            cpc=oobj._oleobj_.QueryInterface(pythoncom.IID_IConnectionPointContainer)
            cp=cpc.FindConnectionPoint(self.CLSID_Sink)
            cookie=cp.Advise(win32com.server.util.wrap(self, usePolicy=EventHandlerPolicy))
            self._olecp,self._olecp_cookie = cp,cookie
    def __del__(self):
        try:
            self.close()
        except pythoncom.com_error:
            pass
    def close(self):
        # Unadvise exactly once; subsequent calls are no-ops.
        if self._olecp is not None:
            cp,cookie,self._olecp,self._olecp_cookie = self._olecp,self._olecp_cookie,None,None
            cp.Unadvise(cookie)
    def _query_interface_(self, iid):
        import win32com.server.util
        if iid==self.CLSID_Sink: return win32com.server.util.wrap(self)
    # Event Handlers
    # If you create handlers, they should have the following prototypes:
    #	def OnError(self, Code=defaultNamedNotOptArg, Description=defaultNamedNotOptArg):
    #	def OnProgress(self, State=defaultNamedNotOptArg, Description=defaultNamedNotOptArg, Value=defaultNamedNotOptArg, Max=defaultNamedNotOptArg):
    #	def OnSyncStart(self):
    #	def OnSyncEnd(self):
class SyncObjects(DispatchBaseClass):
CLSID = IID('{00063086-0000-0000-C000-000000000046}')
coclass_clsid = None
# Result is of type SyncObject
def Item(self, Index=defaultNamedNotOptArg):
ret = self._oleobj_.InvokeTypes(81, LCID, 1, (13, 0), ((12, 1),),Index
)
if ret is not None:
# See if this IUnknown is really an IDispatch
try:
ret = ret.QueryInterface(pythoncom.IID_IDispatch)
except pythoncom.error:
return ret
ret = Dispatch(ret, 'Item', '{00063084-0000-0000-C000-000000000046}')
return ret
_prop_map_get_ = {
# Method 'Application' returns object of type '_Application'
"Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
"Class": (61450, 2, (3, 0), (), "Class", None),
"Count": (80, 2, (3, 0), (), "Count", None),
"Parent": (61441, 2, (9, 0), (), "Parent", None),
# Method 'Session' returns object of type '_NameSpace'
"Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
}
_prop_map_put_ = {
}
#This class has Item property/method which may | |
import dateutil
from typing import List
import numpy as np
import pandas as pd
from macpie._config import get_option
from macpie import lltools, strtools
def add_diff_days(
    df: pd.DataFrame, col_start: str, col_end: str, diff_days_col: str = None, inplace=False
):
    """Adds a column to DataFrame containing the number of days between
    ``col_start`` and ``col_end``.

    :param df: DataFrame
    :param col_start: column containing the start date
    :param col_end: column containing the end date
    :param diff_days_col: name of the new column; defaults to the
        ``column.system.diff_days`` option
    :param inplace: if False (default), operate on a copy and return it;
        if True, modify ``df`` in place and return None
    :raises KeyError: if ``col_start`` and ``col_end`` name the same column
    """
    if diff_days_col is None:
        diff_days_col = get_option("column.system.diff_days")

    if col_start == col_end:
        # BUG FIX: the 'f' prefix was missing, so the message contained the
        # literal text "{col_start}=={col_end}" instead of the column names.
        raise KeyError(f"date columns have the same name: {col_start}=={col_end}")

    if not inplace:
        df = df.copy()

    df[diff_days_col] = df[col_end] - df[col_start]
    # Convert the timedelta to a float number of days.
    df[diff_days_col] = df[diff_days_col] / np.timedelta64(1, "D")

    if not inplace:
        return df
def any_duplicates(df: pd.DataFrame, col: str, ignore_nan: bool = False):
    """Return ``True`` if column ``col`` contains any duplicated values.

    :param df: DataFrame
    :param col: column to check for duplicates (case-insensitive lookup)
    :param ignore_nan: Whether ``nan`` values should be excluded first
    """
    col = get_col_name(df, col)
    series = df[col].dropna() if ignore_nan is True else df[col]
    return series.duplicated().any()
def assimilate(left: pd.DataFrame, right: pd.DataFrame):
    """Cast the columns of ``right`` to the dtypes of the same-named columns
    in ``left``, so that ``right`` "looks like" ``left``.

    :param left: left DataFrame (source of the target dtypes)
    :param right: right DataFrame (receives the casts)
    """
    right_names = set(right.columns)
    # Only columns present in BOTH frames can be assimilated.
    shared_dtypes = {
        name: dtype
        for name, dtype in left.dtypes.to_dict().items()
        if name in right_names
    }
    for name, dtype in shared_dtypes.items():
        try:
            right = right.astype({name: dtype})
        except pd.errors.IntCastingNaNError:
            # NaN-containing floats cannot become ints; keep the column as-is.
            pass
    return right
def diff_cols(left: pd.DataFrame, right: pd.DataFrame, cols_ignore=set(), cols_ignore_pat=None):
    """Return a length-2 tuple: the set of columns only in ``left`` and the
    set of columns only in ``right``, after removing ignored columns.

    :param left: left DataFrame
    :param right: right DataFrame
    :param cols_ignore: columns to ignore
    :param cols_ignore_pat: Character sequence or regular expression;
        matching column names are ignored. Defaults to None, which uses
        the pattern ``'$^'`` to match nothing.
    """
    left = drop_cols(left, cols_list=cols_ignore, cols_pat=cols_ignore_pat)
    right = drop_cols(right, cols_list=cols_ignore, cols_pat=cols_ignore_pat)

    # Subtracting cols_ignore again is redundant after drop_cols but kept
    # for safety; it is a no-op on already-dropped names.
    left_names = set(left.columns) - set(cols_ignore)
    right_names = set(right.columns) - set(cols_ignore)

    return (left_names - right_names, right_names - left_names)
def diff_rows(left: pd.DataFrame, right: pd.DataFrame, cols_ignore=set(), cols_ignore_pat=None):
    """If ``left`` and ``right`` share the same columns, returns a DataFrame
    containing rows that differ.

    :param left: left DataFrame
    :param right: right DataFrame
    :param cols_ignore: a list of any columns to ignore
    :param cols_ignore_pat: Character sequence or regular expression.
        Column names that match will be ignored.
    :raises KeyError: if the frames do not share the same columns after
        dropping the ignored ones
    """
    left = drop_cols(left, cols_list=cols_ignore, cols_pat=cols_ignore_pat)
    right = drop_cols(right, cols_list=cols_ignore, cols_pat=cols_ignore_pat)

    left_only_cols, right_only_cols = diff_cols(left, right)

    if left_only_cols == right_only_cols == set():
        indicator_col_name = get_option("column.system.prefix") + "_diff_rows_merge"
        if isinstance(left.columns, pd.MultiIndex) or isinstance(right.columns, pd.MultiIndex):
            # TODO: Doing a pd.merge() on MultiIndex dataframes with indicator
            # set to True/string resulted in the following error:
            # pandas.errors.PerformanceWarning: dropping on a non-lexsorted multi-index
            # without a level parameter may impact performance
            # Flatten the column MultiIndexes to get around this
            left.columns = left.columns.to_flat_index()
            right.columns = right.columns.to_flat_index()
        # Outer merge marks each row left_only/right_only/both; anything
        # that is not "both" differs between the two frames.
        merged_df = pd.merge(left, right, indicator=indicator_col_name, how="outer")
        changed_rows_df = merged_df[merged_df[indicator_col_name] != "both"]
        return changed_rows_df

    raise KeyError("Dataframes do not share the same columns")
def drop_cols(df: pd.DataFrame, cols_list=None, cols_pat=None):
    """Drop the specified columns from ``df`` and return the result.

    :param df: DataFrame
    :param cols_list: columns to drop; missing names are silently ignored.
        Defaults to None (drop no explicit columns).
    :param cols_pat: Character sequence or regular expression.
        Column names that match will be dropped.
        Defaults to None, which uses the pattern
        ``'$^'`` to match nothing to ignore nothing
    """
    # FIX: avoid a mutable default argument (was ``cols_list=set()``);
    # None means "no explicit columns to drop".
    if cols_list is None:
        cols_list = set()
    # Default pattern is to match nothing to ignore nothing
    cols_pat = "$^" if cols_pat is None else cols_pat

    if isinstance(df.columns, pd.MultiIndex):
        # Match the pattern against the innermost level of the MultiIndex.
        last_level = df.columns.nlevels - 1
        cols = df.columns.get_level_values(last_level)
    else:
        cols = df.columns

    cols_match_pat = cols.str.contains(cols_pat, regex=True)
    cols_to_keep = np.invert(cols_match_pat)
    df = df.loc[:, cols_to_keep]

    df = df.drop(columns=cols_list, errors="ignore")
    return df
def drop_suffix(df: pd.DataFrame, suffix):
    """Strip ``suffix`` from every column name that carries it.

    :param df: DataFrame
    :param suffix: suffix to drop
    """
    mapper = {name: strtools.strip_suffix(name, suffix) for name in df.columns}
    return df.rename(columns=mapper)
def equals(left: pd.DataFrame, right: pd.DataFrame, cols_ignore=set(), cols_ignore_pat=None):
    """Test two DataFrames for equality, ignoring selected columns.

    :param left: left DataFrame to compare
    :param right: right DataFrame to compare
    :param cols_ignore: DataFrame columns to ignore in comparison
    :param cols_ignore_pat: Character sequence or regular expression.
        Column names that match will be ignored in comparison.
        Defaults to None, which uses the pattern
        ``'$^'`` to match nothing to ignore nothing
    """
    # Both frames must use the same kind of column index (Index vs MultiIndex).
    if type(left.columns) != type(right.columns):
        raise TypeError(
            f"Left columns type ('{type(left.columns)}') is "
            f"different than right columns type ('{type(right.columns)}')"
        )

    if isinstance(left.columns, pd.MultiIndex) and left.columns.nlevels != right.columns.nlevels:
        raise ValueError("MultiIndexes have different levels.")

    left = drop_cols(left, cols_list=cols_ignore, cols_pat=cols_ignore_pat)
    right = drop_cols(right, cols_list=cols_ignore, cols_pat=cols_ignore_pat)

    # Best effort: align right's dtypes with left's before comparing.
    try:
        right = left.mac.assimilate(right)
    except NotImplementedError:
        pass

    return left.equals(right)
def flatten_multiindex(df: pd.DataFrame, axis: int = 0, delimiter: str = "_"):
    """Collapse a MultiIndex on the given ``axis`` into flat string labels
    by joining each entry's levels with ``delimiter``. Mutates ``df`` in
    place and returns None; does nothing when the axis is not a MultiIndex.

    :param df: DataFrame
    :param axis: ``0`` flattens the row index, ``1`` flattens the columns
    :param delimiter: string used to join the levels of each entry
    """
    if axis == 0 and isinstance(df.index, pd.MultiIndex):
        df.index = [delimiter.join(map(str, levels)) for levels in df.index]
    elif axis == 1 and isinstance(df.columns, pd.MultiIndex):
        df.columns = [delimiter.join(map(str, levels)) for levels in df.columns]
def get_col_name(df: pd.DataFrame, col_name):
    """Get the properly-cased column name from ``df``, ignoring case.

    :param df: DataFrame
    :param col_name: case-insensitive name of the column (str or list-like)
    :raises KeyError: if ``col_name`` is None, or is a str/list-like name
        that is not found
    """
    if col_name is None:
        raise KeyError("column to get is 'None'")

    if lltools.is_list_like(col_name):
        def _matches(existing):
            return lltools.list_like_str_equal(existing, col_name, case_sensitive=False)
    elif isinstance(col_name, str):
        def _matches(existing):
            return strtools.str_equals(existing, col_name, case_sensitive=False)
    else:
        # Any other key type falls through without raising, matching the
        # original behavior (implicitly returns None).
        return None

    for existing in df.columns:
        if _matches(existing):
            return existing
    raise KeyError(f"column not found: {col_name}")
def get_col_names(df: pd.DataFrame, col_names: List[str], strict=True):
    """Resolve each name in ``col_names`` to its properly-cased column name
    in ``df``, ignoring case.

    :param df: DataFrame
    :param col_names: list of case-insensitive column names
    :param strict: when True, a missing column raises KeyError; when False,
        None is returned in its place
    """
    resolved = []
    for requested in col_names:
        try:
            resolved.append(get_col_name(df, requested))
        except KeyError:
            if strict:
                raise
            resolved.append(None)
    return resolved
def insert(df: pd.DataFrame, col_name, col_value, allow_duplicates=False):
    """Append a column to the end of ``df`` in place; returns None, as
    :meth:`pandas.DataFrame.insert` does.

    :param df: DataFrame
    :param col_name: name of column to insert
    :param col_value: value of column to insert
    :param allow_duplicates: allow ``col_name`` to duplicate an existing column
    """
    last_position = len(df.columns)
    return df.insert(last_position, col_name, col_value, allow_duplicates=allow_duplicates)
def is_date_col(arr_or_dtype):
    """Return True when the given array or dtype is any datetime64 dtype.

    :param arr_or_dtype: The array or dtype to check
    """
    result = pd.api.types.is_datetime64_any_dtype(arr_or_dtype)
    return result
def mark_duplicates_by_cols(df: pd.DataFrame, cols: List[str]):
    """Flag duplicate rows (as identified by ``cols``) in a boolean system
    column and return ``df``.

    :param df: DataFrame (modified in place)
    :param cols: Only consider these columns for identifying duplicates
    """
    dup_col = get_option("column.system.duplicates")
    dup_mask = df.duplicated(subset=cols, keep=False)
    df[dup_col] = dup_mask
    return df
def replace_suffix(df: pd.DataFrame, old_suffix, new_suffix):
    """Swap ``old_suffix`` for ``new_suffix`` on every column name ending
    with ``old_suffix``.

    :param df: DataFrame
    :param old_suffix: suffix to replace
    :param new_suffix: suffix to replace ``old_suffix``
    """
    def _swap(name):
        if name.endswith(old_suffix):
            return name[: -len(old_suffix)] + new_suffix
        return name

    return df.rename(columns=_swap)
def to_datetime(df: pd.DataFrame, date_col_name):
"""Convert ``date_col_name`` column in ``df`` to datetime.
:param df: DataFrame
:param date_col_name: column to convert
"""
try:
_date_col = get_col_name(df, date_col_name)
if not is_date_col(df[_date_col]):
df[_date_col] = pd.to_datetime(df[_date_col])
return _date_col
except KeyError:
raise KeyError(f"Date column '{date_col_name}' in dataframe is not a valid column")
except ValueError:
raise TypeError(
f"Date column '{date_col_name}' in dataframe contains string(s) that "
"are not likely datetime(s)"
)
except TypeError as e:
raise TypeError(
(
f"Date column '{date_col_name}' in dataframe contains values "
f"that are not convertible to datetime"
)
) from e
except dateutil.parser.ParserError:
raise ValueError(
(
f"Date column '{date_col_name}' in dataframe could not be parsed "
f"as a datetime string"
)
)
except pd.errors.OutOfBoundsDatetime:
# Since pandas represents timestamps in nanosecond resolution,
# the time span that can be represented using a 64-bit integer
# is limited to approximately 584 years.
raise ValueError(
(
f"Date column '{date_col_name}' in dataframe contains | |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import automl_v1
from google.cloud.automl_v1.proto import annotation_spec_pb2
from google.cloud.automl_v1.proto import dataset_pb2
from google.cloud.automl_v1.proto import io_pb2
from google.cloud.automl_v1.proto import model_evaluation_pb2
from google.cloud.automl_v1.proto import model_pb2
from google.cloud.automl_v1.proto import service_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface.

    Records every call as a ``(method, request)`` tuple on the owning
    channel stub and replays canned responses popped from the end of the
    channel's ``responses`` list.
    """

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        self.channel_stub.requests.append((self.method, request))

        if not self.channel_stub.responses:
            return None
        canned = self.channel_stub.responses.pop()
        if isinstance(canned, Exception):
            raise canned
        # NOTE: falsy responses (e.g. empty messages) yield None, matching
        # the original stub's behavior.
        return canned if canned else None
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    Collects ``(method, request)`` tuples in ``requests`` and serves the
    canned ``responses`` through MultiCallableStub callables.
    """

    def __init__(self, responses=None):
        # FIX: the original default ``responses=[]`` was a mutable default
        # argument shared across every instance; use None instead. A list
        # passed by the caller is kept by reference, as before.
        self.responses = [] if responses is None else responses
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        # Serializers are ignored; the stub only records calls.
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Sentinel exception used by the tests to verify error propagation."""
class TestAutoMlClient(object):
def test_create_dataset(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
description = "description-1724546052"
example_count = 1517063674
etag = "etag3123477"
expected_response = {
"name": name,
"display_name": display_name,
"description": description,
"example_count": example_count,
"etag": etag,
}
expected_response = dataset_pb2.Dataset(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_create_dataset", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
dataset = {}
response = client.create_dataset(parent, dataset)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.CreateDatasetRequest(
parent=parent, dataset=dataset
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_dataset_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_create_dataset_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
dataset = {}
response = client.create_dataset(parent, dataset)
exception = response.exception()
assert exception.errors[0] == error
def test_update_dataset(self):
    """update_dataset: the stubbed Dataset is returned synchronously and the
    expected UpdateDatasetRequest is sent."""
    # Setup Expected Response
    name = "name3373707"
    display_name = "displayName1615086568"
    description = "description-1724546052"
    example_count = 1517063674
    etag = "etag3123477"
    expected_response = {
        "name": name,
        "display_name": display_name,
        "description": description,
        "example_count": example_count,
        "etag": etag,
    }
    expected_response = dataset_pb2.Dataset(**expected_response)
    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup Request
    dataset = {}
    update_mask = {}
    response = client.update_dataset(dataset, update_mask)
    assert expected_response == response
    assert len(channel.requests) == 1
    expected_request = service_pb2.UpdateDatasetRequest(
        dataset=dataset, update_mask=update_mask
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_update_dataset_exception(self):
    """update_dataset: a channel error is re-raised to the caller."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup request
    dataset = {}
    update_mask = {}
    with pytest.raises(CustomException):
        client.update_dataset(dataset, update_mask)
def test_get_dataset(self):
    """get_dataset: the stubbed Dataset is returned and the expected
    GetDatasetRequest is sent."""
    # Setup Expected Response
    name_2 = "name2-1052831874"
    display_name = "displayName1615086568"
    description = "description-1724546052"
    example_count = 1517063674
    etag = "etag3123477"
    expected_response = {
        "name": name_2,
        "display_name": display_name,
        "description": description,
        "example_count": example_count,
        "etag": etag,
    }
    expected_response = dataset_pb2.Dataset(**expected_response)
    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup Request
    name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
    response = client.get_dataset(name)
    assert expected_response == response
    assert len(channel.requests) == 1
    expected_request = service_pb2.GetDatasetRequest(name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_get_dataset_exception(self):
    """get_dataset: a channel error is re-raised to the caller."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup request
    name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
    with pytest.raises(CustomException):
        client.get_dataset(name)
def test_list_datasets(self):
    """list_datasets: a single-page response iterates to one resource and the
    expected ListDatasetsRequest is sent."""
    # Setup Expected Response: empty next_page_token ends pagination.
    next_page_token = ""
    datasets_element = {}
    datasets = [datasets_element]
    expected_response = {"next_page_token": next_page_token, "datasets": datasets}
    expected_response = service_pb2.ListDatasetsResponse(**expected_response)
    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup Request
    parent = client.location_path("[PROJECT]", "[LOCATION]")
    paged_list_response = client.list_datasets(parent)
    resources = list(paged_list_response)
    assert len(resources) == 1
    assert expected_response.datasets[0] == resources[0]
    assert len(channel.requests) == 1
    expected_request = service_pb2.ListDatasetsRequest(parent=parent)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_list_datasets_exception(self):
    """list_datasets: a channel error surfaces only when the pager is consumed."""
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup request
    parent = client.location_path("[PROJECT]", "[LOCATION]")
    paged_list_response = client.list_datasets(parent)
    with pytest.raises(CustomException):
        list(paged_list_response)
def test_delete_dataset(self):
    """delete_dataset: a finished LRO yields Empty and the expected
    DeleteDatasetRequest is sent."""
    # Setup Expected Response
    expected_response = {}
    expected_response = empty_pb2.Empty(**expected_response)
    operation = operations_pb2.Operation(
        name="operations/test_delete_dataset", done=True
    )
    # Pack Empty into the operation's Any payload so result() unpacks it.
    operation.response.Pack(expected_response)
    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup Request
    name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
    response = client.delete_dataset(name)
    result = response.result()
    assert expected_response == result
    assert len(channel.requests) == 1
    expected_request = service_pb2.DeleteDatasetRequest(name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_delete_dataset_exception(self):
    """delete_dataset: an LRO completed with an error exposes it via exception()."""
    # Setup Response
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_delete_dataset_exception", done=True
    )
    operation.error.CopyFrom(error)
    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup Request
    name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
    response = client.delete_dataset(name)
    exception = response.exception()
    assert exception.errors[0] == error
def test_import_data(self):
    """import_data: a finished LRO yields Empty and the expected
    ImportDataRequest is sent."""
    # Setup Expected Response
    expected_response = {}
    expected_response = empty_pb2.Empty(**expected_response)
    operation = operations_pb2.Operation(
        name="operations/test_import_data", done=True
    )
    operation.response.Pack(expected_response)
    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup Request
    name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
    input_config = {}
    response = client.import_data(name, input_config)
    result = response.result()
    assert expected_response == result
    assert len(channel.requests) == 1
    expected_request = service_pb2.ImportDataRequest(
        name=name, input_config=input_config
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_import_data_exception(self):
    """import_data: an LRO completed with an error exposes it via exception()."""
    # Setup Response
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_import_data_exception", done=True
    )
    operation.error.CopyFrom(error)
    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup Request
    name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
    input_config = {}
    response = client.import_data(name, input_config)
    exception = response.exception()
    assert exception.errors[0] == error
def test_export_data(self):
    """export_data: a finished LRO yields Empty and the expected
    ExportDataRequest is sent."""
    # Setup Expected Response
    expected_response = {}
    expected_response = empty_pb2.Empty(**expected_response)
    operation = operations_pb2.Operation(
        name="operations/test_export_data", done=True
    )
    operation.response.Pack(expected_response)
    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup Request
    name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
    output_config = {}
    response = client.export_data(name, output_config)
    result = response.result()
    assert expected_response == result
    assert len(channel.requests) == 1
    expected_request = service_pb2.ExportDataRequest(
        name=name, output_config=output_config
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_export_data_exception(self):
    """export_data: an LRO completed with an error exposes it via exception()."""
    # Setup Response
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_export_data_exception", done=True
    )
    operation.error.CopyFrom(error)
    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup Request
    name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
    output_config = {}
    response = client.export_data(name, output_config)
    exception = response.exception()
    assert exception.errors[0] == error
def test_get_annotation_spec(self):
    """get_annotation_spec: the stubbed AnnotationSpec is returned and the
    expected GetAnnotationSpecRequest is sent."""
    # Setup Expected Response
    name_2 = "name2-1052831874"
    display_name = "displayName1615086568"
    example_count = 1517063674
    expected_response = {
        "name": name_2,
        "display_name": display_name,
        "example_count": example_count,
    }
    expected_response = annotation_spec_pb2.AnnotationSpec(**expected_response)
    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup Request
    name = client.annotation_spec_path(
        "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]"
    )
    response = client.get_annotation_spec(name)
    assert expected_response == response
    assert len(channel.requests) == 1
    expected_request = service_pb2.GetAnnotationSpecRequest(name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_get_annotation_spec_exception(self):
    """get_annotation_spec: a channel error is re-raised to the caller."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = automl_v1.AutoMlClient()
    # Setup request
    name = client.annotation_spec_path(
        "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]"
    )
    with pytest.raises(CustomException):
        client.get_annotation_spec(name)
def test_create_model(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
dataset_id = "datasetId-2115646910"
etag = "etag3123477"
expected_response = {
"name": name,
"display_name": display_name,
"dataset_id": dataset_id,
"etag": etag,
}
expected_response = model_pb2.Model(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_create_model", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch | |
<gh_stars>0
"""
Library Features:
Name: lib_ecmwf_0100_variables
Author(s): <NAME> (<EMAIL>)
Date: '2020210'
Version: '1.5.0'
"""
#######################################################################################
# Library
import logging
import numpy as np
from src.hyde.algorithm.io.nwp.ecmwf.lib_ecmwf_io_generic import reshape_var3d, create_darray_3d
from src.hyde.algorithm.settings.nwp.ecmwf.lib_ecmwf_args import logger_name
from src.hyde.model.astronomic_radiation.lib_astrorad_utils import computeCloudFactor
from src.hyde.driver.model.astronomic_radiation.drv_model_astrorad_exec import AstroRadModel
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to define variable attribute(s)
def getVarAttributes(var_attrs_in):
    """Merge a sequence of attribute dictionaries into a single dictionary.

    Args:
        var_attrs_in: iterable of dicts, one attribute mapping per variable.

    Returns:
        dict mapping each key to its single common value when every
        occurrence agrees, or to a list of the distinct values otherwise.
        (A single-element list value is unwrapped to the bare element,
        matching the historical behaviour.)
    """
    # Collect every value seen for each key, preserving first-seen order.
    var_attrs_tmp = {}
    for var_attrs_step in var_attrs_in:
        for var_attr_key, var_attr_value in var_attrs_step.items():
            var_attrs_tmp.setdefault(var_attr_key, []).append(var_attr_value)
    # Deduplicate with an order-preserving membership test instead of set():
    # this keeps the output deterministic and supports unhashable values.
    # (The previous implementation crashed with "TypeError: unhashable type"
    # when a key occurred in three or more dictionaries, because it called
    # set() on a nested list.)
    var_attr_out = {}
    for var_attr_key, var_attr_values in var_attrs_tmp.items():
        var_attr_unique = []
        for var_attr_value in var_attr_values:
            if var_attr_value not in var_attr_unique:
                var_attr_unique.append(var_attr_value)
        var_attr_value_out = var_attr_unique[0] if len(var_attr_unique) == 1 else var_attr_unique
        # Preserve the original unwrap of single-element list attributes.
        if isinstance(var_attr_value_out, list) and len(var_attr_value_out) == 1:
            var_attr_value_out = var_attr_value_out[0]
        var_attr_out[var_attr_key] = var_attr_value_out
    return var_attr_out
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute WindSpeed
def computeWindSpeed(var_dset, var_name,
                     var_time=None, var_geo_x=None, var_geo_y=None,
                     var_units=None, var_step_type=None):
    """Compute wind speed from the u/v wind components of ``var_dset``.

    Args:
        var_dset: dataset holding the two wind-component variables.
        var_name: sequence of three names: [u_component, v_component, speed_out].
        var_time, var_geo_x, var_geo_y: one-element lists with the coordinate
            names (defaults: 'valid_time', 'longitude', 'latitude').
        var_units: one-element list with the expected component units
            (default 'm s**-1').
        var_step_type: one-element list with the expected step type
            (default 'instant').

    Returns:
        Dataset containing both rebuilt input components and the computed
        wind speed, on (latitude, longitude, time) dimensions.

    Raises:
        IOError: when units, step type or component shapes are not the
            expected ones.
    """
    # Set args
    if var_step_type is None:
        var_step_type = ['instant']
    if var_units is None:
        var_units = ['m s**-1']
    if var_geo_y is None:
        var_geo_y = ['latitude']
    if var_geo_x is None:
        var_geo_x = ['longitude']
    if var_time is None:
        var_time = ['valid_time']
    # Parse args
    var_name_1 = list(var_name)[0]
    var_name_2 = list(var_name)[1]
    var_name_3 = list(var_name)[2]
    var_units = var_units[0]
    var_step_type = var_step_type[0]
    var_time = var_time[0]
    var_geo_x = var_geo_x[0]
    var_geo_y = var_geo_y[0]
    # Get values
    var_da_in_1 = var_dset[var_name_1]
    var_values_in_1 = var_da_in_1.values
    var_dims_in_1 = var_da_in_1.dims
    var_da_in_2 = var_dset[var_name_2]
    var_values_in_2 = var_da_in_2.values
    var_dims_in_2 = var_da_in_2.dims
    var_time = var_dset[var_name_1][var_time]
    var_geo_x = var_dset[var_name_1][var_geo_x]
    var_geo_y = var_dset[var_name_1][var_geo_y]
    # Move the time axis last when the source layout is time-leading.
    # NOTE(review): var_shape_in_1/var_shape_in_2 are only assigned inside
    # these branches; any other leading dimension would raise NameError
    # below -- confirm the expected input layouts.
    if (var_dims_in_1[0] == 'step') or (var_dims_in_1[0] == 'time'):
        var_values_in_1 = reshape_var3d(var_values_in_1)
        var_shape_in_1 = var_values_in_1.shape
    if (var_dims_in_2[0] == 'step') or (var_dims_in_2[0] == 'time'):
        var_values_in_2 = reshape_var3d(var_values_in_2)
        var_shape_in_2 = var_values_in_2.shape
    # Check attributes
    if not (var_units == 'm s-1') and not (var_units == 'm s**-1'):
        log_stream.error(' ===> Wind components units are not allowed! Check your data!')
        raise IOError('Data units is not allowed!')
    if not (var_step_type == 'instant') and not (var_step_type == 'instantaneous'):
        log_stream.error(' ===> Wind components allowed only in istant format! Check your data!')
        raise IOError('Data type is not allowed!')
    if not var_shape_in_1 == var_shape_in_2:
        log_stream.error(' ===> Wind dimensions are not the same! Check your data!')
        raise IOError('Data dimensions are not allowed!')
    else:
        var_shape_in = list({var_shape_in_1, var_shape_in_2})[0]
    # Compute the (u, v) vector magnitude, one time step at a time.
    var_values_out = np.zeros([var_shape_in[0], var_shape_in[1], var_shape_in[2]])
    var_values_out[:, :, :] = np.nan
    for var_step in range(0, var_shape_in[2]):
        var_values_step_1 = var_values_in_1[:, :, var_step]
        var_values_step_2 = var_values_in_2[:, :, var_step]
        # NOTE(review): the magnitude is scaled by 0.7 -- presumably an
        # empirical near-surface wind reduction factor; confirm provenance.
        var_values_out[:, :, var_step] = np.sqrt(var_values_step_1 ** 2 + var_values_step_2 ** 2) * 0.7
    # Rebuild the components and the result on (latitude, longitude, time).
    var_da_in_1 = create_darray_3d(var_values_in_1, var_time, var_geo_x, var_geo_y,
                                   dim_key_time='time', dim_key_x='longitude', dim_key_y='latitude',
                                   dim_name_x='longitude', dim_name_y='latitude', dim_name_time='time',
                                   dims_order=['latitude', 'longitude', 'time'])
    var_da_in_2 = create_darray_3d(var_values_in_2, var_time, var_geo_x, var_geo_y,
                                   dim_key_time='time', dim_key_x='longitude', dim_key_y='latitude',
                                   dim_name_x='longitude', dim_name_y='latitude', dim_name_time='time',
                                   dims_order=['latitude', 'longitude', 'time'])
    var_da_out = create_darray_3d(var_values_out, var_time, var_geo_x, var_geo_y,
                                  dim_key_time='time', dim_key_x='longitude', dim_key_y='latitude',
                                  dim_name_x='longitude', dim_name_y='latitude', dim_name_time='time',
                                  dims_order=['latitude', 'longitude', 'time'])
    var_dset_out = var_da_in_1.to_dataset(name=var_name_1)
    var_dset_out[var_name_2] = var_da_in_2
    var_dset_out[var_name_3] = var_da_out
    return var_dset_out
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute WindSpeed
def computeRain(var_dset, var_name,
                var_time=None, var_geo_x=None, var_geo_y=None,
                var_units=None, var_step_type=None):
    """Convert an accumulated rain variable of ``var_dset`` to millimetres.

    Args:
        var_dset: dataset holding the rain variable.
        var_name: one-element sequence with the rain variable name.
        var_time, var_geo_x, var_geo_y: one-element lists with the coordinate
            names (defaults: 'valid_time', 'longitude', 'latitude').
        var_units: one-element list with the source units; 'm' is converted
            to mm, 'mm' (and 'kg m**-2', equivalent to mm of water) is kept.
        var_step_type: one-element list, expected 'accum'/'accumulated'.

    Returns:
        Dataset with the rain variable in mm on (latitude, longitude, time).

    Raises:
        IOError: when units or step type are not the expected ones.
    """
    # Set args
    if var_step_type is None:
        var_step_type = ['accum']
    if var_units is None:
        var_units = ['m']
    if var_geo_y is None:
        var_geo_y = ['latitude']
    if var_geo_x is None:
        var_geo_x = ['longitude']
    if var_time is None:
        var_time = ['valid_time']
    # Parse args
    var_name = list(var_name)[0]
    var_units = var_units[0]
    var_step_type = var_step_type[0]
    var_time = var_time[0]
    var_geo_x = var_geo_x[0]
    var_geo_y = var_geo_y[0]
    # Get values
    var_da_in = var_dset[var_name]
    var_values_in = var_da_in.values
    var_dims_in = var_da_in.dims
    var_time = var_dset[var_time]
    var_geo_x = var_dset[var_geo_x]
    var_geo_y = var_dset[var_geo_y]
    # 1 kg m**-2 of water equals 1 mm of depth, so treat the units as mm.
    if (var_units == 'kg m**-2') or (var_units == 'Kg m**-2'):
        var_units = 'mm'
    # Dividing by the scale factor below converts m -> mm (factor 0.001)
    # or leaves mm unchanged (factor 1).
    if var_units == 'm':
        var_scale_factor = 0.001
    elif var_units == 'mm':
        var_scale_factor = 1
    else:
        log_stream.error(' ===> Rain components units are not allowed! Check your data!')
        raise IOError('Selected units are not allowed!')
    # Move the time axis last when the source layout is time-leading.
    # NOTE(review): var_shape_in is only assigned inside this branch; any
    # other leading dimension would raise NameError below -- TODO confirm.
    if (var_dims_in[0] == 'step') or (var_dims_in[0] == 'time'):
        var_values_in = reshape_var3d(var_values_in)
        var_shape_in = var_values_in.shape
    # Check attributes
    if not (var_units == 'mm') and not (var_units == 'm'):
        log_stream.error(' ===> Rain components units are not allowed! Check your data!')
        raise IOError('Data units is not allowed!')
    if not (var_step_type == 'accum') and not (var_step_type == 'accumulated'):
        log_stream.error(' ===> Rain components allowed only in istant format! Check your data!')
        raise IOError('Data type is not allowed!')
    # Apply the unit conversion, one time step at a time.
    var_values_out = np.zeros([var_shape_in[0], var_shape_in[1], var_shape_in[2]])
    var_values_out[:, :, :] = np.nan
    for var_step in range(0, var_shape_in[2]):
        var_values_step = var_values_in[:, :, var_step]
        var_values_out[:, :, var_step] = var_values_step / var_scale_factor
    var_da_out = create_darray_3d(var_values_out, var_time, var_geo_x, var_geo_y,
                                  dim_key_time='time', dim_key_x='longitude', dim_key_y='latitude',
                                  dim_name_x='longitude', dim_name_y='latitude', dim_name_time='time',
                                  dims_order=['latitude', 'longitude', 'time'])
    var_dset_out = var_da_out.to_dataset(name=var_name)
    return var_dset_out
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute AirTemperature
def computeAirTemperature(var_dset, var_name,
                          var_time=None, var_geo_x=None, var_geo_y=None,
                          var_units=None, var_step_type=None):
    """Convert an air-temperature variable of ``var_dset`` from Kelvin to Celsius.

    Args:
        var_dset: dataset holding the temperature variable.
        var_name: one-element sequence with the temperature variable name.
        var_time, var_geo_x, var_geo_y: one-element lists with the coordinate
            names (defaults: 'valid_time', 'longitude', 'latitude').
        var_units: one-element list, expected 'K'.
        var_step_type: one-element list, expected 'instant'/'instantaneous'.

    Returns:
        Dataset with the temperature in degrees C on (latitude, longitude, time).

    Raises:
        IOError: when units or step type are not the expected ones.
    """
    # Set args
    if var_step_type is None:
        var_step_type = ['instant']
    if var_units is None:
        var_units = ['K']
    if var_geo_y is None:
        var_geo_y = ['latitude']
    if var_geo_x is None:
        var_geo_x = ['longitude']
    if var_time is None:
        var_time = ['valid_time']
    # Parse args
    var_name = list(var_name)[0]
    var_units = var_units[0]
    var_step_type = var_step_type[0]
    var_time = var_time[0]
    var_geo_x = var_geo_x[0]
    var_geo_y = var_geo_y[0]
    # Get values
    var_da_in = var_dset[var_name]
    var_values_in = var_da_in.values
    var_dims_in = var_da_in.dims
    var_time = var_dset[var_time]
    var_geo_x = var_dset[var_geo_x]
    var_geo_y = var_dset[var_geo_y]
    # Move the time axis last when the source layout is time-leading.
    # NOTE(review): var_shape_in is only assigned inside this branch; any
    # other leading dimension would raise NameError below -- TODO confirm.
    if (var_dims_in[0] == 'step') or (var_dims_in[0] == 'time'):
        var_values_in = reshape_var3d(var_values_in)
        var_shape_in = var_values_in.shape
    # Check attributes
    if not (var_units == 'K'):
        log_stream.error(' ===> Air Temperature components units are not allowed! Check your data!')
        raise IOError('Data units is not allowed!')
    if not (var_step_type == 'instant') and not (var_step_type == 'instantaneous'):
        log_stream.error(' ===> Air Temperature components allowed only in istant format! Check your data!')
        raise IOError('Data type is not allowed!')
    # Kelvin -> Celsius, one time step at a time.
    var_values_out = np.zeros([var_shape_in[0], var_shape_in[1], var_shape_in[2]])
    var_values_out[:, :, :] = np.nan
    for var_step in range(0, var_shape_in[2]):
        var_values_step = var_values_in[:, :, var_step]
        var_values_out[:, :, var_step] = var_values_step - 273.15
    var_da_out = create_darray_3d(var_values_out, var_time, var_geo_x, var_geo_y,
                                  dim_key_time='time', dim_key_x='longitude', dim_key_y='latitude',
                                  dim_name_x='longitude', dim_name_y='latitude', dim_name_time='time',
                                  dims_order=['latitude', 'longitude', 'time'])
    var_dset_out = var_da_out.to_dataset(name=var_name)
    return var_dset_out
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute RelativeHumidity
def computeRelativeHumidity(var_dset, var_name,
                            var_time=None, var_geo_x=None, var_geo_y=None,
                            var_units=None, var_step_type=None):
    """Clamp a relative-humidity variable of ``var_dset`` to the [0, 100] % range.

    Args:
        var_dset: dataset holding the relative-humidity variable.
        var_name: one-element sequence with the variable name.
        var_time, var_geo_x, var_geo_y: one-element lists with the coordinate
            names (defaults: 'valid_time', 'longitude', 'latitude').
        var_units: one-element list, expected '%'.
        var_step_type: one-element list, expected 'instant'/'instantaneous'.

    Returns:
        Dataset with the clamped values on (latitude, longitude, time).

    Raises:
        IOError: when units or step type are not the expected ones.
    """
    # Set args
    if var_step_type is None:
        var_step_type = ['instant']
    if var_units is None:
        var_units = ['%']
    if var_geo_y is None:
        var_geo_y = ['latitude']
    if var_geo_x is None:
        var_geo_x = ['longitude']
    if var_time is None:
        var_time = ['valid_time']
    # Parse args
    var_name = list(var_name)[0]
    var_units = var_units[0]
    var_step_type = var_step_type[0]
    var_time = var_time[0]
    var_geo_x = var_geo_x[0]
    var_geo_y = var_geo_y[0]
    # Get values
    var_da_in = var_dset[var_name]
    var_values_in = var_da_in.values
    var_dims_in = var_da_in.dims
    var_time = var_dset[var_time]
    var_geo_x = var_dset[var_geo_x]
    var_geo_y = var_dset[var_geo_y]
    # Move the time axis last when the source layout is time-leading.
    # NOTE(review): var_shape_in is only assigned inside this branch; any
    # other leading dimension would raise NameError below -- TODO confirm.
    if (var_dims_in[0] == 'step') or (var_dims_in[0] == 'time'):
        var_values_in = reshape_var3d(var_values_in)
        var_shape_in = var_values_in.shape
    # Check attributes
    if not (var_units == '%'):
        log_stream.error(' ===> Relative Humidity components units are not allowed! Check your data!')
        raise IOError('Data units is not allowed!')
    if not (var_step_type == 'instant') and not (var_step_type == 'instantaneous'):
        log_stream.error(' ===> Relative Humidity components allowed only in istant format! Check your data!')
        raise IOError('Data type is not allowed!')
    # Clamp out-of-range values to [0, 100] %, one time step at a time.
    var_values_out = np.zeros([var_shape_in[0], var_shape_in[1], var_shape_in[2]])
    var_values_out[:, :, :] = np.nan
    for var_step in range(0, var_shape_in[2]):
        # NOTE(review): var_values_step is a view into var_values_in, so the
        # clamping below also mutates the input array -- confirm intended.
        var_values_step = var_values_in[:, :, var_step]
        var_idx_up_step = np.where(var_values_step > 100)
        var_idx_down_step = np.where(var_values_step < 0)
        var_values_step[var_idx_up_step[0], var_idx_up_step[1]] = 100
        var_values_step[var_idx_down_step[0], var_idx_down_step[1]] = 0
        var_values_out[:, :, var_step] = var_values_step
    var_da_out = create_darray_3d(var_values_out, var_time, var_geo_x, var_geo_y,
                                  dim_key_time='time', dim_key_x='longitude', dim_key_y='latitude',
                                  dim_name_x='longitude', dim_name_y='latitude', dim_name_time='time',
                                  dims_order=['latitude', 'longitude', 'time'])
    var_dset_out = var_da_out.to_dataset(name=var_name)
    return var_dset_out
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to convert rain to cloud factor
def convertRain2CloudFactor(var_dset, var_name,
                            var_time=None, var_geo_x=None, var_geo_y=None,
                            var_units=None, var_step_type=None):
    """Derive a cloud-factor variable from a rain variable of ``var_dset``.

    Args:
        var_dset: dataset holding the rain variable.
        var_name: two-element sequence: [rain_name, cloud_factor_name].
        var_time, var_geo_x, var_geo_y: one-element lists with the coordinate
            names (defaults: 'valid_time', 'longitude', 'latitude').
        var_units: two-element list: [rain_units, cloud_factor_units].
        var_step_type: two-element list: [rain_step_type, cloud_factor_step_type].

    Returns:
        Dataset containing the converted rain and the derived cloud factor.

    NOTE(review): the defaults for var_units (['%']) and var_step_type
    (['instant']) are single-element lists, but the code below indexes [1];
    calling this function without explicit two-element lists raises
    IndexError -- confirm intended call signature.
    """
    # Set args
    if var_step_type is None:
        var_step_type = ['instant']
    if var_units is None:
        var_units = ['%']
    if var_geo_y is None:
        var_geo_y = ['latitude']
    if var_geo_x is None:
        var_geo_x = ['longitude']
    if var_time is None:
        var_time = ['valid_time']
    # Parse args
    var_name_1 = list(var_name)[0]
    var_name_2 = list(var_name)[1]
    var_units_1 = var_units[0]
    # NOTE(review): var_units_2 and var_step_type_2 are parsed but never used.
    var_units_2 = var_units[1]
    var_step_type_1 = var_step_type[0]
    var_step_type_2 = var_step_type[1]
    var_time = var_time[0]
    var_geo_x = var_geo_x[0]
    var_geo_y = var_geo_y[0]
    # Get values
    var_da_in_1 = var_dset[var_name_1]
    var_values_in_1 = var_da_in_1.values
    var_dims_in_1 = var_da_in_1.dims
    var_time = var_dset[var_time]
    var_geo_x = var_dset[var_geo_x]
    var_geo_y = var_dset[var_geo_y]
    # Rebuild the rain array on (step, latitude, longitude) and convert its
    # units through computeRain.
    var_da_in_1 = create_darray_3d(var_values_in_1, var_time, var_geo_x, var_geo_y,
                                   dim_key_time='valid_time', dim_key_x='longitude', dim_key_y='latitude',
                                   dim_name_x='longitude', dim_name_y='latitude', dim_name_time='step',
                                   dims_order=['step', 'latitude', 'longitude'])
    var_dset_in_1 = var_da_in_1.to_dataset(name=var_name_1)
    var_dset_out = computeRain(var_dset_in_1, [var_name_1], var_units=[var_units_1], var_step_type=[var_step_type_1])
    # Map the converted rain onto a cloud factor and attach it to the output.
    var_values_tmp = var_dset_out[var_name_1].values
    var_values_cf = computeCloudFactor(var_values_tmp)
    var_da_cf = create_darray_3d(var_values_cf, var_time, var_geo_x, var_geo_y,
                                 dim_key_time='time', dim_key_x='longitude', dim_key_y='latitude',
                                 dim_name_x='longitude', dim_name_y='latitude', dim_name_time='time',
                                 dims_order=['latitude', 'longitude', 'time'])
    var_dset_out[var_name_2] = var_da_cf
    return var_dset_out
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute astronomic radiation
def computeAstronomicRadiation(var_dset, geo_dset, var_name,
var_time=None, var_geo_x=None, var_geo_y=None,
var_tag_cf="CloudFactor",
var_tag_ar="IncomingRadiation", var_tag_k="ShortWaveRadiation",
var_units='W/m^2', var_step_type='instant'):
| |
<gh_stars>1-10
# coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for attention."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import expert_utils
import tensorflow.compat.v1 as tf
def multihead_graph_attention(query_antecedent,
                              memory_antecedent,
                              bias,
                              total_key_depth,
                              total_value_depth,
                              output_depth,
                              num_heads,
                              dropout_rate,
                              image_shapes=None,
                              attention_type="edge_vector",
                              name="multihead_graph_attention",
                              save_weights_to=None,
                              make_image_summary=True,
                              dropout_broadcast_dims=None,
                              adjacency_matrix=None,
                              num_edge_types=5,
                              vars_3d=False,
                              **kwargs):
    """Multihead scaled-dot-product attention with input/output transformations.
    Args:
      query_antecedent: a Tensor with shape [batch, length_q, channels]
      memory_antecedent: a Tensor with shape [batch, length_m, channels] or None
      bias: bias Tensor (see attention_bias())
      total_key_depth: an integer
      total_value_depth: an integer
      output_depth: an integer
      num_heads: an integer dividing total_key_depth and total_value_depth
      dropout_rate: a floating point number
      image_shapes: optional tuple of integer scalars.
        see comments for attention_image_summary()
      attention_type: a string, either "dot_product", "dot_product_relative",
        "local_mask_right", "local_unmasked", "masked_dilated_1d",
        "unmasked_dilated_1d", graph, or any attention function
        with the signature (query, key, value, **kwargs)
      name: an optional string.
      save_weights_to: an optional dictionary to capture attention weights
        for vizualization; the weights tensor will be appended there under
        a string key created from the variable scope (including name).
      make_image_summary: Whether to make an attention image summary.
      dropout_broadcast_dims: an optional list of integers less than 4
        specifying in which dimensions to broadcast the dropout decisions.
        saves memory.
      adjacency_matrix: an optional tensor of shape [batch, len_q, len_q]
        containing edge vectors for attention
      num_edge_types: number of edge types, an int
      vars_3d: use 3-dimensional variables for input/output transformations
      **kwargs (dict): Parameters for the attention function
    Returns:
      The result of the attention transformation. The output shape is
      [batch_size, length_q, output_depth]
    Raises:
      ValueError: if the key depth or value depth are not divisible by the
        number of attention heads.
    """
    if total_key_depth % num_heads != 0:
        raise ValueError("Key depth (%d) must be divisible by the number of "
                         "attention heads (%d)." % (total_key_depth, num_heads))
    if total_value_depth % num_heads != 0:
        raise ValueError("Value depth (%d) must be divisible by the number of "
                         "attention heads (%d)." % (total_value_depth, num_heads))
    vars_3d_num_heads = num_heads if vars_3d else None
    with tf.variable_scope(
        name,
        default_name="multihead_attention",
        values=[query_antecedent, memory_antecedent]):
        # Project the antecedents to queries, keys and values, then split
        # the depth dimension across heads.
        q, k, v = common_attention.compute_qkv(
            query_antecedent,
            memory_antecedent,
            total_key_depth,
            total_value_depth,
            vars_3d_num_heads=vars_3d_num_heads)
        q = common_attention.split_heads(q, num_heads)
        k = common_attention.split_heads(k, num_heads)
        v = common_attention.split_heads(v, num_heads)
        key_depth_per_head = total_key_depth // num_heads
        # Standard 1/sqrt(d_k) scaling of the queries (in the 3d-variable
        # case the scaling is handled inside compute_qkv).
        if not vars_3d:
            q *= key_depth_per_head**-0.5
        additional_returned_value = None
        if callable(attention_type):  # Generic way to extend multihead_attention
            x = attention_type(q, k, v, **kwargs)
            if isinstance(x, tuple):
                x, additional_returned_value = x  # Unpack
        elif attention_type == "edge_vector":
            x = graph_attention(
                q,
                k,
                v,
                bias,
                dropout_rate,
                image_shapes,
                save_weights_to=save_weights_to,
                make_image_summary=make_image_summary,
                dropout_broadcast_dims=dropout_broadcast_dims,
                adjacency_matrix=adjacency_matrix,
                num_edge_types=num_edge_types)
        # Merge the heads back into a single depth dimension.
        x = common_attention.combine_heads(x)
        # Set last dim specifically.
        x.set_shape(x.shape.as_list()[:-1] + [total_value_depth])
        if vars_3d:
            o_var = tf.get_variable(
                "o", [num_heads, total_value_depth // num_heads, output_depth])
            o_var = tf.reshape(o_var, [total_value_depth, output_depth])
            x = tf.tensordot(x, o_var, axes=1)
        else:
            x = common_layers.dense(
                x, output_depth, use_bias=False, name="output_transform")
        if additional_returned_value is not None:
            return x, additional_returned_value
        return x
@expert_utils.add_name_scope()
def make_edge_vectors(adjacency_matrix,
                      num_edge_types,
                      depth,
                      name=None):
    """Gets edge vectors for the edge types in the adjacency matrix.
    Args:
      adjacency_matrix: A [batch, num_nodes, num_nodes, num_edge_types] tensor.
      num_edge_types: Number of different edge types
      depth: Number of channels
      name: A optional string name for scoping
    Returns:
      A [batch, num_nodes, num_nodes, depth] vector of tensors
    """
    with tf.variable_scope(name, default_name="edge_vectors"):
        att_adj_vectors_shape = [num_edge_types, depth]
        adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix)
        # One learned embedding per edge type; initialized at stddev
        # depth**-0.5 and rescaled by depth**0.5 so the embeddings start
        # at unit scale.
        adj_vectors = (
            tf.get_variable(
                "adj_vectors",
                att_adj_vectors_shape,
                initializer=tf.random_normal_initializer(0, depth**-0.5)) *
            (depth**0.5))
        # Weight the per-type embeddings by the adjacency entries:
        # flatten to [batch*nodes*nodes, num_edge_types] and matmul.
        att_adj_vectors = tf.matmul(
            tf.reshape(tf.to_float(adjacency_matrix), [-1, num_edge_types]),
            adj_vectors)
        # Reshape to be [batch, num_nodes, num_nodes, depth].
        att_adj_vectors = tf.reshape(att_adj_vectors, [
            adjacency_matrix_shape[0], adjacency_matrix_shape[1],
            adjacency_matrix_shape[2], depth
        ])
        return att_adj_vectors
def graph_attention(q,
                    k,
                    v,
                    bias,
                    dropout_rate=0.0,
                    image_shapes=None,
                    name=None,
                    make_image_summary=True,
                    save_weights_to=None,
                    dropout_broadcast_dims=None,
                    adjacency_matrix=None,
                    num_edge_types=5):
  """graph attention.
  Args:
    q: a Tensor with shape [batch, heads, length_q, depth_k]
    k: a Tensor with shape [batch, heads, length_kv, depth_k]
    v: a Tensor with shape [batch, heads, length_kv, depth_v]
    bias: bias Tensor (see attention_bias())
    dropout_rate: a floating point number
    image_shapes: optional tuple of integer scalars.
      see comments for attention_image_summary()
    name: an optional string
    make_image_summary: True if you want an image summary.
    save_weights_to: an optional dictionary to capture attention weights
      for visualization; the weights tensor will be appended there under
      a string key created from the variable scope (including name).
    dropout_broadcast_dims: an optional list of integers less than 4
      specifying in which dimensions to broadcast the dropout decisions.
      saves memory.
    adjacency_matrix: optional matrix of [batch, length, length] ids indicating
      edge type
    num_edge_types: an int indicating number of edge types
  Returns:
    A Tensor of shape [batch, length, depth(q)]
  """
  with tf.variable_scope(
      name, default_name="dot_product_attention", values=[q, k, v]) as scope:
    # Standard dot-product attention scores.
    # [batch, num_heads, query_length, memory_length]
    logits = tf.matmul(q, k, transpose_b=True)
    if adjacency_matrix is not None:
      # Edge vectors share the per-head key depth so they can be scored
      # against q with a plain matmul.
      key_head_depth = common_layers.shape_list(q)[-1]
      adjacency_vectors = make_edge_vectors(
          adjacency_matrix,
          num_edge_types,
          key_head_depth,
          name=name)
      # transposing q to be [batch, length_q, heads, depth_k]
      # to allow for matmul with [batch, length_q, length_q, depth_k]
      q_t = tf.transpose(q, [0, 2, 1, 3])
      adj_logits = tf.matmul(q_t, adjacency_vectors, transpose_b=True)
      # Transpose back to [batch, heads, length_q, length_q] before adding
      # the edge contribution to the attention scores.
      logits += tf.transpose(adj_logits, [0, 2, 1, 3])
      # [batch, depth, num_nodes, num_nodes]
    if bias is not None:
      logits += bias
    weights = tf.nn.softmax(logits, name="attention_weights")
    if save_weights_to is not None:
      # Keyed by the resolved variable-scope name so multiple attention
      # layers can be captured into the same dictionary.
      save_weights_to[scope.name] = weights
    # dropping out the attention links for each of the heads
    weights = common_layers.dropout_with_broadcast_dims(
        weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
    if common_layers.should_generate_summaries() and make_image_summary:
      common_attention.attention_image_summary(weights, image_shapes)
    return tf.matmul(weights, v)
def _compute_edge_transforms(node_states,
                             depth,
                             num_transforms,
                             name="transform"):
  """Computes attention keys or values for every node and edge type.

  Let B be the number of batches, N the number of nodes in the graph,
  D the size of the node hidden states, K the size of the attention
  keys/queries (total_key_depth), V the size of the attention values
  (total_value_depth) and T the total number of transforms
  (num_transforms).

  * For each node N_j and edge type t, a key K_jt of size K is computed.
    When an edge of type t goes from node N_j to any other node, K_jt is
    the key that is in the attention process.
  * For each node N_j and edge type t, a value V_jt of size V is computed.
    When an edge of type t goes from node N_j to node N_i,
    Attention(Q_i, K_jt) produces a weight w_ijt. The message sent along
    this edge is w_ijt * V_jt.

  Args:
    node_states: A tensor of shape [B, L, D]
    depth: An integer (K or V)
    num_transforms: An integer (T)
    name: A name for the function

  Returns:
    The attention keys or values for each node and edge type
    (shape [B, N*T, K or V]).
  """
  shapes = common_layers.shape_list(node_states)
  batch, length = shapes[0], shapes[1]  # B and N.
  # A single dense layer produces all T transforms at once: [B, N, T*depth].
  transformed = common_layers.dense(
      node_states,
      depth * num_transforms,
      use_bias=False,
      name=name)
  # Make the transform axis explicit, then fold it into the node axis so
  # that the result is laid out as [B, N * T, depth].
  transformed = tf.reshape(transformed, [batch, length, num_transforms, depth])
  return tf.reshape(transformed, [batch, length * num_transforms, depth])
def compute_mpnn_qkv(node_states,
total_key_depth,
total_value_depth,
num_transforms):
"""Computes query, key and value for edge matrices.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the queries, keys, and values for attention.
* For each node N_i in the graph, a query Q_i of size K is computed. | |
not set
"source_scan_id": {"type": ["integer", "null"]},
# Scan UID of the source (scan of the standard), optional, null if not set
"source_scan_uid": {"type": ["string", "null"]}
}
}
def save_xrf_quant_fluor_json_file(file_path, fluor_data, *, overwrite_existing=False):
    r"""
    Save the results of processing of a scan of an XRF standard sample to a
    JSON file. The saved data is used later for quantitative analysis of
    experimental samples.

    Parameters
    ----------
    file_path: str
        absolute or relative path to the output JSON file. Missing directories
        along the path are created.
    fluor_data: dict
        results of processing of a scan of an XRF standard. Must conform to
        the built-in ``_xrf_quant_fluor_schema`` schema; the schema is verified
        before saving to ensure that the data can be successfully read back.
    overwrite_existing: bool
        if False (default), an existing file is never overwritten, since
        overwriting an existing parameter file would lead to loss of data.

    Returns
    -------
    no value is returned

    Raises
    ------
    IOError if the JSON file already exists and ``overwrite_existing`` is not enabled.
    jsonschema.ValidationError if schema validation fails
    """
    # The schema is fixed (not passed as a parameter) and shared between the
    # reading and writing routines; if the data format changes, the built-in
    # schema must be changed accordingly.
    jsonschema.validate(instance=fluor_data, schema=_xrf_quant_fluor_schema)

    # Normalize the path and create the target directory if needed.
    file_path = os.path.abspath(os.path.expanduser(file_path))
    dir_path = os.path.dirname(file_path)
    os.makedirs(dir_path, exist_ok=True)

    if os.path.isfile(file_path) and not overwrite_existing:
        raise IOError(f"File '{file_path}' already exists")

    with open(file_path, "w") as f:
        f.write(json.dumps(fluor_data, sort_keys=False, indent=4))
def load_xrf_quant_fluor_json_file(file_path, *, schema=_xrf_quant_fluor_schema):
    r"""
    Load quantitative data for an XRF standard sample from a JSON file and
    verify the schema.

    Parameters
    ----------
    file_path: str
        absolute or relative path to the JSON file. If the file does not
        exist, IOError is raised.
    schema: dict
        schema used for validation of the loaded data. If ``schema`` is
        ``None``, validation is skipped (this is not the default behavior).

    Returns
    -------
    dictionary containing quantitative fluorescence data on the XRF sample.

    Raises
    ------
    IOError is raised if the JSON file does not exist.
    jsonschema.ValidationError is raised if schema validation fails.
    """
    file_path = os.path.abspath(os.path.expanduser(file_path))
    if not os.path.isfile(file_path):
        raise IOError(f"File '{file_path}' does not exist")

    with open(file_path, "r") as f:
        fluor_data = json.load(f)

    if schema is not None:
        jsonschema.validate(instance=fluor_data, schema=schema)
    return fluor_data
def get_quant_fluor_data_dict(quant_param_dict, incident_energy):
    r"""
    Create the dictionary used for storage of data on an XRF reference sample.
    The field ``element_lines`` is a dictionary that stores, per emission line,
    the density of the element in the sample and the fluorescence of the line
    (the fluorescence is computed later, during processing of the reference
    scan, and is therefore initialized to None here).

    Parameters
    ----------
    quant_param_dict: dict
        information on the reference sample (loaded from a YAML configuration
        file); must satisfy the ``_xrf_standard_schema`` schema.
    incident_energy: float
        incident beam energy.

    Returns
    -------
    quant_fluor_data_dict: dict
        data on the XRF reference sample, including ``element_lines`` which
        maps the emission lines active at ``incident_energy`` to their
        fluorescence (None for now) and element density. The returned
        dictionary satisfies the ``_xrf_quant_fluor_schema`` schema.
    """
    if incident_energy is not None:
        # Clamp to non-negative and force the builtin 'float' type, since
        # 'float64' is not supported by the 'yaml' package.
        incident_energy = float(max(incident_energy, 0))

    # Total mass of each element over all compounds in the mix. The same
    # element may be a component of multiple compounds.
    element_mass = {}
    for compound, mass in quant_param_dict["compounds"].items():
        # Split compound/compound_density into elements/element_density.
        for el, ms in split_compound_mass(compound, mass).items():
            if el in element_mass:
                element_mass[el] += ms
            else:
                element_mass[el] = ms

    # Emission lines active at the incident energy. Fluorescence is unknown
    # at this point, so it is always None.
    element_lines = {}
    for el, ms in element_mass.items():
        for line in generate_eline_list([el], incident_energy=incident_energy):
            element_lines[line] = {"density": ms, "fluorescence": None}

    return {
        "name": quant_param_dict["name"],
        "serial": quant_param_dict["serial"],
        "description": quant_param_dict["description"],
        "element_lines": element_lines,
        "incident_energy": incident_energy,
        "detector_channel": None,
        "scaler_name": None,
        "distance_to_sample": None,
        "creation_time_local": None,
        "source_scan_id": None,
        "source_scan_uid": None,
    }
def fill_quant_fluor_data_dict(quant_fluor_data_dict, *, xrf_map_dict, scaler_name):
    r"""
    Compute average normalized fluorescence for the element lines that are part
    of the reference sample (listed in ``quant_fluor_data_dict["element_lines"]``)
    and present in ``xrf_map_dict``, writing each result to
    ``quant_fluor_data_dict["element_lines"][<element_line>]["fluorescence"]``.

    Fluorescence is set to None for element lines that are not present in
    ``xrf_map_dict``. Element lines present in ``xrf_map_dict`` but not part of
    the reference standard are ignored. If ``scaler_name`` is one of the keys
    of ``xrf_map_dict``, the fluorescence map is normalized by the scaler
    before the average value is computed; if it is not one of the keys, or is
    None, the average fluorescence is computed without normalization.

    Pixels along the edges of the map are very likely to contain outliers, so
    the edges are excluded from the computation whenever sufficient data is
    available (if the map contains more than 2 pixels along a dimension, the
    first and the last pixels are not used for averaging).

    Parameters
    ----------
    quant_fluor_data_dict: dict
        Dictionary with XRF reference sample data. This dictionary is modified
        by the function. It must satisfy the '_xrf_quant_fluor_schema' schema.
    xrf_map_dict: dict(array)
        Dictionary of 2D ndarrays containing XRF maps for element lines and
        scalers, keyed by emission-line name (e.g. Fe_K, S_K, Au_M etc.).
    scaler_name: str
        Scaler name. In order for the scaler to be applied, the name must match
        one of the keys of ``xrf_map_dict``; otherwise (or if None)
        fluorescence is not normalized.

    Returns
    -------
    No value is returned. Results are written into ``quant_fluor_data_dict``.
    """
    if not scaler_name:
        logger.warning("No scaler is selected for computing quantitative coefficients. Data is not normalized.")
    elif scaler_name not in xrf_map_dict:
        logger.warning(f"Scaler '{scaler_name}' is not in XRF map dictionary. Normalization can not be performed.")
        scaler_name = None

    # Clear ALL fluorescence values. Don't touch any other data.
    for info in quant_fluor_data_dict["element_lines"].values():
        info["fluorescence"] = None

    # Save the scaler name that was actually used (None if unavailable).
    quant_fluor_data_dict["scaler_name"] = scaler_name

    def _get_range(n_elements):
        # Exclude edge pixels (likely outliers) when the dimension has more
        # than 2 pixels; otherwise use the full range.
        if n_elements > 2:
            return 1, n_elements - 1
        return 0, n_elements

    # Compute fluorescence of the emission lines of the reference standard.
    element_lines = quant_fluor_data_dict["element_lines"]
    for eline, fluor_map in xrf_map_dict.items():
        if eline not in element_lines:
            continue
        # Normalize the map if a scaler is selected. (Typically a scaler IS selected.)
        if scaler_name:
            norm_map = normalize_data_by_scaler(fluor_map, xrf_map_dict[scaler_name])
        else:
            norm_map = fluor_map
        ny_min, ny_max = _get_range(norm_map.shape[0])
        nx_min, nx_max = _get_range(norm_map.shape[1])
        mean_fluor = np.mean(norm_map[ny_min:ny_max, nx_min:nx_max])
        # Note: numpy 'float64' is explicitly converted to 'float'
        # (the yaml package does not seem to support 'float64').
        element_lines[eline]["fluorescence"] = float(mean_fluor)
def prune_quant_fluor_data_dict(quant_fluor_data_dict):
    r"""
    Prune the fluorescence data dictionary by removing the element lines that
    are not present (fluorescence is None) or have fluorescence <= 0.
    'Pruning' is performed before saving calibration data, so that only
    meaningful information is saved.

    The function does not modify the original data structure. It returns a
    deep copy of the original dictionary with some emission lines removed.

    Parameters
    ----------
    quant_fluor_data_dict: dict
        Dictionary with XRF reference sample data. This dictionary is NOT
        modified by the function. It must satisfy the
        '_xrf_quant_fluor_schema' schema.

    Returns
    -------
    Copy of ``quant_fluor_data_dict`` with some emission lines removed. Only
    the emission lines that have fluorescence set to a valid (positive) value
    are left.
    """
    pruned = copy.deepcopy(quant_fluor_data_dict)
    element_lines = pruned["element_lines"]
    for key, val in list(element_lines.items()):
        if (val["fluorescence"] is None) or (val["fluorescence"] <= 0):
            del element_lines[key]
    # Bug fix: the function previously ended with a bare ``return`` and thus
    # yielded None; the pruned copy is the documented return value.
    return pruned
<reponame>milesgray/CALAE
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Implements the adaptive form of the loss.
You should only use this function if 1) you want the loss to change its shape
during training (otherwise use general.py) or 2) you want to impose the loss on
a wavelet or DCT image representation, as only this function has easy support
for that.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
from . import distribution
from . import util
from . import wavelet
class AdaptiveLossFunction(nn.Module):
  """The adaptive loss function on a matrix.
  This class behaves differently from general.lossfun() and
  distribution.nllfun(), which are "stateless", allow the caller to specify the
  shape and scale of the loss, and allow for arbitrary sized inputs. This
  class only allows for rank-2 inputs for the residual `x`, and expects that
  `x` is of the form [batch_index, dimension_index]. This class then
  constructs free parameters (torch Parameters) that define the alpha and scale
  parameters for each dimension of `x`, such that all alphas are in
  (`alpha_lo`, `alpha_hi`) and all scales are in (`scale_lo`, Infinity).
  The assumption is that `x` is, say, a matrix where x[i,j] corresponds to a
  pixel at location j for image i, with the idea being that all pixels at
  location j should be modeled with the same shape and scale parameters across
  all images in the batch. If the user wants to fix alpha or scale to be a
  constant,
  this can be done by setting alpha_lo=alpha_hi or scale_lo=scale_init
  respectively.
  """
  def __init__(self,
               num_dims,
               float_dtype,
               device,
               alpha_lo=0.001,
               alpha_hi=1.999,
               alpha_init=None,
               scale_lo=1e-5,
               scale_init=1.0):
    """Sets up the loss function.
    Args:
      num_dims: The number of dimensions of the input to come.
      float_dtype: The floating point precision of the inputs to come.
      device: The device to run on (cpu, cuda, etc).
      alpha_lo: The lowest possible value for loss's alpha parameters, must be
        >= 0 and a scalar. Should probably be in (0, 2).
      alpha_hi: The highest possible value for loss's alpha parameters, must be
        >= alpha_lo and a scalar. Should probably be in (0, 2).
      alpha_init: The value that the loss's alpha parameters will be initialized
        to, must be in (`alpha_lo`, `alpha_hi`), unless `alpha_lo` == `alpha_hi`
        in which case this will be ignored. Defaults to (`alpha_lo` +
        `alpha_hi`) / 2
      scale_lo: The lowest possible value for the loss's scale parameters. Must
        be > 0 and a scalar. This value may have more of an effect than you
        think, as the loss is unbounded as scale approaches zero (say, at a
        delta function).
      scale_init: The initial value used for the loss's scale parameters. This
        also defines the zero-point of the latent representation of scales, so
        SGD may cause optimization to gravitate towards producing scales near
        this value.
    """
    super(AdaptiveLossFunction, self).__init__()
    # --- Argument validation: all bounds must be scalars and consistent. ---
    if not np.isscalar(alpha_lo):
      raise ValueError('`alpha_lo` must be a scalar, but is of type {}'.format(
          type(alpha_lo)))
    if not np.isscalar(alpha_hi):
      raise ValueError('`alpha_hi` must be a scalar, but is of type {}'.format(
          type(alpha_hi)))
    if alpha_init is not None and not np.isscalar(alpha_init):
      raise ValueError(
          '`alpha_init` must be None or a scalar, but is of type {}'.format(
              type(alpha_init)))
    if not alpha_lo >= 0:
      raise ValueError('`alpha_lo` must be >= 0, but is {}'.format(alpha_lo))
    if not alpha_hi >= alpha_lo:
      raise ValueError('`alpha_hi` = {} must be >= `alpha_lo` = {}'.format(
          alpha_hi, alpha_lo))
    if alpha_init is not None and alpha_lo != alpha_hi:
      if not (alpha_init > alpha_lo and alpha_init < alpha_hi):
        raise ValueError(
            '`alpha_init` = {} must be in (`alpha_lo`, `alpha_hi`) = ({} {})'
            .format(alpha_init, alpha_lo, alpha_hi))
    if not np.isscalar(scale_lo):
      raise ValueError('`scale_lo` must be a scalar, but is of type {}'.format(
          type(scale_lo)))
    if not np.isscalar(scale_init):
      raise ValueError(
          '`scale_init` must be a scalar, but is of type {}'.format(
              type(scale_init)))
    if not scale_lo > 0:
      raise ValueError('`scale_lo` must be > 0, but is {}'.format(scale_lo))
    if not scale_init >= scale_lo:
      raise ValueError('`scale_init` = {} must be >= `scale_lo` = {}'.format(
          scale_init, scale_lo))
    self.num_dims = num_dims
    # Map numpy float dtypes to the corresponding torch dtypes; a torch dtype
    # passed in directly is left unchanged.
    if float_dtype == np.float32:
      float_dtype = torch.float32
    if float_dtype == np.float64:
      float_dtype = torch.float64
    self.float_dtype = float_dtype
    self.device = device
    # If a CUDA device was requested (as an int index, a 'cuda*' string, or a
    # torch.device of type 'cuda'), make it the current device so the tensors
    # created below are allocated on it.
    if isinstance(device, int) or\
       (isinstance(device, str) and 'cuda' in device) or\
       (isinstance(device, torch.device) and device.type == 'cuda'):
      torch.cuda.set_device(self.device)
    self.distribution = distribution.Distribution()
    if alpha_lo == alpha_hi:
      # If the range of alphas is a single item, then we just fix `alpha` to be
      # a constant.
      self.fixed_alpha = torch.tensor(
          alpha_lo, dtype=self.float_dtype,
          device=self.device)[np.newaxis, np.newaxis].repeat(1, self.num_dims)
      self.alpha = lambda: self.fixed_alpha
    else:
      # Otherwise we construct a "latent" alpha variable and define `alpha`
      # As an affine function of a sigmoid on that latent variable, initialized
      # such that `alpha` starts off as `alpha_init`.
      if alpha_init is None:
        alpha_init = (alpha_lo + alpha_hi) / 2.
      latent_alpha_init = util.inv_affine_sigmoid(
          alpha_init, lo=alpha_lo, hi=alpha_hi)
      # One latent alpha per dimension, shape (1, num_dims).
      self.register_parameter(
          'latent_alpha',
          torch.nn.Parameter(
              latent_alpha_init.clone().detach().to(
                  dtype=self.float_dtype,
                  device=self.device)[np.newaxis, np.newaxis].repeat(
                      1, self.num_dims),
              requires_grad=True))
      self.alpha = lambda: util.affine_sigmoid(
          self.latent_alpha, lo=alpha_lo, hi=alpha_hi)
    if scale_lo == scale_init:
      # If the difference between the minimum and initial scale is zero, then
      # we just fix `scale` to be a constant.
      self.fixed_scale = torch.tensor(
          scale_init, dtype=self.float_dtype,
          device=self.device)[np.newaxis, np.newaxis].repeat(1, self.num_dims)
      self.scale = lambda: self.fixed_scale
    else:
      # Otherwise we construct a "latent" scale variable and define `scale`
      # As an affine function of a softplus on that latent variable.
      # The zero initialization makes the initial `scale` equal `scale_init`.
      self.register_parameter(
          'latent_scale',
          torch.nn.Parameter(
              torch.zeros((1, self.num_dims)).to(
                  dtype=self.float_dtype, device=self.device),
              requires_grad=True))
      self.scale = lambda: util.affine_softplus(
          self.latent_scale, lo=scale_lo, ref=scale_init)
  def lossfun(self, x, **kwargs):
    """Computes the loss on a matrix.
    Args:
      x: The residual for which the loss is being computed. Must be a rank-2
        tensor, where the innermost dimension is the batch index, and the
        outermost dimension must be equal to self.num_dims. Must be a tensor or
        numpy array of type self.float_dtype.
      **kwargs: Arguments to be passed to the underlying distribution.nllfun().
    Returns:
      A tensor of the same type and shape as input `x`, containing the loss at
      each element of `x`. These "losses" are actually negative log-likelihoods
      (as produced by distribution.nllfun()) and so they are not actually
      bounded from below by zero. You'll probably want to minimize their sum or
      mean.
    """
    x = torch.as_tensor(x)
    # Input must be a [batch, num_dims] matrix of the configured dtype.
    assert len(x.shape) == 2
    assert x.shape[1] == self.num_dims
    assert x.dtype == self.float_dtype
    return self.distribution.nllfun(x, self.alpha(), self.scale(), **kwargs)
class StudentsTLossFunction(nn.Module):
"""A variant of AdaptiveLossFunction that uses a Student's t-distribution."""
def __init__(self,
num_dims,
float_dtype,
device,
scale_lo=1e-5,
scale_init=1.0):
"""Sets up the adaptive loss for a matrix of inputs.
Args:
num_dims: The number of dimensions of the input to come.
float_dtype: The floating point precision of the inputs to come.
device: The device to run on (cpu, cuda, etc).
scale_lo: The lowest possible value for the loss's scale parameters. Must
be > 0 and a scalar. This value may have more of an effect than you
think, as the loss is unbounded as scale approaches zero (say, at a
delta function).
scale_init: The initial value used for the loss's scale parameters. This
also defines the zero-point of the latent representation of scales, so
SGD may cause optimization to gravitate towards producing scales near
this value.
"""
super(StudentsTLossFunction, self).__init__()
if not np.isscalar(scale_lo):
raise ValueError('`scale_lo` must be a scalar, but is of type {}'.format(
type(scale_lo)))
if not np.isscalar(scale_init):
raise ValueError(
'`scale_init` must be a scalar, but is of type {}'.format(
type(scale_init)))
if not scale_lo > 0:
raise ValueError('`scale_lo` must be > 0, but is {}'.format(scale_lo))
if not scale_init >= scale_lo:
raise ValueError('`scale_init` = {} must be >= `scale_lo` = {}'.format(
scale_init, scale_lo))
self.num_dims = num_dims
if float_dtype == np.float32:
float_dtype = torch.float32
if float_dtype == np.float64:
float_dtype = torch.float64
self.float_dtype = float_dtype
| |
import numpy as np
import os
import torch
import torch.nn as nn
import time
import importlib
from utils.utils import logger_print
import hdf5storage
from utils.utils import cal_pesq
from utils.utils import cal_stoi
from utils.utils import cal_sisnr
# Module-level history of per-epoch training loss, validation loss and
# validation metric; appended to by Solver.train() and therefore shared
# across Solver instances within the same process.
train_epoch, val_epoch, val_metric_epoch = [], [], [] # for loss, loss and metric score
# from torch.utils.tensorboard import SummaryWriter
class Solver(object):
    def __init__(self,
                 data,
                 net,
                 optimizer,
                 save_name_dict,
                 args,
                 ):
        """
        Set up the training solver.

        Args:
            data: dict with "train_loader" and "val_loader" dataloaders.
            net: the network (nn.Module) to be trained.
            optimizer: optimizer instance for the network parameters.
            save_name_dict: dict providing "loss_filename",
                "best_model_filename" and "checkpoint_filename" used when
                saving results.
            args: nested configuration dict with "optimizer",
                "loss_function", "signal" and "path" sections; the individual
                entries are unpacked into attributes below.
        """
        self.train_dataloader = data["train_loader"]
        self.val_dataloader = data["val_loader"]
        self.net = net
        # optimizer part
        self.optimizer = optimizer
        self.lr = args["optimizer"]["lr"]
        self.gradient_norm = args["optimizer"]["gradient_norm"]
        self.epochs = args["optimizer"]["epochs"]
        self.halve_lr = args["optimizer"]["halve_lr"]
        self.early_stop = args["optimizer"]["early_stop"]
        self.halve_freq = args["optimizer"]["halve_freq"]
        self.early_stop_freq = args["optimizer"]["early_stop_freq"]
        self.print_freq = args["optimizer"]["print_freq"]
        self.metric_options = args["optimizer"]["metric_options"]
        # loss part: class names are resolved to instances further below
        self.loss_path = args["loss_function"]["path"]
        self.stagewise_loss = args["loss_function"]["stagewise"]["classname"]
        self.com_mag_loss = args["loss_function"]["com_mag"]["classname"]
        self.prev_weight = args["loss_function"]["prev_weight"]
        self.curr_weight = args["loss_function"]["curr_weight"]
        self.alpha = args["loss_function"]["alpha"]
        self.l_type = args["loss_function"]["l_type"]
        # signal part (STFT / framing configuration)
        self.sr = args["signal"]["sr"]
        self.win_size = args["signal"]["win_size"]
        self.win_shift = args["signal"]["win_shift"]
        self.fft_num = args["signal"]["fft_num"]
        self.is_compress = args["signal"]["is_compress"]
        # path part (checkpointing and result locations)
        self.is_checkpoint = args["path"]["is_checkpoint"]
        self.is_resume_reload = args["path"]["is_resume_reload"]
        self.checkpoint_load_path = args["path"]["checkpoint_load_path"]
        self.checkpoint_load_filename = args["path"]["checkpoint_load_filename"]
        self.loss_save_path = args["path"]["loss_save_path"]
        self.model_best_path = args["path"]["model_best_path"]
        # sava name
        self.loss_save_filename = save_name_dict["loss_filename"]
        self.best_model_save_filename = save_name_dict["best_model_filename"]
        self.checkpoint_save_filename = save_name_dict["checkpoint_filename"]
        self.train_loss = torch.Tensor(self.epochs)
        self.val_loss = torch.Tensor(self.epochs)
        # set loss funcs: import the module named by the config and replace
        # the class-name strings with instantiated loss objects.
        loss_module = importlib.import_module(self.loss_path)
        self.stagewise_loss = getattr(loss_module, self.stagewise_loss)(self.prev_weight, self.alpha,
                                                                        self.l_type)
        self.com_mag_loss = getattr(loss_module, self.com_mag_loss)(self.alpha, self.l_type)
        # Initializes (or restores from checkpoint) the training state.
        self._reset()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # summarywriter
        # self.tensorboard_path = "./" + args["path"]["logging_path"] + "/" + args["save"]["tensorboard_filename"]
        # if not os.path.exists(self.tensorboard_path):
        #     os.makedirs(self.tensorboard_path)
        #self.writer = SummaryWriter(self.tensorboard_path, max_queue=5, flush_secs=30)
def _reset(self):
# Reset
if self.is_resume_reload:
checkpoint = torch.load(os.path.join(self.checkpoint_load_path, self.checkpoint_load_filename))
self.net.load_state_dict(checkpoint["model_state_dict"])
self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
self.start_epoch = checkpoint["start_epoch"]
self.prev_val_loss = checkpoint["val_loss"] # val loss
self.prev_val_metric = checkpoint["val_metric"]
self.best_val_metric = checkpoint["best_val_metric"]
self.val_no_impv = checkpoint["val_no_impv"]
self.halving = checkpoint["halving"]
else:
self.start_epoch = 0
self.prev_val_loss = float("inf")
self.prev_val_metric = -float("inf")
self.best_val_metric = -float("inf")
self.val_no_impv = 0
self.halving = False
def train(self):
logger_print("Begin to train....")
self.net.to(self.device)
for epoch in range(self.start_epoch, self.epochs):
begin_time = time.time()
# training phase
logger_print("-" * 90)
start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
logger_print(f"Epoch id:{int(epoch + 1)}, Training phase, Start time:{start_time}")
self.net.train()
train_avg_loss = self._run_one_epoch(epoch, val_opt=False)
# self.writer.add_scalar(f"Loss/Training_Loss", train_avg_loss, epoch)
end_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
logger_print(f"Epoch if:{int(epoch + 1)}, Training phase, End time:{end_time}, "
f"Training loss:{train_avg_loss}")
# Cross val
start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
logger_print(f"Epoch id:{int(epoch + 1)}, Validation phase, Start time:{start_time}")
self.net.eval() # norm and dropout is off
val_avg_loss, val_avg_metric = self._run_one_epoch(epoch, val_opt=True)
# self.writer.add_scalar(f"Loss/Validation_Loss", val_avg_loss, epoch)
# self.writer.add_scalar(f"Loss/Validation_Metric", val_avg_metric, epoch)
end_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
logger_print(f"Epoch if:{int(epoch + 1)}, Validation phase, End time:{end_time}, "
f"Validation loss:{val_avg_loss}, Validation metric score:{val_avg_metric}")
end_time = time.time()
print(f"{end_time-begin_time}s in {epoch+1}th epoch")
logger_print("-" * 90)
# whether to save checkpoint at current epoch
if self.is_checkpoint:
cpk_dic = {}
cpk_dic["model_state_dict"] = self.net.state_dict()
cpk_dic["optimizer_state_dict"] = self.optimizer.state_dict()
cpk_dic["train_loss"] = train_avg_loss
cpk_dic["val_loss"] = val_avg_loss
cpk_dic["val_metric"] = val_avg_metric
cpk_dic["best_val_metric"] = self.best_val_metric
cpk_dic["start_epoch"] = epoch+1
cpk_dic["val_no_impv"] = self.val_no_impv
cpk_dic["halving"] = self.halving
torch.save(cpk_dic, os.path.join(self.checkpoint_load_path, "Epoch_{}_{}_{}".format(epoch+1,
self.net.__class__.__name__, self.checkpoint_save_filename)))
# record loss
# self.train_loss[epoch] = train_avg_loss
# self.val_loss[epoch] = val_avg_loss
train_epoch.append(train_avg_loss)
val_epoch.append(val_avg_loss)
val_metric_epoch.append(val_avg_metric)
# save loss
loss = {}
loss["train_loss"] = train_epoch
loss["val_loss"] = val_epoch
loss["val_metric"] = val_metric_epoch
if not self.is_resume_reload:
hdf5storage.savemat(os.path.join(self.loss_save_path, self.loss_save_filename), loss)
else:
hdf5storage.savemat(os.path.join(self.loss_save_path, "resume_cpk_{}".format(self.loss_save_filename)),
loss)
# lr halve and Early stop
if self.halve_lr:
if val_avg_metric <= self.prev_val_metric:
self.val_no_impv += 1
if self.val_no_impv == self.halve_freq:
self.halving = True
if (self.val_no_impv >= self.early_stop_freq) and self.early_stop:
logger_print("No improvements and apply early-stopping")
break
else:
self.val_no_impv = 0
if self.halving:
optim_state = self.optimizer.state_dict()
optim_state["param_groups"][0]["lr"] = optim_state["param_groups"][0]["lr"] / 2.0
self.optimizer.load_state_dict(optim_state)
logger_print("Learning rate is adjusted to %5f" % (optim_state["param_groups"][0]["lr"]))
self.halving = False
self.prev_val_metric = val_avg_metric
if val_avg_metric > self.best_val_metric:
self.best_val_metric = val_avg_metric
torch.save(self.net.state_dict(), os.path.join(self.model_best_path, self.best_model_save_filename))
logger_print(f"Find better model, saving to {self.best_model_save_filename}")
else:
logger_print("Did not find better model")
@torch.no_grad()
def _val_batch(self, batch_info):
    """Run one validation batch: forward pass, spectral loss, and metric scores.

    Args:
        batch_info: batch holding .feats (mixture waveforms, (B,L)),
            .labels (target waveforms, (B,L)) and .frame_mask_list
            (valid sample count per utterance).
    Returns:
        dict with "mse_loss" (weighted spectral loss), "unpro_metric"
        (metric score of the unprocessed mixture) and "pro_metric"
        (metric score of the enhanced output).
    """
    batch_mix_wav = batch_info.feats.to(self.device)  # (B,L)
    batch_target_wav = batch_info.labels.to(self.device)  # (B,L)
    batch_wav_len_list = batch_info.frame_mask_list
    real_len = batch_mix_wav.shape[-1]
    # STFT analysis
    b_size = batch_mix_wav.shape[0]
    win_size, win_shift = int(self.sr*self.win_size), int(self.sr*self.win_shift)
    batch_mix_stft = torch.stft(
        batch_mix_wav,
        n_fft=self.fft_num,
        hop_length=win_shift,
        win_length=win_size,
        window=torch.hann_window(win_size).to(self.device))  # (B,F,T,2)
    batch_target_stft = torch.stft(
        batch_target_wav,
        n_fft=self.fft_num,
        hop_length=win_shift,
        win_length=win_size,
        window=torch.hann_window(win_size).to(self.device))  # (B,F,T,2)
    # number of valid frames per utterance (center-padded STFT)
    batch_frame_list = []
    for i in range(len(batch_wav_len_list)):
        curr_frame_num = (batch_wav_len_list[i]-win_size+win_size)//win_shift+1  # center case
        batch_frame_list.append(curr_frame_num)
    if self.is_compress:
        # magnitude-compress the target spectrum (|S|**0.5), keep phase
        batch_target_mag, batch_target_phase = torch.norm(batch_target_stft, dim=-1)**0.5, \
                                               torch.atan2(batch_target_stft[..., -1],
                                                           batch_target_stft[..., 0])
        batch_target_stft = torch.stack((batch_target_mag*torch.cos(batch_target_phase),
                                         batch_target_mag*torch.sin(batch_target_phase)), dim=-1)
    # layout conversion for the network: (B,F,T,2) -> (B,2,T,F)
    batch_mix_stft = batch_mix_stft.permute(0, 3, 2, 1)  # (B,2,T,F)
    batch_target_stft = batch_target_stft.permute(0, 3, 2, 1)  # (B,2,T,F)
    # network forward (only the final estimate is scored here)
    _, _, batch_s_est = self.net(batch_mix_stft)
    # final spectral loss
    batch_s_loss = self.com_mag_loss(batch_s_est, batch_target_stft, batch_frame_list)
    # back to (B,F,T,2) for iSTFT / metric computation
    batch_mix_stft = batch_mix_stft.permute(0, 3, 2, 1)  # (B,F,T,2)
    batch_s_est = batch_s_est.permute(0, 3, 2, 1)  # (B,F,T,2)
    batch_target_stft = batch_target_stft.permute(0, 3, 2, 1)  # (B,F,T,2)
    if self.is_compress:
        # undo the magnitude compression (|S|**2.0) before resynthesis
        batch_spec_mag, batch_spec_phase = torch.norm(batch_s_est, dim=-1)**2.0, \
                                           torch.atan2(batch_s_est[..., -1], batch_s_est[..., 0])
        batch_est_stft = torch.stack((batch_spec_mag*torch.cos(batch_spec_phase),
                                      batch_spec_mag*torch.sin(batch_spec_phase)), dim=-1)
        batch_target_mag, batch_target_phase = torch.norm(batch_target_stft, dim=-1)**2.0, \
                                               torch.atan2(batch_target_stft[..., -1], batch_target_stft[..., 0])
        batch_target_stft = torch.stack((batch_target_mag*torch.cos(batch_target_phase),
                                         batch_target_mag*torch.sin(batch_target_phase)), dim=-1)
    else:
        # BUG FIX: batch_est_stft was only assigned in the compressed branch,
        # raising NameError at istft time when is_compress is False.
        batch_est_stft = batch_s_est
    batch_mix_wav = torch.istft(batch_mix_stft,
                                n_fft=self.fft_num,
                                hop_length=win_shift,
                                win_length=win_size,
                                window=torch.hann_window(win_size).to(self.device),
                                length=real_len
                                )  # (B,L)
    batch_est_wav = torch.istft(batch_est_stft,
                                n_fft=self.fft_num,
                                hop_length=win_shift,
                                win_length=win_size,
                                window=torch.hann_window(win_size).to(self.device),
                                length=real_len
                                )  # (B,L)
    batch_target_wav = torch.istft(batch_target_stft,
                                   n_fft=self.fft_num,
                                   hop_length=win_shift,
                                   win_length=win_size,
                                   window=torch.hann_window(win_size).to(self.device),
                                   length=real_len
                                   )  # (B,L)
    loss_dict = {}
    loss_dict["mse_loss"] = self.curr_weight*batch_s_loss.item()
    # zero the padded tail of each utterance before scoring
    # (assumes max(batch_wav_len_list) == real_len -- TODO confirm with collate fn)
    mask_list = []
    for utt_id in range(b_size):
        mask_list.append(torch.ones((batch_wav_len_list[utt_id])))
    wav_mask = torch.nn.utils.rnn.pad_sequence(mask_list, batch_first=True).to(batch_mix_stft.device)  # (B,L)
    batch_mix_wav, batch_target_wav, batch_est_wav = (batch_mix_wav*wav_mask).cpu().numpy(), \
                                                     (batch_target_wav*wav_mask).cpu().numpy(), \
                                                     (batch_est_wav*wav_mask).cpu().numpy()
    if "SISNR" in self.metric_options:
        unpro_score_list, pro_score_list = [], []
        for utt_id in range(batch_mix_wav.shape[0]):
            unpro_score_list.append(cal_sisnr(utt_id, batch_mix_wav, batch_target_wav, self.sr))
            pro_score_list.append(cal_sisnr(utt_id, batch_est_wav, batch_target_wav, self.sr))
        unpro_score_list, pro_score_list = np.asarray(unpro_score_list), np.asarray(pro_score_list)
        loss_dict["unpro_metric"] = np.mean(unpro_score_list)
        loss_dict["pro_metric"] = np.mean(pro_score_list)
    if "NB-PESQ" in self.metric_options:
        unpro_score_list, pro_score_list = [], []
        for utt_id in range(batch_mix_wav.shape[0]):
            unpro_score_list.append(cal_pesq(utt_id, batch_mix_wav, batch_target_wav, self.sr))
            pro_score_list.append(cal_pesq(utt_id, batch_est_wav, batch_target_wav, self.sr))
        unpro_score_list, pro_score_list = np.asarray(unpro_score_list), np.asarray(pro_score_list)
        loss_dict["unpro_metric"] = np.mean(unpro_score_list)
        loss_dict["pro_metric"] = np.mean(pro_score_list)
    if "ESTOI" in self.metric_options:
        unpro_score_list, pro_score_list = [], []
        for utt_id in range(batch_mix_wav.shape[0]):
            unpro_score_list.append(cal_stoi(utt_id, batch_mix_wav, batch_target_wav, self.sr))
            # BUG FIX: the processed score must be computed on the enhanced
            # signal; the original passed batch_mix_wav here, so the "pro"
            # ESTOI score was identical to the unprocessed one.
            pro_score_list.append(cal_stoi(utt_id, batch_est_wav, batch_target_wav, self.sr))
        unpro_score_list, pro_score_list = np.asarray(unpro_score_list), np.asarray(pro_score_list)
        loss_dict["unpro_metric"] = np.mean(unpro_score_list)
        loss_dict["pro_metric"] = np.mean(pro_score_list)
    return loss_dict
def _train_batch(self, batch_info):
    """Run one training batch: STFT both signals, forward the network,
    accumulate stagewise plus final spectral losses, and update parameters.

    Args:
        batch_info: batch with .feats (mixture wav, (B,L)), .labels
            (target wav, (B,L)) and .frame_mask_list (valid sample
            count per utterance).
    Returns:
        dict with "stagewise_s_loss", "stagewise_n_loss" (loss tensors)
        and "target_loss" (weighted float) for logging.
    """
    batch_mix_wav = batch_info.feats.to(self.device)  # (B,L)
    batch_target_wav = batch_info.labels.to(self.device)  # (B,L)
    # noise defined as the residual: mixture - target
    batch_noise_wav = batch_mix_wav - batch_target_wav  # (B,L)
    batch_wav_len_list = batch_info.frame_mask_list
    # STFT analysis of mixture, target and noise
    b_size, wav_len = batch_mix_wav.shape
    win_size, win_shift = int(self.sr*self.win_size), int(self.sr*self.win_shift)
    batch_mix_stft = torch.stft(
        batch_mix_wav,
        n_fft=self.fft_num,
        hop_length=win_shift,
        win_length=win_size,
        window=torch.hann_window(win_size).to(batch_mix_wav.device))  # (B,F,T,2)
    batch_target_stft = torch.stft(
        batch_target_wav,
        n_fft=self.fft_num,
        hop_length=win_shift,
        win_length=win_size,
        window=torch.hann_window(win_size).to(batch_target_wav.device))  # (B,F,T,2)
    batch_noise_stft = torch.stft(
        batch_noise_wav,
        n_fft=self.fft_num,
        hop_length=win_shift,
        win_length=win_size,
        window=torch.hann_window(win_size).to(batch_noise_wav.device))  # (B,F,T,2)
    # number of valid frames per utterance (center-padded STFT)
    batch_frame_list = []
    for i in range(len(batch_wav_len_list)):
        curr_frame_num = (batch_wav_len_list[i] - win_size + win_size) // win_shift + 1
        batch_frame_list.append(curr_frame_num)
    if self.is_compress:  # here only apply to target and bf as feat-compression has been applied within the network
        # noise: magnitude-compress (|N|**0.5), keep phase
        batch_noise_mag, batch_noise_phase = torch.norm(batch_noise_stft, dim=-1)**0.5, \
                                             torch.atan2(batch_noise_stft[..., -1], batch_noise_stft[..., 0])
        batch_noise_stft = torch.stack((batch_noise_mag*torch.cos(batch_noise_phase),
                                        batch_noise_mag*torch.sin(batch_noise_phase)), dim=-1)
        # target: same magnitude compression
        batch_target_mag, batch_target_phase = torch.norm(batch_target_stft, dim=-1)**0.5, \
                                               torch.atan2(batch_target_stft[..., -1],
                                                           batch_target_stft[..., 0])
        batch_target_stft = torch.stack((batch_target_mag*torch.cos(batch_target_phase),
                                         batch_target_mag*torch.sin(batch_target_phase)), dim=-1)
    # layout conversion: network input is (B,2,T,F); stagewise targets are (B,2,F,T)
    batch_mix_stft = batch_mix_stft.permute(0,3,2,1)  # (B,2,T,F)
    batch_target_stft = batch_target_stft.permute(0,3,1,2)  # (B,2,F,T)
    batch_noise_stft = batch_noise_stft.permute(0,3,1,2)  # (B,2,F,T)
    with torch.enable_grad():
        batch_s_est_list, batch_n_est_list, batch_s_esti = self.net(batch_mix_stft)  # (B,2,F,T)
        # stagewise loss for speech (intermediate network estimates)
        batch_stagewise_s_loss = self.stagewise_loss(batch_s_est_list, batch_target_stft, batch_frame_list)
        # stagewise loss for noise
        batch_stagewise_n_loss = self.stagewise_loss(batch_n_est_list, batch_noise_stft, batch_frame_list)
        # final loss; target transposed back to match batch_s_esti's layout
        batch_s_loss = self.com_mag_loss(batch_s_esti, batch_target_stft.transpose(-2, -1), batch_frame_list)
        batch_loss = batch_stagewise_n_loss + batch_stagewise_s_loss + self.curr_weight*batch_s_loss
    # params update (backward + optimizer step; see update_params)
    self.update_params(batch_loss)
    loss_dict = {}
    loss_dict["stagewise_s_loss"] = batch_stagewise_s_loss
    loss_dict["stagewise_n_loss"] = batch_stagewise_n_loss
    loss_dict["target_loss"] = self.curr_weight*batch_s_loss.item()
    return loss_dict
def _run_one_epoch(self, epoch, val_opt=False):
    """Run one full epoch over the training or validation set.

    Args:
        epoch: zero-based epoch index (used only for log messages).
        val_opt: False -> training phase, True -> validation phase.
    Returns:
        training phase: average target loss over all batches.
        validation phase: (average spectral loss, average processed-metric score).
    """
    if not val_opt:
        # training phase
        data_loader = self.train_dataloader
        total_s_loss = 0.
        start_time = time.time()
        for batch_id, batch_info in enumerate(data_loader.get_data_loader()):
            loss_dict = self._train_batch(batch_info)
            total_s_loss += loss_dict["target_loss"]
            if batch_id % self.print_freq == 0:
                logger_print(
                    "Epoch:{:d}, Iter:{:d}, Average loss:{:.4f}, Time: {:d}ms/batch".
                    format(epoch+1, int(batch_id), total_s_loss/(batch_id+1),
                           int(1000*(time.time()-start_time)/(batch_id+1))))
        return total_s_loss / (batch_id+1)
    else:
        # validation phase
        data_loader = self.val_dataloader
        total_sp_loss, total_pro_metric_loss, total_unpro_metric_loss = 0., 0., 0.
        start_time = time.time()
        for batch_id, batch_info in enumerate(data_loader.get_data_loader()):
            loss_dict = self._val_batch(batch_info)
            assert len(self.metric_options) == 1, "only one metric is supported to output in the val phase"
            total_sp_loss += loss_dict["mse_loss"]
            total_unpro_metric_loss += loss_dict["unpro_metric"]
            total_pro_metric_loss += loss_dict["pro_metric"]
            if batch_id % self.print_freq == 0:
                logger_print(
                    "Epoch:{:d}, Iter:{:d}, Average spectral loss:{:.4f}, Average unpro metric score:{:.4f}, "
                    "Average pro metric score:{:.4f}, Time: {:d}ms/batch".
                    format(epoch+1, int(batch_id), total_sp_loss/(batch_id+1), total_unpro_metric_loss/(batch_id+1),
                           total_pro_metric_loss/(batch_id+1), int(1000*(time.time()-start_time)/(batch_id+1))))
        # BUG FIX: the metric average was divided by batch_id instead of
        # batch_id+1, biasing the score upward and raising ZeroDivisionError
        # whenever the validation loader yields a single batch.
        return total_sp_loss / (batch_id+1), total_pro_metric_loss / (batch_id+1)
def update_params(self, loss):
self.optimizer.zero_grad()
loss.backward()
if self.gradient_norm >= 0.0:
| |
toggle invert if set
toggle = 0 #Used to toggle invert between groups of airports. Leave set at 0
#Add update message to beginning of list
sortwindslist.insert(0,("Updated", dt_string))
#Add type of data being displayed, METAR, TAF, MOS etc
if metar_taf_mos == 1: #Displaying METAR data
sortwindslist.insert(0,("METARs", "Displayed"))
elif metar_taf_mos == 0: #TAF hour_to_display
if toggle_sw == 0:
sortwindslist.insert(0,(str(time_sw0) + " hr TAF", "Displayed"))
if toggle_sw == 1:
sortwindslist.insert(0,(str(time_sw1) + " hr TAF", "Displayed"))
if toggle_sw == 2:
sortwindslist.insert(0,(str(time_sw2) + " hr TAF", "Displayed"))
if toggle_sw == 3:
sortwindslist.insert(0,(str(time_sw3) + " hr TAF", "Displayed"))
if toggle_sw == 4:
sortwindslist.insert(0,(str(time_sw4) + " hr TAF", "Displayed"))
if toggle_sw == 5:
sortwindslist.insert(0,(str(time_sw5) + " hr TAF", "Displayed"))
if toggle_sw == 6:
sortwindslist.insert(0,(str(time_sw6) + " hr TAF", "Displayed"))
if toggle_sw == 7:
sortwindslist.insert(0,(str(time_sw7) + " hr TAF", "Displayed"))
if toggle_sw == 8:
sortwindslist.insert(0,(str(time_sw8) + " hr TAF", "Displayed"))
if toggle_sw == 9:
sortwindslist.insert(0,(str(time_sw9) + " hr TAF", "Displayed"))
if toggle_sw == 10:
sortwindslist.insert(0,(str(time_sw10) + " hr TAF", "Displayed"))
if toggle_sw == 11:
sortwindslist.insert(0,(str(time_sw11) + " hr TAF", "Displayed"))
if toggle_sw == 12:
sortwindslist.insert(0,(str(time_sw0) + " hr TAF", "Displayed"))
elif metar_taf_mos == 2: #MOS hour_to_display
if toggle_sw == 0:
sortwindslist.insert(0,(str(time_sw0) + " hr MOS", "Displayed"))
if toggle_sw == 1:
sortwindslist.insert(0,(str(time_sw1) + " hr MOS", "Displayed"))
if toggle_sw == 2:
sortwindslist.insert(0,(str(time_sw2) + " hr MOS", "Displayed"))
if toggle_sw == 3:
sortwindslist.insert(0,(str(time_sw3) + " hr MOS", "Displayed"))
if toggle_sw == 4:
sortwindslist.insert(0,(str(time_sw4) + " hr MOS", "Displayed"))
if toggle_sw == 5:
sortwindslist.insert(0,(str(time_sw5) + " hr MOS", "Displayed"))
if toggle_sw == 6:
sortwindslist.insert(0,(str(time_sw6) + " hr MOS", "Displayed"))
if toggle_sw == 7:
sortwindslist.insert(0,(str(time_sw7) + " hr MOS", "Displayed"))
if toggle_sw == 8:
sortwindslist.insert(0,(str(time_sw8) + " hr MOS", "Displayed"))
if toggle_sw == 9:
sortwindslist.insert(0,(str(time_sw9) + " hr MOS", "Displayed"))
if toggle_sw == 10:
sortwindslist.insert(0,(str(time_sw10) + " hr MOS", "Displayed"))
if toggle_sw == 11:
sortwindslist.insert(0,(str(time_sw11) + " hr MOS", "Displayed"))
if toggle_sw == 12:
sortwindslist.insert(0,(str(time_sw0) + " hr MOS", "Displayed"))
#Display welcome message via OLED displays if 'usewelcome = 1'
if usewelcome and toggle_sw != -1: #if toggle_sw == -1 then this script just started. Suppress welcome message for now
logger.info("Use Welcome Enabled")
if oledposorder == 0:
startnum = 0 #values are for oleds wired normally, pos 0 thru 7
stopnum = numofdisplays
stepnum = 1
else:
startnum = numofdisplays-1 #these values are for oleds wired backwards, pos 7 thru 0
stopnum = -1
stepnum = -1
font = boldfont
arrowdir = '' #No arrow needed
j = 0
welcomelist = list(welcome.split(" ")) #create a list to use to display a welcome message if desired
if displayIP: #will display the RPI's local IP address along with welcome message if desired.
splitIP = re.sub(r'^(.*?(\..*?){1})\.', r'\1\n.', str(s.getsockname()[0])) #split IP into 2 lines
logger.debug(splitIP)
welcomelist = welcomelist + [splitIP] #[splitIP] #split into 2 lines
# welcomelist = welcomelist + [str(s.getsockname()[0])] #all on one line
if len(welcomelist) < numofdisplays:
pad = int((numofdisplays - len(welcomelist))/2)
welcomelist = ([''] * pad) + welcomelist
blanks = [''] * numofdisplays #add blanks to end of message to clean display after message
welcomelist = welcomelist + blanks
if GPIO.input(4) == 1: #Set dimming level
dimming = 1 #1 = full dim
else:
dimming = dimswitch #Brightess setting. dimswitch can be 0,1 or 2. 1 is most dim, 2 medium dim.
logger.debug(welcomelist)
while j < len(welcomelist):
for ch in range(startnum, stopnum, stepnum):
if j < len(welcomelist):
word = welcomelist[j]
else:
word = ''
oledcenter(word, ch, font, arrowdir, dimming, toggle, 0)
if numofdisplays == 1:
time.sleep(oledpause)
else:
time.sleep(oledpause/4)
j += 1
#Loop through the airports and display the winds till its time to update the weather from the FAA
#Setup timed loop for updating FAA Weather that will run based on the value of update_interval which is a user setting
k = 0 #counter for displaying local time is desired.
if toggle_sw != -1: #check to see if this is the first time through and bypass if it is.
if oledused:
clearoleddisplays()
if lcddisplay:
lcd.clear()
timeout_start = time.time() #When timer hits user-defined value, go back to outer loop to update FAA Weather.
while time.time() < timeout_start + (update_interval * 60): #take "update_interval" which is in minutes and turn into seconds
#If the rotary switch is in Heat Map Mode, display such on the displays.
if metar_taf_mos == 3:
if lcddisplay:
lcd.clear()
lcd.cursor_mode = 'hide'
loop_string("Heat Map Mode", lcd, framebuffer, 1, 16, lcdpause)
if oledused: #Display top AP Landings list on oleds
arrowdir = ''
dimming = 0
toggle = 0
j = 0
ch = 0
logger.debug(hmdata_sorted)
while j < 10:
for ch in range(startnum, stopnum, stepnum): #numofdisplays-1, -1, -1):
if j == 0:
val = hmdata_sorted[j]
elif j > 10:
val = ''
else:
hmap, numland = hmdata_sorted[j]
val = hmap + "\n" + '#' + str(j)
oledcenter(val, ch, font, arrowdir, dimming, toggle, 0) #send airport and winds to proper oled display
j += 1
if numofdisplays == 1:
time.sleep(oledpause)
else:
time.sleep(oledpause/4)
#Routine to restart this script if config.py is changed while this script is running.
for f, mtime in WATCHED_FILES_MTIMES:
if getmtime(f) != mtime:
logger.info("Restarting from awake" + __file__ + " in 2 sec...")
time.sleep(2)
os.execv(sys.executable, [sys.executable] + [__file__]) #'/NeoSectional/metar-display-v4.py'
#Timer routine, used to turn off LED's at night if desired. Use 24 hour time in settings.
if usetimer: #check to see if the user wants to use a timer.
if time_in_range(timeoff, end_time, datetime.now().time()): #Part of Timer Fix - Thank You to <NAME>
# If temporary lights-on period from refresh button has expired, restore the original light schedule
#Part of Timer Fix
if temp_lights_on == 1:
end_time = lights_on
timeoff = lights_out
temp_lights_on = 0
logger.info("Display Going to Sleep")
if lcddisplay:
lcd.clear()
if oledused:
tmp1 = border
border = 0
clearoleddisplays() #clear displays with no borders
border = tmp1
while time_in_range(timeoff, end_time, datetime.now().time()): #Part of timer fix
# sys.stdout.write ("z")
# sys.stdout.flush ()
if sleepmsg == 1: #Display "Sleeping" message on first oled if desired. 0 = No, 1 = Yes
rch = random.randint(0,numofdisplays-1)
oledcenter("Sleeping", rch, font, "", 1, toggle) #send airport and winds to proper oled display
time.sleep(2)
clearoleddisplays()
temp_timeoff = timeoff #store original timeoff time and restore later.
time.sleep(1)
if GPIO.input(22) == False: #Pushbutton for Refresh. check to see if we should turn on temporarily during sleep mo$
# Set to turn lights on two seconds ago to make sure we hit the loop next time through - Part of Timer Fix
end_time = (datetime.now()-timedelta(seconds=2)).time()
timeoff = (datetime.now()+timedelta(minutes=tempsleepon)).time()
temp_lights_on = 1 #Set this to 1 if button is pressed
logger.info("Sleep interrupted by button push")
#Routine to restart this script if config.py is changed while this script is running.
for f, mtime in WATCHED_FILES_MTIMES:
if getmtime(f) != mtime:
logger.info("Restarting from sleep " + __file__ + " in 2 sec...")
time.sleep(2)
os.execv(sys.executable, [sys.executable] + [__file__]) #'/NeoSectional/metar-display-v4.py'
# print ("\033[0;0m\n") #Turn off Purple text.
#Check if rotary switch is used, and what position it is in. This will determine what to display, METAR or TAF data.
#If TAF data, what time offset should be displayed, i.e. 0 hour, 1 hour, 2 hour etc.
#If there is no rotary switch installed, then all these tests will fail and will display the defaulted data from Switch Position 0
if GPIO.input(0) == False and toggle_sw != 0:
toggle_sw = 0
hour_to_display = time_sw0 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw0 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 0. Breaking out of loop for METARs')
break
elif GPIO.input(5) == False and toggle_sw != 1:
toggle_sw = 1
hour_to_display = time_sw1 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw1 #0 = Display TAF. 1 = Display METAR. 2 = MOS. | |
"""
Copyright Government of Canada 2020-2021
Written by: <NAME>, National Microbiology Laboratory,
Public Health Agency of Canada
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import argparse
import csv
from pybiomart import Server
import pysam
from pathlib import Path
import os
import sys
import numpy as np
import pandas as pd
import uuid
import urllib
import tempfile
import yaml
from filelock import FileLock
import subprocess
import bgzip
from gnali.exceptions import EmptyFileError, TBIDownloadError, \
InvalidConfigurationError, InvalidFilterError, \
NoVariantsAvailableError
from gnali.filter import Filter
from gnali.variants import Variant, Gene
from gnali.dbconfig import Config, RuntimeConfig, create_template
import gnali.outputs as outputs
from gnali.vep import VEP
from gnali.gnali_get_data import verify_files_present
from gnali.files import download_file
from gnali.logging import Logger
import pkg_resources
SCRIPT_NAME = 'gNALI'
SCRIPT_INFO = "Given a list of genes to test, gNALI finds all potential \
loss of function variants of those genes."
GNALI_PATH = Path(__file__).parent.absolute()
DATA_PATH = "{}/data".format(str(GNALI_PATH))
DB_CONFIG_FILE = "{}/db-config.yaml".format(str(DATA_PATH))
def open_test_file(input_file):
    """Read genes from the input file.

    Args:
        input_file: input file containing genes to find
                    (csv, tsv/tab, txt)
    Returns:
        list of gene names, one per non-empty input row (multi-column
        rows are joined with ", ")
    Raises:
        FileNotFoundError: if the input file does not exist
        EmptyFileError: if no genes were read from the file
    """
    test_genes_list = []
    try:
        with open(input_file) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            for gene in csv_reader:
                # Stop at the first blank row. BUG FIX: the original tested
                # `not str(gene)`, which is never true (str([]) == "[]" is
                # truthy), so blank rows were appended as empty strings
                # instead of terminating the read.
                if not gene:
                    break
                test_genes_list.append(", ".join(gene))
    except FileNotFoundError:
        raise FileNotFoundError("Input file {} was not "
                                "found".format(input_file))
    except Exception as error:
        # chain the original cause instead of silently discarding it
        raise Exception("something went wrong, try again") from error
    if len(test_genes_list) == 0:
        raise EmptyFileError("input file {} is empty".format(input_file))
    return test_genes_list
def get_human_genes(db_info):
    """Query the Ensembl BioMart server for the human gene dataset.

    Args:
        db_info: RuntimeConfig object with database info
    Returns:
        query results with HGNC symbol, chromosome name, and the
        start/end positions of each gene
    """
    server = Server(host=db_info.ref_genome_path)
    dataset = (server.marts['ENSEMBL_MART_ENSEMBL']
               .datasets['hsapiens_gene_ensembl'])
    # Restrict the query to the canonical chromosomes (1-22, X, Y) so
    # that gene patches/scaffolds are excluded up front.
    canonical_chromosomes = [str(num) for num in range(1, 23)] + ['X', 'Y']
    return dataset.query(attributes=['hgnc_symbol', 'chromosome_name',
                                     'start_position', 'end_position'],
                         filters={'chromosome_name': canonical_chromosomes})
def get_test_gene_descriptions(genes, db_info, logger, verbose_on):
    """Filter Ensembl human genes for info related to test genes.

    Args:
        genes: list of Gene objects
        db_info: RuntimeConfig object with database info
        logger: Logger object
        verbose_on: boolean for verbose mode
    Returns:
        (genes, gene_descriptions) where unknown genes are marked with
        status "Unknown gene" and gene_descriptions holds the Ensembl
        rows for the remaining genes
    """
    descs = get_human_genes(db_info)
    descs.columns = ['hgnc_symbol', 'chromosome_name',
                     'start_position', 'end_position']
    # drop any remaining gene patches
    descs = descs[~descs['chromosome_name'].str.contains('PATCH')]
    wanted_names = [gene.name for gene in genes]
    descs = descs[descs['hgnc_symbol'].isin(wanted_names)]
    descs.reset_index(drop=True, inplace=True)
    found_names = list(descs['hgnc_symbol'])
    missing_names = [name for name in wanted_names if name not in found_names]
    for gene in genes:
        if gene.name in missing_names:
            gene.set_status("Unknown gene")
    if len(missing_names) > 0 and verbose_on:
        logger.write("Genes not available in Ensembl {} database (skipping):"
                     .format(db_info.ref_genome_name))
        for name in missing_names:
            logger.write(name)
    return genes, descs
def find_test_locations(genes, gene_descs, db_info):
    """Using results from the Ensembl database, set the Tabix query
    location on each target gene.

    Args:
        genes: list of Gene objects
        gene_descs: results from Ensembl database
                    from get_test_gene_descriptions()
        db_info: RuntimeConfig object
    Returns:
        the same list of genes, with locations set on those whose
        status is unset
    """
    # GRCh38 coordinates need a "chr" prefix for Tabix queries
    chrom_prefix = "chr" if db_info.ref_genome_name == "GRCh38" else ""
    for gene in genes:
        if gene.status is not None:
            continue
        row = gene_descs.index[gene_descs.hgnc_symbol == gene.name][0]
        chrom = gene_descs.loc[gene_descs.index[row], 'chromosome_name']
        start = gene_descs.loc[gene_descs.index[row], 'start_position']
        end = gene_descs.loc[gene_descs.index[row], 'end_position']
        gene.set_location(location="{prefix}{}:{}-{}"
                                   .format(chrom, start, end,
                                           prefix=chrom_prefix))
    return genes
def get_db_config(config_file, db):
    """Read and parse the database configuration file.

    Args:
        config_file: config file (.yaml) path
        db: database whose config we want to use
    Returns:
        validated Config object for the requested database
    """
    try:
        with open(config_file, 'r') as config_stream:
            raw_config = yaml.load(config_stream.read(),
                                   Loader=yaml.FullLoader)
        db_config = Config(db, raw_config)
        db_config.validate_config()
        return db_config
    except InvalidConfigurationError:
        # validation errors carry their own context; propagate unchanged
        raise
    except Exception as error:
        print("Could not read from database configuration "
              "file:", config_file)
        raise Exception(error)
def validate_filters(config, predefined_filters, additional_filters):
    """Validate that user-given predefined filters exist in
    the config file, and that additional filters are in the
    correct format.

    Args:
        config: database configuration object
        predefined_filters: list of user-specified predefined filters
        additional_filters: list of user-given additional filters
    Raises:
        InvalidFilterError: for any unknown or malformed filter
    """
    for pre_filter in (predefined_filters or []):
        try:
            config.validate_predefined_filter(pre_filter)
        except InvalidFilterError:
            raise InvalidFilterError(pre_filter)
    for add_filter in (additional_filters or []):
        try:
            # construction is the format check; the object is discarded
            Filter(add_filter, add_filter)
        except ValueError:
            raise InvalidFilterError(add_filter)
def transform_filters(db_config, pre_filters, add_filters):
    """Transform predefined and additional filters
    into Filter objects in a single list.

    Args:
        db_config: database configuration object
        pre_filters: list of user-specified predefined filters
        add_filters: list of user-given additional filters
    Returns:
        list of Filter objects
    """
    filter_objs = []
    if pre_filters is not None:
        # dict comprehension de-duplicates repeated predefined filters
        selected = {name: db_config.predefined_filters[name]
                    for name in pre_filters}
        filter_objs = [Filter(name, expr)
                       for name, expr in selected.items()]
    if add_filters is not None:
        filter_objs.extend(Filter(expr, expr) for expr in add_filters)
    return filter_objs
def tbi_needed(url, dest_path):
    """Given a tbi url, determine if we must download it.

    We download it if it does not yet exist locally, or if it exists
    but its size differs from the remote Content-Length header.

    Args:
        url: tbi url
        dest_path: path of expected tbi
    Returns:
        True if the .tbi file must be (re)downloaded, else False
    """
    try:
        head_request = urllib.request.Request(url, method='HEAD')
        response = urllib.request.urlopen(head_request)
        remote_size = int(response.headers['Content-Length'])
    except (urllib.error.HTTPError, urllib.error.URLError,
            urllib.error.ContentTooShortError):
        raise TBIDownloadError("could not get header for .tbi "
                               "file for {}".format(url))
    except TimeoutError:
        raise TimeoutError("could not fetch header for {} \
before timeout".format(url))
    except Exception as error:
        raise Exception(error)
    up_to_date = Path(dest_path).is_file() and \
        remote_size == os.path.getsize(dest_path)
    return not up_to_date
def get_db_tbi(file_info, data_path, max_time):
    """Obtain the Tabix index (.tbi) file for a database file.

    Local and/or uncompressed inputs are bgzip-compressed as required
    and indexed with `tabix`; remote compressed inputs have their
    pre-built index downloaded (guarded by a file lock so concurrent
    runs don't clobber each other's download).

    Args:
        file_info: a DataFile object
        data_path: where to save the index file
        max_time: maximum time to wait for
                  download. An exception is
                  raised if download doesn't
                  complete in this time.
    Returns:
        path of the .tbi index file
    """
    file_path = file_info.path
    file_name = file_path.split("/")[-1]
    tbi_path = ''
    if file_info.is_local and not file_info.is_compressed:
        # compress local file to .bgz (required for Tabix)
        data_bgz = compress_vcf(file_path, data_path, file_name)
        file_info.set_compressed_path(data_bgz)
        subprocess.run(['tabix', data_bgz])
        tbi_path = "{}/{}.bgz.tbi".format(data_path, file_name)
    elif file_info.is_local and file_info.is_compressed:
        subprocess.run(['cp', file_info.path, data_path])
        file_copy = "{}/{}".format(data_path, file_name)
        subprocess.run(['tabix', file_copy])
        # BUG FIX: `tabix` writes its index to "<file>.tbi"; the original
        # returned the copied data file's path here instead of the index
        # path (every other branch returns a ".tbi" path).
        tbi_path = "{}/{}.tbi".format(data_path, file_name)
    elif file_info.is_http and file_info.is_compressed:
        tbi_url = "{}.tbi".format(file_path)
        tbi_path = "{}/{}.tbi".format(data_path, file_name)
        tbi_lock_path = "{}.lock".format(tbi_path)
        # lock access to index file
        lock = FileLock(tbi_lock_path)
        try:
            with lock.acquire(timeout=max_time):
                if tbi_needed(tbi_url, tbi_path):
                    download_file(tbi_url, tbi_path, max_time)
        except TimeoutError:
            # not able to gain access to the shared index in time:
            # fall back to a private copy in a temp directory
            temp = tempfile.TemporaryDirectory()
            tbi_path = "{}/{}.tbi".format(temp.name, file_name)
            download_file(tbi_url, tbi_path, max_time)
        except Exception as error:
            raise Exception(error)
    elif file_info.is_http and not file_info.is_compressed:
        # download, compress to .bgz, then index
        local_path = "{}/{}".format(data_path, file_name)
        download_file(file_path, local_path, max_time)
        data_bgz = compress_vcf(local_path, data_path, file_name)
        file_info.set_compressed_path(data_bgz)
        subprocess.run(['tabix', data_bgz])
        tbi_path = "{}/{}.bgz.tbi".format(data_path, file_name)
    return tbi_path
def compress_vcf(path, data_path, file_name):
    """Compress a VCF file into bgzip format (required for Tabix).

    Args:
        path: path of the uncompressed input file
        data_path: directory to write the compressed file to
        file_name: base name for the output file
    Returns:
        path of the compressed (.bgz) output file
    """
    import shutil  # local import: only used here; module imports unchanged
    data_bgz = "{}/{}.bgz".format(data_path, file_name)
    with open(path, 'rb') as data_stream:
        with open(data_bgz, 'wb') as bgz_stream:
            with bgzip.BGZipWriter(bgz_stream) as fh:
                # Stream in chunks instead of readlines(): avoids loading
                # the entire (potentially multi-GB) VCF into memory.
                shutil.copyfileobj(data_stream, fh)
    return data_bgz
def get_variants(genes, db_info, filter_objs, output_dir,
logger, verbose_on):
"""Query the gnomAD database for variants with Tabix,
apply loss-of-function filters, user-specified predefined
filters, and user-specified additional filters.
Args:
genes: list of Gene objects
db_info: configuration of database
filter_objs: list of all (predefined and additional)
filters as Filter objects
output_dir: directory to write output to
logger: Logger object to log errors to
verbose_on: boolean for verbose mode
"""
variants = np.array([])
max_time = 180
header = None
temp_dir = tempfile.TemporaryDirectory()
temp_name = "{}/".format(temp_dir.name)
# Tracks if gene was found in any database file
coverage = {gene.name: False for gene in genes}
for data_file in db_info.files:
tbi = None
tbx = None
# for files that are local (vcf and vcf.bgz), or HTTP vcf
if data_file.is_local or not data_file.is_compressed:
tbi = get_db_tbi(data_file, temp_name, max_time)
tbx = pysam.TabixFile(data_file.compressed_path,
index=tbi)
# for files that are HTTP vcf.bgz
else:
tbi = get_db_tbi(data_file, DATA_PATH, max_time)
tbx = pysam.TabixFile(data_file.path, index=tbi)
header = tbx.header
# get records in locations
for gene in genes:
if gene.location is None:
continue
try:
records = tbx.fetch(reference=gene.location)
coverage[gene.name] = True
if not db_info.has_lof_annots:
header, variants = VEP.annotate_vep_loftee(header,
variants,
db_info)
lof_index = None
# get index of LoF in header
annot_header = [line for line in header
if "ID={}".format(db_info.lof['id'])
in line][0]
lof_index = annot_header.split("|") \
.index(db_info.lof['annot'])
# update to convert to Variants before filter calls
records = [Variant(gene.name, record, db_info.lof['id'],
db_info.lof['annot'], annot_header) for
record in records]
# filter records
records = filter_plof(genes, records, db_info, lof_index)
records = apply_filters(genes, records, db_info, filter_objs)
gene.add_variants(records)
except ValueError as error:
| |
text in sorted_li]
return json.dumps(OrderedDict(reformatted_li))
def abstract_meaning(self):
    """Collect (language, translations) pairs for this morpheme.

    Used for displaying morpheme keywords per language in the morpheme
    list view. Falls back to the default language when the morpheme has
    no dataset.
    """
    if self.dataset:
        languages = self.dataset.translation_languages.all()
    else:
        languages = [Language.objects.get(id=get_default_language_id())]
    keywords_per_language = []
    for language in languages:
        translations = self.translation_set.filter(
            language=language).order_by('translation__text')
        keywords_per_language.append((language, translations))
    return keywords_per_language
def _field_to_kind(field):
    """Map one Django model field to a widget 'kind'.

    Returns 'check' for boolean fields, 'text' for plain char/text fields
    without a field_choice_category, 'list' for fields tagged with a
    field_choice_category, and the raw internal type name otherwise.
    """
    internal_type = field.get_internal_type()
    if internal_type in ['NullBooleanField', 'BooleanField']:
        return 'check'
    elif internal_type in ['CharField', 'TextField'] and not hasattr(field, 'field_choice_category'):
        return 'text'
    elif hasattr(field, 'field_choice_category'):
        return 'list'
    return internal_type


def generate_fieldname_to_kind_table():
    """Build a lookup table mapping field names to widget kinds.

    Fields are collected from Gloss, then Handshape, then Definition;
    the first model that defines a name wins. When a later model reuses
    a field name with a different kind, a warning is printed.
    """
    temp_field_to_kind_table = dict()
    for f in Gloss._meta.fields:
        temp_field_to_kind_table[f.name] = _field_to_kind(f)
    for h in Handshape._meta.fields:
        if h.name not in temp_field_to_kind_table:
            temp_field_to_kind_table[h.name] = _field_to_kind(h)
        # Bug fix: compare the mapped kind, not the raw internal type,
        # against the stored kind. The old comparison (internal type vs
        # kind string) always differed for 'check'/'text'/'list' entries,
        # producing spurious warnings for matching duplicate fields.
        elif _field_to_kind(h) != temp_field_to_kind_table[h.name]:
            # does this happen?
            print('generate_fieldname_to_kind_table: identical fieldname in Gloss and Handshape with different kinds: ', h.name)
    for d in Definition._meta.fields:
        if d.name not in temp_field_to_kind_table:
            temp_field_to_kind_table[d.name] = _field_to_kind(d)
        # field d already appears in the table (same kind-vs-kind fix as above)
        elif _field_to_kind(d) != temp_field_to_kind_table[d.name]:
            # does this happen?
            print('generate_fieldname_to_kind_table: identical fieldname in Gloss or Handshape and Definition with different kinds: ', d.name)
    return temp_field_to_kind_table
# Module-level cache: computed once at import time so later lookups do not
# re-scan the model metadata on every use.
fieldname_to_kind_table = generate_fieldname_to_kind_table()
class SimultaneousMorphologyDefinition(models.Model):
    """Links a gloss to a morpheme occurring simultaneously within it."""
    # The gloss this simultaneous-morphology entry belongs to.
    parent_gloss = models.ForeignKey(Gloss, related_name='simultaneous_morphology')
    # Free-text label for the morpheme's role within the gloss.
    role = models.CharField(max_length=100)
    # The morpheme that appears in the parent gloss.
    morpheme = models.ForeignKey(Morpheme, related_name='glosses_containing')

    def __str__(self):
        # Represent the relation by the parent gloss's ID gloss.
        return self.parent_gloss.idgloss
class BlendMorphology(models.Model):
    """Links a gloss to another gloss that it is blended from."""
    # The blend gloss this morphology entry belongs to.
    parent_gloss = models.ForeignKey(Gloss, related_name='blend_morphology')
    # Free-text label for this component's role in the blend.
    role = models.CharField(max_length=100)
    # NOTE(review): despite the plural name, this is a single ForeignKey to
    # one component gloss — renaming would require a schema migration.
    glosses = models.ForeignKey(Gloss, related_name='glosses_comprising')

    def __str__(self):
        # Represent the relation by the parent gloss's ID gloss.
        return self.parent_gloss.idgloss
class OtherMedia(models.Model):
    """Videos of or related to a gloss, often created by another project"""
    parent_gloss = models.ForeignKey(Gloss)
    type = models.CharField(max_length=5, choices=build_choice_list('OtherMediaType'))
    type.field_choice_category = 'OtherMediaType'
    alternative_gloss = models.CharField(max_length=50)
    path = models.CharField(max_length=100)

    def get_othermedia_path(self, gloss_id, check_existence=False):
        # read only method
        """Returns a tuple (media_okay, path, filename) """
        # Legacy OtherMedia rows may store paths with the wrong folder
        # layout; classify the stored path and flag anything unexpected.
        import os
        parts = os.path.normpath(self.path).split(os.sep)
        if len(parts) == 1:
            # Bare filename without a gloss-id folder: legacy data, flag it.
            media_okay = False
            path = 'dictionary/protected_media/othermedia/' + self.path
            other_media_filename = self.path
        elif len(parts) == 2 and parts[0] == str(gloss_id):
            # Expected layout: <gloss_id>/<filename>.
            media_okay = True
            path = 'dictionary/protected_media/othermedia/' + self.path
            other_media_filename = parts[-1]
        else:
            # Neither a bare filename nor the correct folder: leave the
            # stored path unprefixed and flag it.
            media_okay = False
            path = self.path
            other_media_filename = parts[-1]
        if media_okay and check_existence:
            # self.path looks okay; verify the file exists in the writable folder.
            # NOTE: Here is a discrepancy with the setting OTHER_MEDIA_DIRECTORY, it ends with a /
            # os.path.exists needs a path, not a string of a path
            writable_location = os.path.join(WRITABLE_FOLDER, 'othermedia', self.path)
            try:
                found = os.path.exists(writable_location)
            except (UnicodeEncodeError, IOError, OSError):
                # Permission or encoding problems count as "not found".
                found = False
            if not found:
                media_okay = False
        return media_okay, path, other_media_filename
class Dataset(models.Model):
    """A dataset, can be public/private and can be of only one SignLanguage"""
    name = models.CharField(unique=True, blank=False, null=False, max_length=60)
    is_public = models.BooleanField(default=False, help_text="Is this dataset public or private?")
    signlanguage = models.ForeignKey("SignLanguage")
    # Languages offered as options when entering translation equivalents.
    translation_languages = models.ManyToManyField("Language", help_text="These languages are shown as options"
                                                   "for translation equivalents.")
    default_language = models.ForeignKey('Language', on_delete=models.DO_NOTHING,
                                         related_name='datasets_with_default_language',
                                         null=True)
    description = models.TextField()
    conditions_of_use = models.TextField(blank=True, help_text="Conditions of Use. Content license."
                                         "This is different than the software code license.")
    copyright = models.TextField(blank=True, help_text="Copyright. Content license."
                                 "This is different than the software code license.")
    reference = models.TextField(blank=True, help_text="")
    acronym = models.CharField(max_length=10, blank=True, help_text="Abbreviation for the dataset")
    owners = models.ManyToManyField(User, help_text="Users responsible for the dataset content.")
    exclude_choices = models.ManyToManyField('FieldChoice', help_text="Exclude these field choices", blank=True)

    class Meta:
        # Custom object-level permission used with django-guardian below.
        permissions = (
            ('view_dataset', _('View dataset')),
        )

    def __init__(self, *args, **kwargs):
        """Initialize and snapshot fields whose changes must be detected later."""
        super().__init__(*args, **kwargs)
        # Keep original acronym for changes to GlossVideos
        self._initial = model_to_dict(self, fields=['acronym', 'default_language'])

    def __str__(self):
        return self.acronym

    def generate_short_name(self):
        """Return the acronym shortened to at most 15 characters.

        A multi-word acronym first has its last word dropped; the result
        (or the original, for single-word acronyms) is then hard-truncated.
        """
        CHARACTER_THRESHOLD = 15
        if len(self.acronym) <= CHARACTER_THRESHOLD:
            return self.acronym
        else:
            # Cut off last word
            if len(self.acronym.split()) > 1:
                result = ' '.join(self.acronym.split()[:-1])
                if len(result) <= CHARACTER_THRESHOLD:
                    return result
            else:
                result = self.acronym
            return result[:CHARACTER_THRESHOLD]

    def get_metadata_path(self, check_existance=True):
        """Returns the path within the writable and static folder"""
        metafile_name = self.acronym + '_metadata.csv'
        goal_string = WRITABLE_FOLDER + DATASET_METADATA_DIRECTORY + '/' + metafile_name
        # Return '' when the file is required to exist but does not.
        if check_existance and os.path.exists(goal_string): #os.path.join(settings.WRITABLE_FOLDER, imagefile_path)):
            return goal_string
        return ''

    def metadata_url(self):
        """Return the URL path (relative, no existence check) of the metadata CSV."""
        metafile_name = self.acronym + '_metadata.csv'
        goal_string = DATASET_METADATA_DIRECTORY + '/' + metafile_name
        return goal_string

    def uploaded_eafs(self):
        """Return the sorted filenames of EAF files uploaded for this dataset."""
        dataset_eaf_folder = WRITABLE_FOLDER + DATASET_EAF_DIRECTORY + '/' + self.acronym
        uploaded_eafs = []
        if os.path.isdir(dataset_eaf_folder):
            for filename in os.listdir(dataset_eaf_folder):
                uploaded_eafs.append(filename)
        uploaded_eafs.sort()
        return uploaded_eafs

    def count_glosses(self):
        """Return the number of glosses whose lemma belongs to this dataset."""
        count_glosses = Gloss.objects.filter(lemma__dataset_id=self.id).count()
        return count_glosses

    def get_users_who_can_view_dataset(self):
        """Return users with the object-level 'view_dataset' permission,
        ordered by first name (groups and superusers excluded)."""
        all_users = User.objects.all().order_by('first_name')
        users_who_can_view_dataset = []
        import guardian
        from guardian.shortcuts import get_objects_for_user, get_users_with_perms
        users_who_can_access_me = get_users_with_perms(self, attach_perms=True, with_superusers=False,
                                                       with_group_users=False)
        for user in all_users:
            if user in users_who_can_access_me.keys():
                if 'view_dataset' in users_who_can_access_me[user]:
                    users_who_can_view_dataset.append(user)
        return users_who_can_view_dataset

    def get_users_who_can_change_dataset(self):
        """Return users with the object-level 'change_dataset' permission,
        ordered by first name (groups and superusers excluded)."""
        all_users = User.objects.all().order_by('first_name')
        users_who_can_change_dataset = []
        import guardian
        from guardian.shortcuts import get_objects_for_user, get_users_with_perms
        users_who_can_access_me = get_users_with_perms(self, attach_perms=True, with_superusers=False,
                                                       with_group_users=False)
        for user in all_users:
            if user in users_who_can_access_me.keys():
                if 'change_dataset' in users_who_can_access_me[user]:
                    users_who_can_change_dataset.append(user)
        return users_who_can_change_dataset

    def generate_frequency_dict(self, language_code):
        """Return an OrderedDict mapping phonology/semantics field names to
        per-choice gloss counts for this dataset.

        Fields are ordered by their verbose name as translated into
        language_code; within each field, choices keep the order of the
        translated choice list.
        """
        codes_to_adjectives = dict(settings.LANGUAGES)
        if language_code not in codes_to_adjectives.keys():
            # Unknown language code: fall back to the configured default.
            adjective = settings.FALLBACK_FIELDCHOICE_HUMAN_LANGUAGE
        else:
            adjective = codes_to_adjectives[language_code].lower()
        # sort the phonology fields based on field label in the designated language
        fields_data = []
        for field in Gloss._meta.fields:
            if field.name in FIELDS['phonology'] + FIELDS['semantics']:
                if hasattr(field, 'field_choice_category'):
                    fc_category = field.field_choice_category
                    fields_data.append((field.name, field.verbose_name.title(), fc_category))
        # CHOICE_LISTS dictionary, maps from field name to pairs of ( _ machine value , translated verbose name )
        # The choice list will be sorted on the translated verbose name
        choice_lists = dict()
        for (f, field_verbose_name, fieldchoice_category) in fields_data:
            if fieldchoice_category:
                choice_list_this_field = FieldChoice.objects.filter(field__iexact=fieldchoice_category).order_by(adjective + '_name')
                # make a dictionary using the field name so we can look up the translated choices later
                choice_lists[f] = choicelist_queryset_to_translated_dict(choice_list_this_field, language_code, ordered=False)
        # Sort the data by the translated verbose name field
        ordered_fields_data = sorted(fields_data, key=lambda x: x[1])
        frequency_lists_phonology_fields = OrderedDict()
        # To generate the correct order, iterate over the ordered fields data, which is ordered by translated verbose name
        for (f, field_verbose_name, fieldchoice_category) in ordered_fields_data:
            choice_list_this_field = FieldChoice.objects.filter(field__iexact=fieldchoice_category).order_by(adjective + '_name')
            # We now basically construct a duplicate of the choice_lists dict, but with the machine values instead of the labels
            # The machine value is what is stored as the value of the field in the Gloss objects
            # We take the count of the machine value in the Gloss objects
            # ordered = True means return an OrderedDict instead of a list
            choice_list_machine_values = choicelist_queryset_to_machine_value_dict(choice_list_this_field, ordered=True)
            # get dictionary of translated field choices CHOICE_LISTS for this field in sorted order (as per the language code)
            sorted_field_choices = copy.deepcopy(choice_lists[f])
            # Because we're dealing with multiple languages and we want the fields to be sorted for the language,
            # we maintain the order of the fields established for the choice_lists dict of field choice names
            choice_list_frequencies = OrderedDict()
            for choice, label in sorted_field_choices:
                machine_value = choice_list_machine_values[choice]
                # empty values can be either 0 or else null
                # the raw query is constructed for this case separately from the case for actual values
                if machine_value == 0:
                    choice_list_frequencies[choice] = Gloss.objects.filter(Q(lemma__dataset=self),
                                                                           Q(**{f + '__isnull': True}) |
                                                                           Q(**{f: 0})).count()
                else:
                    variable_column = f
                    search_filter = 'exact'
                    filter = variable_column + '__' + search_filter
                    choice_list_frequencies[choice] = Gloss.objects.filter(lemma__dataset=self.id).filter(
                        **{filter: machine_value}).count()
            # the new frequencies for this field are added using the update method to insure the order is maintained
            frequency_lists_phonology_fields.update({f: copy.deepcopy(choice_list_frequencies)})
        return frequency_lists_phonology_fields
class UserProfile(models.Model):
    """Per-user settings attached one-to-one to the auth User."""
    # This field is required.
    user = models.OneToOneField(User, related_name="user_profile_user")
    # Other fields here
    last_used_language = models.CharField(max_length=20, default=settings.LANGUAGE_CODE)
    expiry_date = models.DateField(null=True, blank=True)
    number_of_logins = models.IntegerField(null=True, default=0)
    comments = models.CharField(max_length=500, null=True, blank=True)
    selected_datasets = models.ManyToManyField(Dataset)

    def save(self, *args, **kwargs):
        """Save the profile, adopting an existing profile's pk for the same user.

        Prevents duplicate UserProfile rows: when this instance has no pk yet
        but a profile already exists for self.user, reuse that profile's pk so
        the save updates the existing row instead of inserting a new one.
        """
        if not self.pk:
            try:
                p = UserProfile.objects.get(user=self.user)
                self.pk = p.pk
            except UserProfile.DoesNotExist:
                pass
        super(UserProfile, self).save(*args, **kwargs)
def __str__(self):
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.